#include "iommu-pages.h"
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>
/*
 * struct ioptdesc is overlaid on struct page for pages holding IOMMU page
 * tables.  Assert at compile time that every ioptdesc member sits at the
 * exact offset of the struct page member it aliases, so the overlay stays
 * valid if struct page is ever rearranged.
 */
#define IOPTDESC_MATCH(pg_elm, elm) \
static_assert(offsetof(struct page, pg_elm) == \
offsetof(struct ioptdesc, elm))
IOPTDESC_MATCH(flags, __page_flags);
IOPTDESC_MATCH(lru, iopt_freelist_elm);
IOPTDESC_MATCH(mapping, __page_mapping);
IOPTDESC_MATCH(private, _private);
IOPTDESC_MATCH(page_type, __page_type);
IOPTDESC_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
IOPTDESC_MATCH(memcg_data, memcg_data);
#endif
#undef IOPTDESC_MATCH
/* The overlay must fit entirely within the struct page it aliases. */
static_assert(sizeof(struct ioptdesc) <= sizeof(struct page));
static inline size_t ioptdesc_mem_size(struct ioptdesc *desc)
{
return 1UL << (folio_order(ioptdesc_folio(desc)) + PAGE_SHIFT);
}
/**
 * iommu_alloc_pages_node_sz - Allocate a zeroed block of pages from a node
 * @nid: preferred NUMA node, or NUMA_NO_NODE to use the local node
 * @gfp: allocation flags; __GFP_HIGHMEM is rejected with a WARN
 * @size: requested size in bytes, rounded up to a power-of-two page order
 *
 * The block starts in coherent mode (incoherent flag cleared) and is
 * accounted under NR_IOMMU_PAGES and NR_SECONDARY_PAGETABLE.  Free it with
 * iommu_free_pages().
 *
 * Return: kernel virtual address of the block, or NULL on failure.
 */
void *iommu_alloc_pages_node_sz(int nid, gfp_t gfp, size_t size)
{
	const unsigned int order = get_order(size);
	unsigned long nr_pages;
	struct folio *folio;

	/* The returned address must be directly addressable. */
	if (WARN_ON(gfp & __GFP_HIGHMEM))
		return NULL;

	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	folio = __folio_alloc_node(gfp | __GFP_ZERO, order, nid);
	if (unlikely(!folio))
		return NULL;

	folio_ioptdesc(folio)->incoherent = false;

	nr_pages = 1UL << order;
	mod_node_page_state(folio_pgdat(folio), NR_IOMMU_PAGES, nr_pages);
	lruvec_stat_mod_folio(folio, NR_SECONDARY_PAGETABLE, nr_pages);
	return folio_address(folio);
}
EXPORT_SYMBOL_GPL(iommu_alloc_pages_node_sz);
/* Undo the accounting from iommu_alloc_pages_node_sz() and drop the folio. */
static void __iommu_free_desc(struct ioptdesc *iopt)
{
	struct folio *folio = ioptdesc_folio(iopt);
	const long nr_pages = folio_nr_pages(folio);

	/* Callers must leave incoherent mode before freeing. */
	if (IOMMU_PAGES_USE_DMA_API)
		WARN_ON_ONCE(iopt->incoherent);

	mod_node_page_state(folio_pgdat(folio), NR_IOMMU_PAGES, -nr_pages);
	lruvec_stat_mod_folio(folio, NR_SECONDARY_PAGETABLE, -nr_pages);
	folio_put(folio);
}
/**
 * iommu_free_pages - Free a block from iommu_alloc_pages_node_sz()
 * @virt: virtual address of the block, or NULL (no-op)
 */
void iommu_free_pages(void *virt)
{
	if (virt)
		__iommu_free_desc(virt_to_ioptdesc(virt));
}
EXPORT_SYMBOL_GPL(iommu_free_pages);
/**
 * iommu_put_pages_list - Free every block queued on @list
 * @list: list of ioptdescs to free
 *
 * Entries are not unlinked as they are freed, so the list head is left
 * referencing freed memory and must not be walked again afterwards.
 */
void iommu_put_pages_list(struct iommu_pages_list *list)
{
	struct ioptdesc *cur, *next;

	list_for_each_entry_safe(cur, next, &list->pages, iopt_freelist_elm)
		__iommu_free_desc(cur);
}
EXPORT_SYMBOL_GPL(iommu_put_pages_list);
int iommu_pages_start_incoherent(void *virt, struct device *dma_dev)
{
struct ioptdesc *iopt = virt_to_ioptdesc(virt);
dma_addr_t dma;
if (WARN_ON(iopt->incoherent))
return -EINVAL;
if (!IOMMU_PAGES_USE_DMA_API) {
iommu_pages_flush_incoherent(dma_dev, virt, 0,
ioptdesc_mem_size(iopt));
} else {
dma = dma_map_single(dma_dev, virt, ioptdesc_mem_size(iopt),
DMA_TO_DEVICE);
if (dma_mapping_error(dma_dev, dma))
return -EINVAL;
if (WARN_ON(dma != virt_to_phys(virt))) {
dma_unmap_single(dma_dev, dma, ioptdesc_mem_size(iopt),
DMA_TO_DEVICE);
return -EOPNOTSUPP;
}
}
iopt->incoherent = 1;
return 0;
}
EXPORT_SYMBOL_GPL(iommu_pages_start_incoherent);
/**
 * iommu_pages_start_incoherent_list - Start incoherent mode for a whole list
 * @list: list of ioptdescs to transition
 * @dma_dev: device passed through to iommu_pages_start_incoherent()
 *
 * Entries that are already incoherent are skipped with a WARN.  On failure
 * the walk stops immediately; entries transitioned earlier in the list are
 * left in incoherent mode.
 *
 * Return: 0 on success, otherwise the first error encountered.
 */
int iommu_pages_start_incoherent_list(struct iommu_pages_list *list,
				      struct device *dma_dev)
{
	struct ioptdesc *iopt;

	list_for_each_entry(iopt, &list->pages, iopt_freelist_elm) {
		int rc;

		if (WARN_ON(iopt->incoherent))
			continue;

		rc = iommu_pages_start_incoherent(
			folio_address(ioptdesc_folio(iopt)), dma_dev);
		if (rc)
			return rc;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_pages_start_incoherent_list);
#if IOMMU_PAGES_USE_DMA_API
/**
 * iommu_pages_stop_incoherent_list - Return listed blocks to coherent mode
 * @list: list of ioptdescs to transition
 * @dma_dev: device the blocks were mapped for
 *
 * Blocks that are not in incoherent mode are left untouched; the rest are
 * unmapped and their incoherent flag cleared.
 */
void iommu_pages_stop_incoherent_list(struct iommu_pages_list *list,
				      struct device *dma_dev)
{
	struct ioptdesc *iopt;

	list_for_each_entry(iopt, &list->pages, iopt_freelist_elm) {
		void *virt;

		if (!iopt->incoherent)
			continue;

		virt = folio_address(ioptdesc_folio(iopt));
		dma_unmap_single(dma_dev, virt_to_phys(virt),
				 ioptdesc_mem_size(iopt), DMA_TO_DEVICE);
		iopt->incoherent = 0;
	}
}
EXPORT_SYMBOL_GPL(iommu_pages_stop_incoherent_list);
/**
 * iommu_pages_free_incoherent - Free a block that may be in incoherent mode
 * @virt: virtual address of the block, or NULL (no-op)
 * @dma_dev: device the block was mapped for
 *
 * If the block is still in incoherent mode it is unmapped first (so
 * __iommu_free_desc() does not WARN), then freed.  Accepts NULL for
 * consistency with iommu_free_pages().
 */
void iommu_pages_free_incoherent(void *virt, struct device *dma_dev)
{
	struct ioptdesc *iopt;

	/* Match iommu_free_pages(): tolerate a NULL block. */
	if (!virt)
		return;

	iopt = virt_to_ioptdesc(virt);
	if (iopt->incoherent) {
		dma_unmap_single(dma_dev, virt_to_phys(virt),
				 ioptdesc_mem_size(iopt), DMA_TO_DEVICE);
		iopt->incoherent = 0;
	}
	__iommu_free_desc(iopt);
}
EXPORT_SYMBOL_GPL(iommu_pages_free_incoherent);
#endif