#include <linux/sizes.h>
#include <linux/vfio_pci_core.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/pci-p2pdma.h>
#include <linux/pm_runtime.h>
#include <linux/memory-failure.h>
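
/*
 * Background note (a summary, not from the stripped source comments): the
 * coherent device memory on Grace-based systems is exposed to the VM as two
 * fake PCI BARs: the bulk, cacheable "usemem" region behind BAR4 and, on
 * devices with the MIG hardware bug, an uncached "resmem" carve-out behind
 * BAR2.
 */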
#define RESMEM_REGION_INDEX VFIO_PCI_BAR2_REGION_INDEX
#define USEMEM_REGION_INDEX VFIO_PCI_BAR4_REGION_INDEX
#define MEMBLK_SIZE SZ_512M
#define DVSEC_BITMAP_OFFSET 0xA
#define MIG_SUPPORTED_WITH_CACHED_RESMEM BIT(0)
#define GPU_CAP_DVSEC_REGISTER 3
#define C2C_LINK_BAR0_OFFSET 0x1498
#define HBM_TRAINING_BAR0_OFFSET 0x200BC
#define STATUS_READY 0xFF
#define POLL_QUANTUM_MS 1000
#define POLL_TIMEOUT_MS (30 * 1000)
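
/*
 * State for one fake BAR: the physical range of device memory backing it,
 * the power-of-two size reported as the BAR size, the emulated BAR register
 * value, and the CPU mapping (cacheable pointer for usemem, MMIO pointer
 * for resmem). The pfn_address_space node registers the range for
 * memory-failure (poison) notification.
 */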
struct mem_region {
phys_addr_t memphys;
size_t memlength;
size_t bar_size;
__le64 bar_val;
union {
void *memaddr;
void __iomem *ioaddr;
};
struct pfn_address_space pfn_address_space;
};
struct nvgrace_gpu_pci_core_device {
struct vfio_pci_core_device core_device;
struct mem_region usemem;
struct mem_region resmem;
struct mutex remap_lock;
bool has_mig_hw_bug;
bool reset_done;
};
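
/*
 * Both the usable (usemem) and the reserved (resmem) device memory regions
 * are exposed as 64-bit fake BARs in the VM: resmem owns
 * PCI_BASE_ADDRESS_2/3, usemem owns PCI_BASE_ADDRESS_4/5. Reset the
 * emulated BAR registers to their power-on value of 0.
 */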
static void nvgrace_gpu_init_fake_bar_emu_regs(struct vfio_device *core_vdev)
{
struct nvgrace_gpu_pci_core_device *nvdev =
container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
core_device.vdev);
nvdev->resmem.bar_val = 0;
nvdev->usemem.bar_val = 0;
}
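
/* Choose the structure corresponding to the fake BAR with a given index. */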
static struct mem_region *
nvgrace_gpu_memregion(int index,
struct nvgrace_gpu_pci_core_device *nvdev)
{
if (index == USEMEM_REGION_INDEX)
return &nvdev->usemem;
if (nvdev->resmem.memlength && index == RESMEM_REGION_INDEX)
return &nvdev->resmem;
return NULL;
}
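
/*
 * Check that @pfn falls within the device memory region backing the fake
 * BAR @index and return its pfn offset within that region.
 */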
static int pfn_memregion_offset(struct nvgrace_gpu_pci_core_device *nvdev,
unsigned int index,
unsigned long pfn,
pgoff_t *pfn_offset_in_region)
{
struct mem_region *region;
unsigned long start_pfn, num_pages;
region = nvgrace_gpu_memregion(index, nvdev);
if (!region)
return -EINVAL;
start_pfn = PHYS_PFN(region->memphys);
num_pages = region->memlength >> PAGE_SHIFT;
if (pfn < start_pfn || pfn >= start_pfn + num_pages)
return -EFAULT;
*pfn_offset_in_region = pfn - start_pfn;
return 0;
}
static inline
struct nvgrace_gpu_pci_core_device *vma_to_nvdev(struct vm_area_struct *vma);
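
/*
 * Translate a (poisoned) pfn into its page offset within @vma; invoked by
 * the memory-failure code through the registered pfn_address_space so the
 * affected mapping can be located.
 */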
static int nvgrace_gpu_pfn_to_vma_pgoff(struct vm_area_struct *vma,
unsigned long pfn,
pgoff_t *pgoff)
{
struct nvgrace_gpu_pci_core_device *nvdev;
unsigned int index =
vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
pgoff_t vma_offset_in_region = vma->vm_pgoff &
((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
pgoff_t pfn_offset_in_region;
int ret;
nvdev = vma_to_nvdev(vma);
if (!nvdev)
return -ENOENT;
ret = pfn_memregion_offset(nvdev, index, pfn, &pfn_offset_in_region);
if (ret)
return ret;
if (pfn_offset_in_region < vma_offset_in_region)
return -EFAULT;
*pgoff = vma->vm_pgoff +
(pfn_offset_in_region - vma_offset_in_region);
return 0;
}
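
/*
 * Register the device memory pfn range with the kernel memory-failure
 * machinery so that poisoned (ECC-error) pfns in the coherent device
 * memory can be reverse mapped to the affected VMAs.
 */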
static int
nvgrace_gpu_vfio_pci_register_pfn_range(struct vfio_device *core_vdev,
struct mem_region *region)
{
unsigned long pfn, nr_pages;
pfn = PHYS_PFN(region->memphys);
nr_pages = region->memlength >> PAGE_SHIFT;
region->pfn_address_space.node.start = pfn;
region->pfn_address_space.node.last = pfn + nr_pages - 1;
region->pfn_address_space.mapping = core_vdev->inode->i_mapping;
region->pfn_address_space.pfn_to_vma_pgoff = nvgrace_gpu_pfn_to_vma_pgoff;
	return register_pfn_address_space(&region->pfn_address_space);
}
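
/*
 * Enable the device, initialize the emulated BAR state, set up the BAR0
 * mapping used for post-reset readiness checks, and register the device
 * memory ranges for poison handling, treating -EOPNOTSUPP from the
 * registration as non-fatal.
 */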
static int nvgrace_gpu_open_device(struct vfio_device *core_vdev)
{
struct vfio_pci_core_device *vdev =
container_of(core_vdev, struct vfio_pci_core_device, vdev);
struct nvgrace_gpu_pci_core_device *nvdev =
container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
core_device.vdev);
int ret;
ret = vfio_pci_core_enable(vdev);
if (ret)
return ret;
if (nvdev->usemem.memlength) {
nvgrace_gpu_init_fake_bar_emu_regs(core_vdev);
mutex_init(&nvdev->remap_lock);
}
ret = vfio_pci_core_setup_barmap(vdev, 0);
if (ret)
goto error_exit;
if (nvdev->resmem.memlength) {
ret = nvgrace_gpu_vfio_pci_register_pfn_range(core_vdev, &nvdev->resmem);
if (ret && ret != -EOPNOTSUPP)
goto error_exit;
}
ret = nvgrace_gpu_vfio_pci_register_pfn_range(core_vdev, &nvdev->usemem);
if (ret && ret != -EOPNOTSUPP)
goto register_mem_failed;
vfio_pci_core_finish_enable(vdev);
return 0;
register_mem_failed:
if (nvdev->resmem.memlength)
unregister_pfn_address_space(&nvdev->resmem.pfn_address_space);
error_exit:
vfio_pci_core_disable(vdev);
return ret;
}
static void nvgrace_gpu_close_device(struct vfio_device *core_vdev)
{
struct nvgrace_gpu_pci_core_device *nvdev =
container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
core_device.vdev);
if (nvdev->resmem.memlength)
unregister_pfn_address_space(&nvdev->resmem.pfn_address_space);
unregister_pfn_address_space(&nvdev->usemem.pfn_address_space);
if (nvdev->usemem.memaddr) {
memunmap(nvdev->usemem.memaddr);
nvdev->usemem.memaddr = NULL;
}
if (nvdev->resmem.ioaddr) {
iounmap(nvdev->resmem.ioaddr);
nvdev->resmem.ioaddr = NULL;
}
mutex_destroy(&nvdev->remap_lock);
vfio_pci_core_close_device(core_vdev);
}
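
/*
 * The GPU reports readiness through two BAR0 registers: the C2C link
 * status and the HBM training status. Poll both every POLL_QUANTUM_MS
 * until they read STATUS_READY, giving up after POLL_TIMEOUT_MS. The
 * device memory must not be accessed before this succeeds.
 */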
static int nvgrace_gpu_wait_device_ready(void __iomem *io)
{
unsigned long timeout = jiffies + msecs_to_jiffies(POLL_TIMEOUT_MS);
do {
if ((ioread32(io + C2C_LINK_BAR0_OFFSET) == STATUS_READY) &&
(ioread32(io + HBM_TRAINING_BAR0_OFFSET) == STATUS_READY))
return 0;
msleep(POLL_QUANTUM_MS);
} while (!time_after(jiffies, timeout));
return -ETIME;
}
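
/*
 * Revalidate device readiness on the first device memory access after a
 * reset. Requires memory_lock held and PCI memory access enabled;
 * reset_done is cleared once the device reports ready so subsequent
 * accesses skip the poll.
 */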
static int
nvgrace_gpu_check_device_ready(struct nvgrace_gpu_pci_core_device *nvdev)
{
struct vfio_pci_core_device *vdev = &nvdev->core_device;
int ret;
lockdep_assert_held_read(&vdev->memory_lock);
if (!nvdev->reset_done)
return 0;
if (!__vfio_pci_memory_enabled(vdev))
return -EIO;
ret = nvgrace_gpu_wait_device_ready(vdev->barmap[0]);
if (ret)
return ret;
nvdev->reset_done = false;
return 0;
}
static unsigned long addr_to_pgoff(struct vm_area_struct *vma,
unsigned long addr)
{
u64 pgoff = vma->vm_pgoff &
((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
return ((addr - vma->vm_start) >> PAGE_SHIFT) + pgoff;
}
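
/*
 * Fault handler for the fake BAR mappings. Compute the backing pfn from
 * the fault address and insert a mapping of the requested order, returning
 * VM_FAULT_FALLBACK when alignment rules out a huge mapping. Device
 * readiness is checked under memory_lock to avoid racing a reset.
 */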
static vm_fault_t nvgrace_gpu_vfio_pci_huge_fault(struct vm_fault *vmf,
unsigned int order)
{
struct vm_area_struct *vma = vmf->vma;
struct nvgrace_gpu_pci_core_device *nvdev = vma->vm_private_data;
struct vfio_pci_core_device *vdev = &nvdev->core_device;
unsigned int index =
vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
vm_fault_t ret = VM_FAULT_FALLBACK;
struct mem_region *memregion;
unsigned long pfn, addr;
memregion = nvgrace_gpu_memregion(index, nvdev);
if (!memregion)
return VM_FAULT_SIGBUS;
addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
pfn = PHYS_PFN(memregion->memphys) + addr_to_pgoff(vma, addr);
if (is_aligned_for_order(vma, addr, pfn, order)) {
scoped_guard(rwsem_read, &vdev->memory_lock) {
if (vdev->pm_runtime_engaged ||
nvgrace_gpu_check_device_ready(nvdev))
return VM_FAULT_SIGBUS;
ret = vfio_pci_vmf_insert_pfn(vdev, vmf, pfn, order);
}
}
dev_dbg_ratelimited(&vdev->pdev->dev,
"%s order = %d pfn 0x%lx: 0x%x\n",
__func__, order, pfn,
(unsigned int)ret);
return ret;
}
static vm_fault_t nvgrace_gpu_vfio_pci_fault(struct vm_fault *vmf)
{
return nvgrace_gpu_vfio_pci_huge_fault(vmf, 0);
}
static const struct vm_operations_struct nvgrace_gpu_vfio_pci_mmap_ops = {
.fault = nvgrace_gpu_vfio_pci_fault,
#ifdef CONFIG_ARCH_SUPPORTS_HUGE_PFNMAP
.huge_fault = nvgrace_gpu_vfio_pci_huge_fault,
#endif
};
static inline
struct nvgrace_gpu_pci_core_device *vma_to_nvdev(struct vm_area_struct *vma)
{
if (vma->vm_ops != &nvgrace_gpu_vfio_pci_mmap_ops)
return NULL;
return vma->vm_private_data;
}
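
/*
 * mmap of the fake BARs: validate the request against the device memory
 * length and leave the actual PTE insertion to the fault handler. resmem
 * must be accessed uncached and is mapped write-combined, with
 * VM_ALLOW_ANY_UNCACHED permitting a relaxed stage-2 mapping for the VM.
 */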
static int nvgrace_gpu_mmap(struct vfio_device *core_vdev,
struct vm_area_struct *vma)
{
struct nvgrace_gpu_pci_core_device *nvdev =
container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
core_device.vdev);
struct mem_region *memregion;
u64 req_len, pgoff, end;
unsigned int index;
index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
memregion = nvgrace_gpu_memregion(index, nvdev);
if (!memregion)
return vfio_pci_core_mmap(core_vdev, vma);
pgoff = vma->vm_pgoff &
((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
if (check_sub_overflow(vma->vm_end, vma->vm_start, &req_len) ||
check_add_overflow(PFN_PHYS(pgoff), req_len, &end))
return -EOVERFLOW;
if (end > memregion->memlength)
return -EINVAL;
vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
if (index == RESMEM_REGION_INDEX) {
vm_flags_set(vma, VM_ALLOW_ANY_UNCACHED);
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
}
vma->vm_ops = &nvgrace_gpu_vfio_pci_mmap_ops;
vma->vm_private_data = nvdev;
return 0;
}
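
/*
 * Report the fake BAR region info: the advertised size is the power-of-two
 * bar_size, while a sparse mmap capability restricts mmap to the actual
 * device memory length. The tail between the two is reachable only through
 * read/write.
 */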
static int nvgrace_gpu_ioctl_get_region_info(struct vfio_device *core_vdev,
struct vfio_region_info *info,
struct vfio_info_cap *caps)
{
struct nvgrace_gpu_pci_core_device *nvdev =
container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
core_device.vdev);
struct vfio_region_info_cap_sparse_mmap *sparse;
struct mem_region *memregion;
u32 size;
int ret;
memregion = nvgrace_gpu_memregion(info->index, nvdev);
if (!memregion)
return vfio_pci_ioctl_get_region_info(core_vdev, info, caps);
size = struct_size(sparse, areas, 1);
sparse = kzalloc(size, GFP_KERNEL);
if (!sparse)
return -ENOMEM;
sparse->nr_areas = 1;
sparse->areas[0].offset = 0;
sparse->areas[0].size = memregion->memlength;
sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
sparse->header.version = 1;
ret = vfio_info_add_capability(caps, &sparse->header, size);
kfree(sparse);
if (ret)
return ret;
info->offset = VFIO_PCI_INDEX_TO_OFFSET(info->index);
info->size = memregion->bar_size;
info->flags = VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE |
VFIO_REGION_INFO_FLAG_MMAP;
return 0;
}
static long nvgrace_gpu_ioctl(struct vfio_device *core_vdev,
unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case VFIO_DEVICE_IOEVENTFD:
return -ENOTTY;
case VFIO_DEVICE_RESET:
nvgrace_gpu_init_fake_bar_emu_regs(core_vdev);
fallthrough;
default:
return vfio_pci_core_ioctl(core_vdev, cmd, arg);
}
}
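
/*
 * Emulate a 64-bit BAR register read: address bits below the BAR size read
 * back as zero and the memory type flags are ORed in, matching real BAR
 * register behavior.
 */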
static __le64
nvgrace_gpu_get_read_value(size_t bar_size, u64 flags, __le64 val64)
{
u64 tmp_val;
tmp_val = le64_to_cpu(val64);
tmp_val &= ~(bar_size - 1);
tmp_val |= flags;
return cpu_to_le64(tmp_val);
}
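
/*
 * Config space read: let the core handle the real registers, then overlay
 * the emulated values for PCI_BASE_ADDRESS_2 (resmem) and
 * PCI_BASE_ADDRESS_4 (usemem) if the read intersects them.
 */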
static ssize_t
nvgrace_gpu_read_config_emu(struct vfio_device *core_vdev,
char __user *buf, size_t count, loff_t *ppos)
{
struct nvgrace_gpu_pci_core_device *nvdev =
container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
core_device.vdev);
u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
struct mem_region *memregion = NULL;
__le64 val64;
size_t register_offset;
loff_t copy_offset;
size_t copy_count;
int ret;
ret = vfio_pci_core_read(core_vdev, buf, count, ppos);
if (ret < 0)
return ret;
if (vfio_pci_core_range_intersect_range(pos, count, PCI_BASE_ADDRESS_2,
sizeof(val64),
						    &copy_offset, &copy_count,
						    &register_offset))
memregion = nvgrace_gpu_memregion(RESMEM_REGION_INDEX, nvdev);
else if (vfio_pci_core_range_intersect_range(pos, count,
PCI_BASE_ADDRESS_4,
sizeof(val64),
						     &copy_offset, &copy_count,
						     &register_offset))
memregion = nvgrace_gpu_memregion(USEMEM_REGION_INDEX, nvdev);
if (memregion) {
val64 = nvgrace_gpu_get_read_value(memregion->bar_size,
PCI_BASE_ADDRESS_MEM_TYPE_64 |
PCI_BASE_ADDRESS_MEM_PREFETCH,
memregion->bar_val);
if (copy_to_user(buf + copy_offset,
(void *)&val64 + register_offset, copy_count)) {
*ppos -= count;
return -EFAULT;
}
}
return count;
}
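
/*
 * Config space write: capture writes to the fake BAR registers in bar_val;
 * everything else goes to the core.
 */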
static ssize_t
nvgrace_gpu_write_config_emu(struct vfio_device *core_vdev,
const char __user *buf, size_t count, loff_t *ppos)
{
struct nvgrace_gpu_pci_core_device *nvdev =
container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
core_device.vdev);
u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
struct mem_region *memregion = NULL;
size_t register_offset;
loff_t copy_offset;
size_t copy_count;
if (vfio_pci_core_range_intersect_range(pos, count, PCI_BASE_ADDRESS_2,
						sizeof(u64), &copy_offset,
						&copy_count, &register_offset))
memregion = nvgrace_gpu_memregion(RESMEM_REGION_INDEX, nvdev);
else if (vfio_pci_core_range_intersect_range(pos, count, PCI_BASE_ADDRESS_4,
						     sizeof(u64), &copy_offset,
						     &copy_count, &register_offset))
memregion = nvgrace_gpu_memregion(USEMEM_REGION_INDEX, nvdev);
if (memregion) {
if (copy_from_user((void *)&memregion->bar_val + register_offset,
buf + copy_offset, copy_count))
return -EFAULT;
*ppos += copy_count;
return copy_count;
}
return vfio_pci_core_write(core_vdev, buf, count, ppos);
}
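
/*
 * Map the device memory lazily on first access. usemem is cache coherent
 * with the CPU and is mapped cacheable (MEMREMAP_WB); resmem must be
 * accessed uncached and is mapped write-combined. Racing mappers are
 * serialized by remap_lock, and the memaddr/ioaddr union lets a single
 * NULL check cover both flavors.
 */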
static int
nvgrace_gpu_map_device_mem(int index,
struct nvgrace_gpu_pci_core_device *nvdev)
{
struct mem_region *memregion;
int ret = 0;
memregion = nvgrace_gpu_memregion(index, nvdev);
if (!memregion)
return -EINVAL;
mutex_lock(&nvdev->remap_lock);
if (memregion->memaddr)
goto unlock;
if (index == USEMEM_REGION_INDEX)
memregion->memaddr = memremap(memregion->memphys,
memregion->memlength,
MEMREMAP_WB);
else
memregion->ioaddr = ioremap_wc(memregion->memphys,
memregion->memlength);
if (!memregion->memaddr)
ret = -ENOMEM;
unlock:
mutex_unlock(&nvdev->remap_lock);
return ret;
}
static int
nvgrace_gpu_map_and_read(struct nvgrace_gpu_pci_core_device *nvdev,
char __user *buf, size_t mem_count, loff_t *ppos)
{
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
u64 offset = *ppos & VFIO_PCI_OFFSET_MASK;
int ret;
if (!mem_count)
return 0;
ret = nvgrace_gpu_map_device_mem(index, nvdev);
if (ret)
return ret;
if (index == USEMEM_REGION_INDEX) {
if (copy_to_user(buf,
(u8 *)nvdev->usemem.memaddr + offset,
mem_count))
ret = -EFAULT;
} else {
ret = vfio_pci_core_do_io_rw(&nvdev->core_device, false,
nvdev->resmem.ioaddr,
buf, offset, mem_count,
0, 0, false, VFIO_PCI_IO_WIDTH_8);
}
return ret;
}
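
/*
 * Read from the device memory region. Only the actual device memory is
 * mapped, and it may not be power-of-two sized; reads past memlength but
 * within the advertised bar_size are filled with 0xFF, as on real PCI
 * hardware.
 */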
static ssize_t
nvgrace_gpu_read_mem(struct nvgrace_gpu_pci_core_device *nvdev,
char __user *buf, size_t count, loff_t *ppos)
{
struct vfio_pci_core_device *vdev = &nvdev->core_device;
u64 offset = *ppos & VFIO_PCI_OFFSET_MASK;
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
struct mem_region *memregion;
size_t mem_count, i;
u8 val = 0xFF;
int ret;
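
	/* The caller has already validated the region index; no NULL check. */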
memregion = nvgrace_gpu_memregion(index, nvdev);
if (offset >= memregion->bar_size)
return -EINVAL;
count = min(count, memregion->bar_size - (size_t)offset);
if (offset >= memregion->memlength)
mem_count = 0;
else
mem_count = min(count, memregion->memlength - (size_t)offset);
scoped_guard(rwsem_read, &vdev->memory_lock) {
ret = nvgrace_gpu_check_device_ready(nvdev);
if (ret)
return ret;
ret = nvgrace_gpu_map_and_read(nvdev, buf, mem_count, ppos);
if (ret)
return ret;
}
for (i = mem_count; i < count; i++) {
ret = put_user(val, (unsigned char __user *)(buf + i));
if (ret)
return ret;
}
*ppos += count;
return count;
}
static ssize_t
nvgrace_gpu_read(struct vfio_device *core_vdev,
char __user *buf, size_t count, loff_t *ppos)
{
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
struct nvgrace_gpu_pci_core_device *nvdev =
container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
core_device.vdev);
struct vfio_pci_core_device *vdev = &nvdev->core_device;
int ret;
if (nvgrace_gpu_memregion(index, nvdev)) {
if (pm_runtime_resume_and_get(&vdev->pdev->dev))
return -EIO;
ret = nvgrace_gpu_read_mem(nvdev, buf, count, ppos);
pm_runtime_put(&vdev->pdev->dev);
return ret;
}
if (index == VFIO_PCI_CONFIG_REGION_INDEX)
return nvgrace_gpu_read_config_emu(core_vdev, buf, count, ppos);
return vfio_pci_core_read(core_vdev, buf, count, ppos);
}
static int
nvgrace_gpu_map_and_write(struct nvgrace_gpu_pci_core_device *nvdev,
const char __user *buf, size_t mem_count,
loff_t *ppos)
{
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
int ret;
if (!mem_count)
return 0;
ret = nvgrace_gpu_map_device_mem(index, nvdev);
if (ret)
return ret;
if (index == USEMEM_REGION_INDEX) {
if (copy_from_user((u8 *)nvdev->usemem.memaddr + pos,
buf, mem_count))
return -EFAULT;
} else {
ret = vfio_pci_core_do_io_rw(&nvdev->core_device, false,
nvdev->resmem.ioaddr,
(char __user *)buf, pos, mem_count,
0, 0, true, VFIO_PCI_IO_WIDTH_8);
}
return ret;
}
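
/*
 * Write to the device memory region. As with reads, the caller has already
 * validated the region index. Writes past memlength but within bar_size
 * are silently dropped while still advancing the file offset.
 */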
static ssize_t
nvgrace_gpu_write_mem(struct nvgrace_gpu_pci_core_device *nvdev,
size_t count, loff_t *ppos, const char __user *buf)
{
struct vfio_pci_core_device *vdev = &nvdev->core_device;
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
u64 offset = *ppos & VFIO_PCI_OFFSET_MASK;
struct mem_region *memregion;
size_t mem_count;
int ret = 0;
memregion = nvgrace_gpu_memregion(index, nvdev);
if (offset >= memregion->bar_size)
return -EINVAL;
count = min(count, memregion->bar_size - (size_t)offset);
if (offset >= memregion->memlength)
goto exitfn;
mem_count = min(count, memregion->memlength - (size_t)offset);
scoped_guard(rwsem_read, &vdev->memory_lock) {
ret = nvgrace_gpu_check_device_ready(nvdev);
if (ret)
return ret;
ret = nvgrace_gpu_map_and_write(nvdev, buf, mem_count, ppos);
if (ret)
return ret;
}
exitfn:
*ppos += count;
return count;
}
static ssize_t
nvgrace_gpu_write(struct vfio_device *core_vdev,
const char __user *buf, size_t count, loff_t *ppos)
{
struct nvgrace_gpu_pci_core_device *nvdev =
container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
core_device.vdev);
struct vfio_pci_core_device *vdev = &nvdev->core_device;
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
int ret;
if (nvgrace_gpu_memregion(index, nvdev)) {
if (pm_runtime_resume_and_get(&vdev->pdev->dev))
return -EIO;
ret = nvgrace_gpu_write_mem(nvdev, count, ppos, buf);
pm_runtime_put(&vdev->pdev->dev);
return ret;
}
if (index == VFIO_PCI_CONFIG_REGION_INDEX)
return nvgrace_gpu_write_config_emu(core_vdev, buf, count, ppos);
return vfio_pci_core_write(core_vdev, buf, count, ppos);
}
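
/*
 * Back dma-buf exports of the fake BAR regions with the device memory
 * physical range through the device's p2pdma provider; real BARs fall
 * through to the core implementation.
 */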
static int nvgrace_get_dmabuf_phys(struct vfio_pci_core_device *core_vdev,
struct p2pdma_provider **provider,
unsigned int region_index,
struct phys_vec *phys_vec,
struct vfio_region_dma_range *dma_ranges,
size_t nr_ranges)
{
struct nvgrace_gpu_pci_core_device *nvdev = container_of(
core_vdev, struct nvgrace_gpu_pci_core_device, core_device);
struct pci_dev *pdev = core_vdev->pdev;
struct mem_region *mem_region;
mem_region = nvgrace_gpu_memregion(region_index, nvdev);
if (mem_region) {
*provider = pcim_p2pdma_provider(pdev, 0);
if (!*provider)
return -EINVAL;
return vfio_pci_core_fill_phys_vec(phys_vec, dma_ranges,
nr_ranges,
mem_region->memphys,
mem_region->memlength);
}
return vfio_pci_core_get_dmabuf_phys(core_vdev, provider, region_index,
phys_vec, dma_ranges, nr_ranges);
}
static const struct vfio_pci_device_ops nvgrace_gpu_pci_dev_ops = {
.get_dmabuf_phys = nvgrace_get_dmabuf_phys,
};
static const struct vfio_device_ops nvgrace_gpu_pci_ops = {
.name = "nvgrace-gpu-vfio-pci",
.init = vfio_pci_core_init_dev,
.release = vfio_pci_core_release_dev,
.open_device = nvgrace_gpu_open_device,
.close_device = nvgrace_gpu_close_device,
.ioctl = nvgrace_gpu_ioctl,
.get_region_info_caps = nvgrace_gpu_ioctl_get_region_info,
.device_feature = vfio_pci_core_ioctl_feature,
.read = nvgrace_gpu_read,
.write = nvgrace_gpu_write,
.mmap = nvgrace_gpu_mmap,
.request = vfio_pci_core_request,
.match = vfio_pci_core_match,
.match_token_uuid = vfio_pci_core_match_token_uuid,
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
.detach_ioas = vfio_iommufd_physical_detach_ioas,
};
static const struct vfio_pci_device_ops nvgrace_gpu_pci_dev_core_ops = {
.get_dmabuf_phys = vfio_pci_core_get_dmabuf_phys,
};
static const struct vfio_device_ops nvgrace_gpu_pci_core_ops = {
.name = "nvgrace-gpu-vfio-pci-core",
.init = vfio_pci_core_init_dev,
.release = vfio_pci_core_release_dev,
.open_device = nvgrace_gpu_open_device,
.close_device = vfio_pci_core_close_device,
.ioctl = vfio_pci_core_ioctl,
.get_region_info_caps = vfio_pci_ioctl_get_region_info,
.device_feature = vfio_pci_core_ioctl_feature,
.read = vfio_pci_core_read,
.write = vfio_pci_core_write,
.mmap = vfio_pci_core_mmap,
.request = vfio_pci_core_request,
.match = vfio_pci_core_match,
.match_token_uuid = vfio_pci_core_match_token_uuid,
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
.detach_ioas = vfio_iommufd_physical_detach_ioas,
};
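
/*
 * Firmware advertises the coherent device memory through ACPI device
 * properties. A reported size of 0 means the C2C link did not come up and
 * the device memory is unusable.
 */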
static int
nvgrace_gpu_fetch_memory_property(struct pci_dev *pdev,
u64 *pmemphys, u64 *pmemlength)
{
int ret;
ret = device_property_read_u64(&pdev->dev, "nvidia,gpu-mem-base-pa",
pmemphys);
if (ret)
return ret;
if (*pmemphys > type_max(phys_addr_t))
return -EOVERFLOW;
ret = device_property_read_u64(&pdev->dev, "nvidia,gpu-mem-size",
pmemlength);
if (ret)
return ret;
if (*pmemlength > type_max(size_t))
return -EOVERFLOW;
if (*pmemlength == 0)
return -ENOMEM;
return ret;
}
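
/*
 * Partition the device memory into the fake BAR regions. On devices with
 * the MIG hardware bug the last 1G (resmem) must be mapped uncached, and
 * usemem is trimmed down to a 512M memory block boundary so the VM can
 * hotplug it. Both fake BAR sizes are rounded up to a power of two, as
 * required for BARs.
 */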
static int
nvgrace_gpu_init_nvdev_struct(struct pci_dev *pdev,
struct nvgrace_gpu_pci_core_device *nvdev,
u64 memphys, u64 memlength)
{
int ret = 0;
u64 resmem_size = 0;
if (nvdev->has_mig_hw_bug)
resmem_size = SZ_1G;
nvdev->usemem.memphys = memphys;
if (check_sub_overflow(memlength, resmem_size,
&nvdev->usemem.memlength)) {
ret = -EOVERFLOW;
goto done;
}
nvdev->usemem.bar_size = roundup_pow_of_two(nvdev->usemem.memlength);
if (!nvdev->has_mig_hw_bug)
goto done;
nvdev->usemem.memlength = round_down(nvdev->usemem.memlength,
MEMBLK_SIZE);
if (nvdev->usemem.memlength == 0) {
ret = -EINVAL;
goto done;
}
if ((check_add_overflow(nvdev->usemem.memphys,
nvdev->usemem.memlength,
&nvdev->resmem.memphys)) ||
(check_sub_overflow(memlength, nvdev->usemem.memlength,
&nvdev->resmem.memlength))) {
ret = -EOVERFLOW;
goto done;
}
nvdev->resmem.bar_size = roundup_pow_of_two(nvdev->resmem.memlength);
done:
return ret;
}
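
/*
 * Devices with the hardware fix advertise it through the
 * MIG_SUPPORTED_WITH_CACHED_RESMEM bit in an NVIDIA vendor DVSEC
 * capability; if the capability or the bit is absent, assume the bug is
 * present and an uncached resmem carve-out is needed.
 */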
static bool nvgrace_gpu_has_mig_hw_bug(struct pci_dev *pdev)
{
int pcie_dvsec;
u16 dvsec_ctrl16;
pcie_dvsec = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_NVIDIA,
GPU_CAP_DVSEC_REGISTER);
if (pcie_dvsec) {
pci_read_config_word(pdev,
pcie_dvsec + DVSEC_BITMAP_OFFSET,
&dvsec_ctrl16);
if (dvsec_ctrl16 & MIG_SUPPORTED_WITH_CACHED_RESMEM)
return false;
}
return true;
}
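
/*
 * HBM training completes after firmware hand-off on these systems, so
 * verify during probe that the device has finished initializing:
 * temporarily enable it, map BAR0 and poll the readiness registers.
 */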
static int nvgrace_gpu_probe_check_device_ready(struct pci_dev *pdev)
{
void __iomem *io;
int ret;
ret = pci_enable_device(pdev);
if (ret)
return ret;
ret = pci_request_selected_regions(pdev, 1 << 0, KBUILD_MODNAME);
if (ret)
goto request_region_exit;
io = pci_iomap(pdev, 0, 0);
if (!io) {
ret = -ENOMEM;
goto iomap_exit;
}
ret = nvgrace_gpu_wait_device_ready(io);
pci_iounmap(pdev, io);
iomap_exit:
pci_release_selected_regions(pdev, 1 << 0);
request_region_exit:
pci_disable_device(pdev);
return ret;
}
static int nvgrace_gpu_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
const struct vfio_device_ops *ops = &nvgrace_gpu_pci_core_ops;
struct nvgrace_gpu_pci_core_device *nvdev;
u64 memphys, memlength;
int ret;
ret = nvgrace_gpu_probe_check_device_ready(pdev);
if (ret)
return ret;
ret = nvgrace_gpu_fetch_memory_property(pdev, &memphys, &memlength);
if (!ret)
ops = &nvgrace_gpu_pci_ops;
nvdev = vfio_alloc_device(nvgrace_gpu_pci_core_device, core_device.vdev,
&pdev->dev, ops);
if (IS_ERR(nvdev))
return PTR_ERR(nvdev);
dev_set_drvdata(&pdev->dev, &nvdev->core_device);
if (ops == &nvgrace_gpu_pci_ops) {
nvdev->has_mig_hw_bug = nvgrace_gpu_has_mig_hw_bug(pdev);
ret = nvgrace_gpu_init_nvdev_struct(pdev, nvdev,
memphys, memlength);
if (ret)
goto out_put_vdev;
nvdev->core_device.pci_ops = &nvgrace_gpu_pci_dev_ops;
} else {
nvdev->core_device.pci_ops = &nvgrace_gpu_pci_dev_core_ops;
}
ret = vfio_pci_core_register_device(&nvdev->core_device);
if (ret)
goto out_put_vdev;
return ret;
out_put_vdev:
vfio_put_device(&nvdev->core_device.vdev);
return ret;
}
static void nvgrace_gpu_remove(struct pci_dev *pdev)
{
struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);
vfio_pci_core_unregister_device(core_device);
vfio_put_device(&core_device->vdev);
}
static const struct pci_device_id nvgrace_gpu_vfio_pci_table[] = {
{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2342) },
{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2345) },
{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2348) },
{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2941) },
{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x31C2) },
{}
};
MODULE_DEVICE_TABLE(pci, nvgrace_gpu_vfio_pci_table);
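
/*
 * Note a completed reset so the next device memory access re-polls the
 * readiness registers before proceeding.
 */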
static void nvgrace_gpu_vfio_pci_reset_done(struct pci_dev *pdev)
{
struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);
struct nvgrace_gpu_pci_core_device *nvdev =
container_of(core_device, struct nvgrace_gpu_pci_core_device,
core_device);
nvdev->reset_done = true;
}
static const struct pci_error_handlers nvgrace_gpu_vfio_pci_err_handlers = {
.reset_done = nvgrace_gpu_vfio_pci_reset_done,
.error_detected = vfio_pci_core_aer_err_detected,
};
static struct pci_driver nvgrace_gpu_vfio_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = nvgrace_gpu_vfio_pci_table,
.probe = nvgrace_gpu_probe,
.remove = nvgrace_gpu_remove,
.err_handler = &nvgrace_gpu_vfio_pci_err_handlers,
.driver_managed_dma = true,
};
module_pci_driver(nvgrace_gpu_vfio_pci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ankit Agrawal <ankita@nvidia.com>");
MODULE_AUTHOR("Aniket Agashe <aniketa@nvidia.com>");
MODULE_DESCRIPTION("VFIO NVGRACE GPU PF - User Level driver for NVIDIA devices with CPU coherently accessible device memory");