#include "pvr_device.h"
#include "pvr_gem.h"
#include "pvr_vm.h"

#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

#include <linux/compiler.h>
#include <linux/compiler_attributes.h>
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/iosys-map.h>
#include <linux/log2.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/property.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
/* &drm_gem_object_funcs.free callback; defers entirely to the shmem helper. */
static void pvr_gem_object_free(struct drm_gem_object *obj)
{
	drm_gem_shmem_object_free(obj);
}
/*
 * &drm_gem_object_funcs.export callback.
 *
 * Buffers created with DRM_PVR_BO_PM_FW_PROTECT are refused with -EPERM;
 * everything else goes through the generic PRIME export path.
 */
static struct dma_buf *pvr_gem_export(struct drm_gem_object *obj, int flags)
{
	struct pvr_gem_object *pvr_obj = gem_to_pvr_gem(obj);

	if (pvr_obj->flags & DRM_PVR_BO_PM_FW_PROTECT)
		return ERR_PTR(-EPERM);

	return drm_gem_prime_export(obj, flags);
}
/*
 * &drm_gem_object_funcs.mmap callback.
 *
 * Userspace CPU mappings are only permitted for objects created with
 * DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS; otherwise return -EINVAL. The
 * actual mapping is delegated to the shmem helper.
 */
static int pvr_gem_mmap(struct drm_gem_object *gem_obj, struct vm_area_struct *vma)
{
	struct pvr_gem_object *pvr_obj = gem_to_pvr_gem(gem_obj);
	struct drm_gem_shmem_object *shmem_obj = shmem_gem_from_pvr_gem(pvr_obj);

	if (!(pvr_obj->flags & DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS))
		return -EINVAL;

	return drm_gem_shmem_mmap(shmem_obj, vma);
}
/*
 * GEM object operations. Most entries are the generic shmem helpers; free,
 * export and mmap are wrapped locally to enforce PowerVR BO flag policy.
 */
static const struct drm_gem_object_funcs pvr_gem_object_funcs = {
	.free = pvr_gem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.export = pvr_gem_export,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = pvr_gem_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};
/*
 * pvr_gem_object_flags_validate() - Check that a BO flags value is sane.
 * @flags: Flags to check.
 *
 * A flags value is rejected if it contains any bit outside the defined flag
 * set (PVR_BO_UNDEFINED_MASK), or if it contains one of the mutually
 * contradictory flag combinations listed below.
 *
 * Return: %true if @flags is valid, %false otherwise.
 */
static bool
pvr_gem_object_flags_validate(u64 flags)
{
	static const u64 disallowed[] = {
		/* FW-protected buffers and userspace CPU access conflict. */
		(DRM_PVR_BO_PM_FW_PROTECT |
		 DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS),
	};
	size_t idx;

	if (flags & PVR_BO_UNDEFINED_MASK)
		return false;

	for (idx = 0; idx < ARRAY_SIZE(disallowed); ++idx) {
		if ((flags & disallowed[idx]) == disallowed[idx])
			return false;
	}

	return true;
}
/**
 * pvr_gem_object_into_handle() - Convert a reference on a GEM object into a
 * userspace handle.
 * @pvr_obj: Object whose reference is consumed.
 * @pvr_file: File the handle is created for.
 * @handle: [OUT] Receives the new handle on success; untouched on failure.
 *
 * On success the caller's reference to @pvr_obj is released — ownership is
 * effectively transferred to the handle created here (drm_gem_handle_create()
 * takes its own reference).
 *
 * Return: 0 on success, or the error from drm_gem_handle_create().
 */
int
pvr_gem_object_into_handle(struct pvr_gem_object *pvr_obj,
			   struct pvr_file *pvr_file, u32 *handle)
{
	struct drm_gem_object *gem_obj = gem_from_pvr_gem(pvr_obj);
	struct drm_file *file = from_pvr_file(pvr_file);
	u32 new_handle;
	int err;

	err = drm_gem_handle_create(file, gem_obj, &new_handle);
	if (err)
		return err;

	/*
	 * Release our reference to @pvr_obj, effectively transferring
	 * ownership to the handle created above.
	 */
	pvr_gem_object_put(pvr_obj);

	/* Only store the new handle once no more errors can occur. */
	*handle = new_handle;

	return 0;
}
/**
 * pvr_gem_object_from_handle() - Look up a GEM object by userspace handle.
 * @pvr_file: File the handle belongs to.
 * @handle: Handle to resolve.
 *
 * The lookup takes a reference on the underlying GEM object; the caller owns
 * that reference and must eventually drop it.
 *
 * Return: The PowerVR GEM object, or %NULL if @handle is not valid.
 */
struct pvr_gem_object *
pvr_gem_object_from_handle(struct pvr_file *pvr_file, u32 handle)
{
	struct drm_gem_object *gem_obj =
		drm_gem_object_lookup(from_pvr_file(pvr_file), handle);

	return gem_obj ? gem_to_pvr_gem(gem_obj) : NULL;
}
/**
 * pvr_gem_object_vmap() - Map a PowerVR GEM object into CPU virtual address
 * space.
 * @pvr_obj: Object to map.
 *
 * The mapping is created under the object's reservation lock. For objects
 * using a cached CPU mapping (PVR_BO_CPU_CACHED) the sg-table, if one exists,
 * is synced for CPU access first so the CPU sees the device's writes.
 *
 * Return: CPU pointer to the mapping, or an ERR_PTR() on vmap failure.
 */
void *
pvr_gem_object_vmap(struct pvr_gem_object *pvr_obj)
{
	struct drm_gem_shmem_object *shmem_obj = shmem_gem_from_pvr_gem(pvr_obj);
	struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj);
	struct iosys_map map;
	int err;

	dma_resv_lock(obj->resv, NULL);

	err = drm_gem_shmem_vmap_locked(shmem_obj, &map);
	if (err)
		goto err_unlock;

	if (pvr_obj->flags & PVR_BO_CPU_CACHED) {
		struct device *dev = shmem_obj->base.dev->dev;

		/*
		 * A NULL shmem_obj->sgt means there are no DMA-mapped pages to
		 * sync yet — presumably the buffer has not been mapped for the
		 * device so far.
		 */
		if (shmem_obj->sgt)
			dma_sync_sgtable_for_cpu(dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
	}

	dma_resv_unlock(obj->resv);

	return map.vaddr;

err_unlock:
	dma_resv_unlock(obj->resv);

	return ERR_PTR(err);
}
/**
 * pvr_gem_object_vunmap() - Unmap a PowerVR GEM object from CPU virtual
 * address space.
 * @pvr_obj: Object to unmap; must currently be vmapped (WARNs and returns
 * early otherwise).
 *
 * For cached CPU mappings (PVR_BO_CPU_CACHED) the sg-table, if present, is
 * synced back for device access before the CPU mapping is torn down, so CPU
 * writes become visible to the device.
 */
void
pvr_gem_object_vunmap(struct pvr_gem_object *pvr_obj)
{
	struct drm_gem_shmem_object *shmem_obj = shmem_gem_from_pvr_gem(pvr_obj);
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(shmem_obj->vaddr);
	struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj);

	if (WARN_ON(!map.vaddr))
		return;

	dma_resv_lock(obj->resv, NULL);

	if (pvr_obj->flags & PVR_BO_CPU_CACHED) {
		struct device *dev = shmem_obj->base.dev->dev;

		/*
		 * A NULL shmem_obj->sgt means there are no DMA-mapped pages to
		 * sync — presumably the buffer has not been mapped for the
		 * device so far.
		 */
		if (shmem_obj->sgt)
			dma_sync_sgtable_for_device(dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
	}

	drm_gem_shmem_vunmap_locked(shmem_obj, &map);

	dma_resv_unlock(obj->resv);
}
/*
 * pvr_gem_object_zero() - Zero the entire backing store of a GEM object via
 * a temporary CPU mapping.
 *
 * Return: 0 on success, or the error from pvr_gem_object_vmap().
 */
static int
pvr_gem_object_zero(struct pvr_gem_object *pvr_obj)
{
	void *cpu_ptr;

	cpu_ptr = pvr_gem_object_vmap(pvr_obj);
	if (IS_ERR(cpu_ptr))
		return PTR_ERR(cpu_ptr);

	memset(cpu_ptr, 0, pvr_gem_object_size(pvr_obj));

	/* Make sure the zero-ing is done before vunmap-ing the object. */
	wmb();

	pvr_gem_object_vunmap(pvr_obj);

	return 0;
}
/**
 * pvr_gem_create_object() - Allocate a PowerVR-subclassed GEM object.
 * @drm_dev: DRM device (unused here; kept for the callback signature).
 * @size: Requested size (unused here; the shmem helpers size the backing
 * store).
 *
 * NOTE(review): this looks like the &drm_driver.gem_create_object hook used
 * by drm_gem_shmem_create() — confirm against the driver registration.
 *
 * Return: Pointer to the embedded &struct drm_gem_object on success, or
 * ERR_PTR(-ENOMEM) if allocation fails.
 */
struct drm_gem_object *pvr_gem_create_object(struct drm_device *drm_dev, size_t size)
{
	struct drm_gem_object *gem_obj;
	struct pvr_gem_object *pvr_obj;

	/*
	 * Use the standard slab API with explicit GFP flags; the previous
	 * kzalloc_obj(*pvr_obj) form is not a kernel slab interface.
	 * Zero-initialization leaves pvr_obj->flags and all driver state clean.
	 */
	pvr_obj = kzalloc(sizeof(*pvr_obj), GFP_KERNEL);
	if (!pvr_obj)
		return ERR_PTR(-ENOMEM);

	gem_obj = gem_from_pvr_gem(pvr_obj);
	gem_obj->funcs = &pvr_gem_object_funcs;

	return gem_obj;
}
/**
 * pvr_gem_object_create() - Create a shmem-backed PowerVR GEM object.
 * @pvr_dev: Owning PowerVR device.
 * @size: Size of the backing store; must be non-zero.
 * @flags: DRM_PVR_BO_* creation flags; must pass
 * pvr_gem_object_flags_validate().
 *
 * The new object's pages are DMA-mapped, synced for the device, and zeroed
 * before being handed to the caller.
 *
 * Return: The new object on success, or an ERR_PTR():
 *  * -EINVAL if @size is zero or @flags is invalid,
 *  * any error from drm_gem_shmem_create(), drm_gem_shmem_get_pages_sgt()
 *    or pvr_gem_object_zero().
 */
struct pvr_gem_object *
pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct drm_gem_shmem_object *shmem_obj;
	struct pvr_gem_object *pvr_obj;
	struct sg_table *sgt;
	int err;

	/* Verify @size and @flags before continuing. */
	if (size == 0 || !pvr_gem_object_flags_validate(flags))
		return ERR_PTR(-EINVAL);

	/* Use a cached CPU mapping when the device is DMA-coherent. */
	if (device_get_dma_attr(drm_dev->dev) == DEV_DMA_COHERENT)
		flags |= PVR_BO_CPU_CACHED;

	shmem_obj = drm_gem_shmem_create(drm_dev, size);
	if (IS_ERR(shmem_obj))
		return ERR_CAST(shmem_obj);

	shmem_obj->pages_mark_dirty_on_put = true;
	shmem_obj->map_wc = !(flags & PVR_BO_CPU_CACHED);
	pvr_obj = shmem_gem_to_pvr_gem(shmem_obj);
	pvr_obj->flags = flags;

	sgt = drm_gem_shmem_get_pages_sgt(shmem_obj);
	if (IS_ERR(sgt)) {
		err = PTR_ERR(sgt);
		goto err_shmem_object_free;
	}

	dma_sync_sgtable_for_device(drm_dev->dev, sgt, DMA_BIDIRECTIONAL);

	/*
	 * Zero the buffer last, so no other step needs undoing on its failure.
	 * Fix: the return value was previously ignored, which could hand a
	 * non-zeroed buffer to the caller if the vmap inside failed.
	 */
	err = pvr_gem_object_zero(pvr_obj);
	if (err)
		goto err_shmem_object_free;

	return pvr_obj;

err_shmem_object_free:
	drm_gem_shmem_free(shmem_obj);

	return ERR_PTR(err);
}
/**
 * pvr_gem_get_dma_addr() - Translate a byte offset within a GEM object into
 * a DMA address.
 * @pvr_obj: Object to query; its pages must already have an sg-table (WARNs
 * if not).
 * @offset: Byte offset from the start of the object.
 * @dma_addr_out: [OUT] Receives the DMA address on success.
 *
 * Walks the object's DMA-mapped scatterlist segments until the one covering
 * @offset is found.
 *
 * Return: 0 on success, or -EINVAL if @offset lies beyond the mapped range.
 */
int
pvr_gem_get_dma_addr(struct pvr_gem_object *pvr_obj, u32 offset,
		     dma_addr_t *dma_addr_out)
{
	struct drm_gem_shmem_object *shmem_obj = shmem_gem_from_pvr_gem(pvr_obj);
	struct scatterlist *sgl;
	unsigned int idx;
	u32 seg_start = 0;

	WARN_ON(!shmem_obj->sgt);

	for_each_sgtable_dma_sg(shmem_obj->sgt, sgl, idx) {
		u32 seg_end = seg_start + sg_dma_len(sgl);

		/* Does this segment cover @offset? */
		if (offset >= seg_start && offset < seg_end) {
			*dma_addr_out = sg_dma_address(sgl) +
					(offset - seg_start);
			return 0;
		}

		seg_start = seg_end;
	}

	return -EINVAL;
}