// SPDX-License-Identifier: GPL-2.0 or MIT

#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>

#include "panthor_device.h"
#include "panthor_gpu.h"
#include "panthor_hw.h"
#include "panthor_regs.h"
struct panthor_gpu {
	/** @irq: GPU irq. */
	struct panthor_irq irq;

	/** @reqs_lock: Lock protecting access to @pending_reqs. */
	spinlock_t reqs_lock;

	/** @pending_reqs: Pending GPU requests (GPU_IRQ_* bits). */
	u32 pending_reqs;

	/** @reqs_acked: GPU request wait queue. */
	wait_queue_head_t reqs_acked;

	/** @cache_flush_lock: Lock used to serialize cache flush requests. */
	struct mutex cache_flush_lock;
};
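
/* GPU interrupts we want to be notified about. */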
#define GPU_INTERRUPTS_MASK \
(GPU_IRQ_FAULT | \
GPU_IRQ_PROTM_FAULT | \
GPU_IRQ_RESET_COMPLETED | \
GPU_IRQ_CLEAN_CACHES_COMPLETED)
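
/* Program the coherency protocol selected for this device into the GPU. */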
static void panthor_gpu_coherency_set(struct panthor_device *ptdev)
{
gpu_write(ptdev, GPU_COHERENCY_PROTOCOL,
ptdev->gpu_info.selected_coherency);
}
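
/*
 * Enable the custom ASN hash when the SoC data provides one. Only GPUs with
 * an arch major >= 11 support a custom ASN hash.
 */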
static void panthor_gpu_l2_config_set(struct panthor_device *ptdev)
{
	const struct panthor_soc_data *data = ptdev->soc_data;
	u32 l2_config;
	u32 i;

	if (!data || !data->asn_hash_enable)
		return;

	if (GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id) < 11) {
		drm_err(&ptdev->base, "Custom ASN hash not supported by the device\n");
		return;
	}

	for (i = 0; i < ARRAY_SIZE(data->asn_hash); i++)
		gpu_write(ptdev, GPU_ASN_HASH(i), data->asn_hash[i]);

	l2_config = gpu_read(ptdev, GPU_L2_CONFIG);
	l2_config |= GPU_L2_CONFIG_ASN_HASH_ENABLE;
	gpu_write(ptdev, GPU_L2_CONFIG, l2_config);
}
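
/* Interrupt handler: acknowledge interrupts, report faults, wake up waiters. */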
static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status)
{
	gpu_write(ptdev, GPU_INT_CLEAR, status);

	if (status & GPU_IRQ_FAULT) {
		u32 fault_status = gpu_read(ptdev, GPU_FAULT_STATUS);
		u64 address = gpu_read64(ptdev, GPU_FAULT_ADDR);

		drm_warn(&ptdev->base, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
			 fault_status, panthor_exception_name(ptdev, fault_status & 0xFF),
			 address);
	}

	if (status & GPU_IRQ_PROTM_FAULT)
		drm_warn(&ptdev->base, "GPU Fault in protected mode\n");

	/* Wake up anyone waiting on a request we just saw completing. */
	spin_lock(&ptdev->gpu->reqs_lock);
	if (status & ptdev->gpu->pending_reqs) {
		ptdev->gpu->pending_reqs &= ~status;
		wake_up_all(&ptdev->gpu->reqs_acked);
	}
	spin_unlock(&ptdev->gpu->reqs_lock);
}
PANTHOR_IRQ_HANDLER(gpu, GPU, panthor_gpu_irq_handler);
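
/**
 * panthor_gpu_unplug() - Called when the GPU is unplugged.
 * @ptdev: Device to unplug.
 */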
void panthor_gpu_unplug(struct panthor_device *ptdev)
{
	unsigned long flags;

	/* Make sure the IRQ handler is not running after that point. */
	if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev))
		panthor_gpu_irq_suspend(&ptdev->gpu->irq);

	/* Wake up all waiters so nothing stays blocked on an unplugged device. */
	spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
	ptdev->gpu->pending_reqs = 0;
	wake_up_all(&ptdev->gpu->reqs_acked);
	spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
}
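
/**
 * panthor_gpu_init() - Initialize the GPU block
 * @ptdev: Device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */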
int panthor_gpu_init(struct panthor_device *ptdev)
{
struct panthor_gpu *gpu;
u32 pa_bits;
int ret, irq;
	gpu = drmm_kzalloc(&ptdev->base, sizeof(*gpu), GFP_KERNEL);
	if (!gpu)
		return -ENOMEM;

	spin_lock_init(&gpu->reqs_lock);
	init_waitqueue_head(&gpu->reqs_acked);
	mutex_init(&gpu->cache_flush_lock);
	ptdev->gpu = gpu;

	dma_set_max_seg_size(ptdev->base.dev, UINT_MAX);

	/* Size the DMA mask to the physical address bits the MMU can handle. */
	pa_bits = GPU_MMU_FEATURES_PA_BITS(ptdev->gpu_info.mmu_features);
	ret = dma_set_mask_and_coherent(ptdev->base.dev, DMA_BIT_MASK(pa_bits));
	if (ret)
		return ret;

	irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "gpu");
	if (irq < 0)
		return irq;

	ret = panthor_request_gpu_irq(ptdev, &ptdev->gpu->irq, irq, GPU_INTERRUPTS_MASK);
	if (ret)
		return ret;

	return 0;
}
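
/**
 * panthor_gpu_block_power_off() - Power-off a specific block of the GPU
 * @ptdev: Device.
 * @blk_name: Block name.
 * @pwroff_reg: Power-off register for this block.
 * @pwrtrans_reg: Power transition register for this block.
 * @mask: Sub-elements to power-off.
 * @timeout_us: Timeout in microseconds.
 *
 * Return: 0 on success, a negative error code otherwise.
 */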
int panthor_gpu_block_power_off(struct panthor_device *ptdev,
const char *blk_name,
u32 pwroff_reg, u32 pwrtrans_reg,
u64 mask, u32 timeout_us)
{
	u64 val; /* must be 64-bit: gpu_read64_*() polls a 64-bit register */
	int ret;

	/* Wait for any pending power transition before requesting power-off. */
	ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val,
					      !(mask & val), 100, timeout_us);
	if (ret) {
		drm_err(&ptdev->base,
			"timeout waiting on %s:%llx power transition\n",
			blk_name, mask);
		return ret;
	}

	gpu_write64(ptdev, pwroff_reg, mask);

	/* Wait for the power-off transition we just requested to complete. */
	ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val,
					      !(mask & val), 100, timeout_us);
	if (ret) {
		drm_err(&ptdev->base,
			"timeout waiting on %s:%llx power transition\n",
			blk_name, mask);
		return ret;
	}

	return 0;
}
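
/**
 * panthor_gpu_block_power_on() - Power-on a specific block of the GPU
 * @ptdev: Device.
 * @blk_name: Block name.
 * @pwron_reg: Power-on register for this block.
 * @pwrtrans_reg: Power transition register for this block.
 * @rdy_reg: Readiness register for this block.
 * @mask: Sub-elements to power-on.
 * @timeout_us: Timeout in microseconds.
 *
 * Return: 0 on success, a negative error code otherwise.
 */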
int panthor_gpu_block_power_on(struct panthor_device *ptdev,
const char *blk_name,
u32 pwron_reg, u32 pwrtrans_reg,
u32 rdy_reg, u64 mask, u32 timeout_us)
{
	u64 val; /* must be 64-bit: gpu_read64_*() polls a 64-bit register */
	int ret;

	/* Wait for any pending power transition before requesting power-on. */
	ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val,
					      !(mask & val), 100, timeout_us);
	if (ret) {
		drm_err(&ptdev->base,
			"timeout waiting on %s:%llx power transition\n",
			blk_name, mask);
		return ret;
	}

	gpu_write64(ptdev, pwron_reg, mask);

	/* All requested sub-elements must report ready, not just a subset. */
	ret = gpu_read64_relaxed_poll_timeout(ptdev, rdy_reg, val,
					      (mask & val) == mask,
					      100, timeout_us);
	if (ret) {
		drm_err(&ptdev->base, "timeout waiting on %s:%llx readiness\n",
			blk_name, mask);
		return ret;
	}

	return 0;
}
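
/**
 * panthor_gpu_l2_power_off() - Power-off the L2-cache
 * @ptdev: Device.
 */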
void panthor_gpu_l2_power_off(struct panthor_device *ptdev)
{
panthor_gpu_power_off(ptdev, L2, ptdev->gpu_info.l2_present, 20000);
}
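
/**
 * panthor_gpu_l2_power_on() - Power-on the L2-cache
 * @ptdev: Device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */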
int panthor_gpu_l2_power_on(struct panthor_device *ptdev)
{
	if (ptdev->gpu_info.l2_present != 1) {
		/*
		 * Only support one core group for now.
		 * ~(l2_present - 1) unsets all bits in l2_present except the
		 * bottom bit. (l2_present - 2) has all the bits in the first
		 * core group set. ANDing them together yields a mask of the
		 * cores in the first core group.
		 */
		u64 core_mask = ~(ptdev->gpu_info.l2_present - 1) &
				 (ptdev->gpu_info.l2_present - 2);

		drm_info_once(&ptdev->base, "using only 1st core group (%lu cores from %lu)\n",
			      hweight64(core_mask),
			      hweight64(ptdev->gpu_info.shader_present));
	}

	panthor_gpu_coherency_set(ptdev);
	panthor_gpu_l2_config_set(ptdev);

return panthor_gpu_power_on(ptdev, L2, 1, 20000);
}
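
/**
 * panthor_gpu_flush_caches() - Flush caches
 * @ptdev: Device.
 * @l2: L2 flush type.
 * @lsc: LSC flush type.
 * @other: Other flush type.
 *
 * Return: 0 on success, a negative error code otherwise.
 */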
int panthor_gpu_flush_caches(struct panthor_device *ptdev,
u32 l2, u32 lsc, u32 other)
{
	unsigned long flags;
	int ret = 0;

	/* Serialize cache flush operations. */
	guard(mutex)(&ptdev->gpu->cache_flush_lock);

	spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
	if (!(ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED)) {
		ptdev->gpu->pending_reqs |= GPU_IRQ_CLEAN_CACHES_COMPLETED;
		gpu_write(ptdev, GPU_CMD, GPU_FLUSH_CACHES(l2, lsc, other));
	} else {
		ret = -EIO;
	}
	spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);

	if (ret)
		return ret;

	if (!wait_event_timeout(ptdev->gpu->reqs_acked,
				!(ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED),
				msecs_to_jiffies(100))) {
		spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);

		/*
		 * Only treat the request as timed out if the IRQ never fired:
		 * if GPU_INT_RAWSTAT has the bit set, the handler simply
		 * hasn't run yet, so clear the pending request on its behalf.
		 */
		if ((ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED) != 0 &&
		    !(gpu_read(ptdev, GPU_INT_RAWSTAT) & GPU_IRQ_CLEAN_CACHES_COMPLETED))
			ret = -ETIMEDOUT;
		else
			ptdev->gpu->pending_reqs &= ~GPU_IRQ_CLEAN_CACHES_COMPLETED;
		spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
	}

	if (ret) {
		panthor_device_schedule_reset(ptdev);
		drm_err(&ptdev->base, "Flush caches timeout\n");
	}
return ret;
}
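
/**
 * panthor_gpu_soft_reset() - Issue a soft-reset
 * @ptdev: Device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */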
int panthor_gpu_soft_reset(struct panthor_device *ptdev)
{
	bool timedout = false;
	unsigned long flags;

	spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
	if (!drm_WARN_ON(&ptdev->base,
			 ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED)) {
		ptdev->gpu->pending_reqs |= GPU_IRQ_RESET_COMPLETED;
		gpu_write(ptdev, GPU_INT_CLEAR, GPU_IRQ_RESET_COMPLETED);
		gpu_write(ptdev, GPU_CMD, GPU_SOFT_RESET);
	}
	spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);

	if (!wait_event_timeout(ptdev->gpu->reqs_acked,
				!(ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED),
				msecs_to_jiffies(100))) {
		spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);

		/*
		 * Only declare a timeout if the IRQ never fired: if
		 * GPU_INT_RAWSTAT has the bit set, the handler just hasn't
		 * run yet, so clear the pending request on its behalf.
		 */
		if ((ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED) != 0 &&
		    !(gpu_read(ptdev, GPU_INT_RAWSTAT) & GPU_IRQ_RESET_COMPLETED))
			timedout = true;
		else
			ptdev->gpu->pending_reqs &= ~GPU_IRQ_RESET_COMPLETED;
		spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
	}

	if (timedout) {
		drm_err(&ptdev->base, "Soft reset timeout\n");
		return -ETIMEDOUT;
	}

	ptdev->gpu->pending_reqs = 0;
	return 0;
}
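
/**
 * panthor_gpu_suspend() - Suspend the GPU block.
 * @ptdev: Device.
 *
 * Suspend the GPU irq. This should be called last in the suspend procedure,
 * after all other blocks have been suspended.
 */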
void panthor_gpu_suspend(struct panthor_device *ptdev)
{
	/* On a fast reset, skip the full soft-reset and simply power down the L2. */
	if (!ptdev->reset.fast)
		panthor_hw_soft_reset(ptdev);
	else
		panthor_hw_l2_power_off(ptdev);

	panthor_gpu_irq_suspend(&ptdev->gpu->irq);
}
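
/**
 * panthor_gpu_resume() - Resume the GPU block.
 * @ptdev: Device.
 *
 * Resume the IRQ handler and power on the L2-cache.
 * The FW takes care of powering the other blocks.
 */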
void panthor_gpu_resume(struct panthor_device *ptdev)
{
panthor_gpu_irq_resume(&ptdev->gpu->irq, GPU_INTERRUPTS_MASK);
panthor_hw_l2_power_on(ptdev);
}