#ifndef _CORESIGHT_TMC_H
#define _CORESIGHT_TMC_H
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/crc32.h>
#define TMC_RSZ 0x004
#define TMC_STS 0x00c
#define TMC_RRD 0x010
#define TMC_RRP 0x014
#define TMC_RWP 0x018
#define TMC_TRG 0x01c
#define TMC_CTL 0x020
#define TMC_RWD 0x024
#define TMC_MODE 0x028
#define TMC_LBUFLEVEL 0x02c
#define TMC_CBUFLEVEL 0x030
#define TMC_BUFWM 0x034
#define TMC_RRPHI 0x038
#define TMC_RWPHI 0x03c
#define TMC_AXICTL 0x110
#define TMC_DBALO 0x118
#define TMC_DBAHI 0x11c
#define TMC_FFSR 0x300
#define TMC_FFCR 0x304
#define TMC_PSCR 0x308
#define TMC_ITMISCOP0 0xee0
#define TMC_ITTRFLIN 0xee8
#define TMC_ITATBDATA0 0xeec
#define TMC_ITATBCTR2 0xef0
#define TMC_ITATBCTR1 0xef4
#define TMC_ITATBCTR0 0xef8
#define TMC_AUTHSTATUS 0xfb8
#define TMC_CTL_CAPT_EN BIT(0)
#define TMC_STS_TMCREADY_BIT 2
#define TMC_STS_FULL BIT(0)
#define TMC_STS_TRIGGERED BIT(1)
#define TMC_STS_MEMERR BIT(5)
#define TMC_AXICTL_CLEAR_MASK 0xfbf
#define TMC_AXICTL_ARCACHE_MASK (0xf << 16)
#define TMC_AXICTL_PROT_CTL_B0 BIT(0)
#define TMC_AXICTL_PROT_CTL_B1 BIT(1)
#define TMC_AXICTL_SCT_GAT_MODE BIT(7)
#define TMC_AXICTL_WR_BURST(v) (((v) & 0xf) << 8)
#define TMC_AXICTL_WR_BURST_16 0xf
#define TMC_AXICTL_AXCACHE_OS (0xf << 2)
#define TMC_AXICTL_ARCACHE_OS (0xf << 16)
#define TMC_FFSR_FT_STOPPED BIT(1)
#define TMC_FFCR_FLUSHMAN_BIT 6
#define TMC_FFCR_EN_FMT BIT(0)
#define TMC_FFCR_EN_TI BIT(1)
#define TMC_FFCR_FON_FLIN BIT(4)
#define TMC_FFCR_FON_TRIG_EVT BIT(5)
#define TMC_FFCR_TRIGON_TRIGIN BIT(8)
#define TMC_FFCR_STOP_ON_FLUSH BIT(12)
#define TMC_DEVID_NOSCAT BIT(24)
#define TMC_DEVID_AXIAW_VALID BIT(16)
#define TMC_DEVID_AXIAW_SHIFT 17
#define TMC_DEVID_AXIAW_MASK 0x7f
#define TMC_AUTH_NSID_MASK GENMASK(1, 0)
#define CS_CRASHDATA_VERSION (1 << 16)
/* Hardware flavour of this TMC instance */
enum tmc_config_type {
	TMC_CONFIG_TYPE_ETB,	/* Embedded Trace Buffer (internal RAM sink) */
	TMC_CONFIG_TYPE_ETR,	/* Embedded Trace Router (trace to memory) */
	TMC_CONFIG_TYPE_ETF,	/* Embedded Trace FIFO (link with FIFO) */
};
/* TMC operating modes (see also the TMC_MODE register) */
enum tmc_mode {
	TMC_MODE_CIRCULAR_BUFFER,
	TMC_MODE_SOFTWARE_FIFO,
	TMC_MODE_HARDWARE_FIFO,
};
/* Width of the memory interface databus, in multiples of 32 bits */
enum tmc_mem_intf_width {
	TMC_MEM_INTF_WIDTH_32BITS = 1,
	TMC_MEM_INTF_WIDTH_64BITS = 2,
	TMC_MEM_INTF_WIDTH_128BITS = 4,
	TMC_MEM_INTF_WIDTH_256BITS = 8,
};
/* ETR capability flags, collected in drvdata->etr_caps */
#define TMC_ETR_SG (0x1U << 0)
#define TMC_ETR_AXI_ARCACHE (0x1U << 1)
#define TMC_ETR_SAVE_RESTORE (0x1U << 2)
/* Capability set common to CoreSight SoC-600 style ETRs */
#define CORESIGHT_SOC_600_ETR_CAPS \
(TMC_ETR_SAVE_RESTORE | TMC_ETR_AXI_ARCACHE)
/**
 * struct tmc_crash_metadata - metadata describing trace data retained
 * across a crash. Lives at the start of the crash metadata reserved
 * region (drvdata->crash_mdata). This is a persistent layout shared
 * with external readers — do not reorder fields.
 * @crc32_mdata:  CRC of this metadata block, computed over every field
 *                from @crc32_tdata onwards (see find_crash_metadata_crc()).
 * @crc32_tdata:  CRC of the retained trace data itself.
 * @version:      metadata format version (CS_CRASHDATA_VERSION).
 * @valid:        non-zero when the snapshot is usable; cleared via
 *                tmc_crashdata_set_invalid().
 * @tmc_ram_size: TMC RAM size; in 32-bit words, given the "<< 2"
 *                byte conversion in find_crash_tracedata_crc() — confirm.
 * @tmc_sts:      saved TMC_STS register value.
 * @tmc_mode:     saved TMC_MODE register value.
 * @tmc_ffcr:     saved TMC_FFCR register value.
 * @tmc_ffsr:     saved TMC_FFSR register value.
 * @reserved32:   padding; keeps the 64-bit fields naturally aligned.
 * @tmc_rrp:      saved 64-bit read pointer (RRP/RRPHI).
 * @tmc_rwp:      saved 64-bit write pointer (RWP/RWPHI).
 * @tmc_dba:      saved 64-bit data buffer address (DBALO/DBAHI).
 * @trace_paddr:  physical address of the retained trace data.
 * @reserved64:   room for future fields without a layout change.
 */
struct tmc_crash_metadata {
	uint32_t crc32_mdata;
	uint32_t crc32_tdata;
	uint32_t version;
	uint32_t valid;
	uint32_t tmc_ram_size;
	uint32_t tmc_sts;
	uint32_t tmc_mode;
	uint32_t tmc_ffcr;
	uint32_t tmc_ffsr;
	uint32_t reserved32;
	uint64_t tmc_rrp;
	uint64_t tmc_rwp;
	uint64_t tmc_dba;
	uint64_t trace_paddr;
	uint64_t reserved64[3];
};
/* Backing-store strategies available for an ETR buffer */
enum etr_mode {
	ETR_MODE_FLAT,		/* single contiguous DMA buffer */
	ETR_MODE_ETR_SG,	/* ETR built-in scatter-gather table */
	ETR_MODE_CATU,		/* buffer handled via a CATU device */
	ETR_MODE_RESRV,		/* carved-out reserved memory region */
	ETR_MODE_AUTO,		/* let the driver pick a suitable mode */
};
struct etr_buf_operations;

/**
 * struct etr_buf - describes the buffer used by an ETR
 * @refcount: number of users currently sharing this buffer.
 * @mode:     backing-store mode, one of enum etr_mode.
 * @full:     buffer-full / trace overflow indication.
 * @size:     size of the buffer, in bytes.
 * @hwaddr:   DMA address to program into TMC_DBALO/TMC_DBAHI.
 * @offset:   offset of the trace data available for consumption.
 * @len:      length of the available trace data.
 * @ops:      mode-specific buffer operations.
 * @private:  backend state owned by @ops.
 */
struct etr_buf {
	refcount_t refcount;
	enum etr_mode mode;
	bool full;
	ssize_t size;
	dma_addr_t hwaddr;
	unsigned long offset;
	s64 len;
	const struct etr_buf_operations *ops;
	void *private;
};
/**
 * struct tmc_resrv_buf - reserved memory region used for trace data
 * and crash metadata retention
 * @paddr:   physical start address of the reserved region.
 * @vaddr:   kernel virtual mapping of the region.
 * @size:    size of the region, in bytes.
 * @offset:  offset of the trace data available for consumption.
 * @reading: true while a reader has the buffer claimed.
 * @len:     length of the available trace data.
 */
struct tmc_resrv_buf {
	phys_addr_t paddr;
	void *vaddr;
	size_t size;
	unsigned long offset;
	bool reading;
	s64 len;
};
/**
 * struct tmc_drvdata - per-instance state of a TMC component
 * @atclk:	trace (ATB) clock, if present.
 * @pclk:	programming (APB) clock, if present.
 * @base:	memory-mapped base address of the component.
 * @csdev:	CoreSight device handle used by the framework.
 * @miscdev:	character device for reading trace data.
 * @crashdev:	character device for reading crash-retained trace data.
 * @spinlock:	serialises access to the hardware and this state.
 * @pid:	PID of the process owning the active session, if any.
 * @reading:	true while a reader has the trace buffer open.
 * @stop_on_flush: user configuration — stop the TMC on a flush trigger.
 * @buf:	trace data snapshot (ETB/ETF); unioned with @etr_buf.
 * @etr_buf:	trace buffer descriptor (ETR); unioned with @buf.
 * @len:	size of the available trace data.
 * @size:	trace buffer size.
 * @max_burst_size: maximum AXI write burst size (ETR).
 * @config_type: TMC flavour — ETB, ETR or ETF.
 * @memwidth:	memory interface width, in 32-bit multiples.
 * @trigger_cntr: amount of data to capture after a trigger event.
 * @etr_caps:	bitmask of TMC_ETR_* capability flags.
 * @etr_mode:	backing-store mode selected for the ETR.
 * @idr:	holds the etr_bufs allocated for this ETR.
 * @idr_mutex:	serialises access to @idr.
 * @sysfs_buf:	ETR buffer used by the sysfs interface.
 * @perf_buf:	ETR buffer used by the perf interface.
 * @resrv_buf:	reserved region used for trace data retention.
 * @crash_mdata: reserved region holding the crash metadata.
 */
struct tmc_drvdata {
	struct clk *atclk;
	struct clk *pclk;
	void __iomem *base;
	struct coresight_device *csdev;
	struct miscdevice miscdev;
	struct miscdevice crashdev;
	raw_spinlock_t spinlock;
	pid_t pid;
	bool reading;
	bool stop_on_flush;
	union {
		char *buf;
		struct etr_buf *etr_buf;
	};
	u32 len;
	u32 size;
	u32 max_burst_size;
	enum tmc_config_type config_type;
	enum tmc_mem_intf_width memwidth;
	u32 trigger_cntr;
	u32 etr_caps;
	enum etr_mode etr_mode;
	struct idr idr;
	struct mutex idr_mutex;
	struct etr_buf *sysfs_buf;
	struct etr_buf *perf_buf;
	struct tmc_resrv_buf resrv_buf;
	struct tmc_crash_metadata *crash_mdata;
};
/**
 * struct etr_buf_operations - backend callbacks for an etr_buf mode
 * @alloc:    allocate the backing store on NUMA @node; @pages, when
 *            non-NULL, supplies pre-allocated pages — see callers.
 * @sync:     make hardware-written data visible to the CPU, given the
 *            final read (@rrp) and write (@rwp) pointers.
 * @get_data: expose up to @len bytes of trace data at @offset via
 *            @bufpp; returns the number of bytes available.
 * @free:     release the backing store.
 */
struct etr_buf_operations {
	int (*alloc)(struct tmc_drvdata *drvdata, struct etr_buf *etr_buf,
		     int node, void **pages);
	void (*sync)(struct etr_buf *etr_buf, u64 rrp, u64 rwp);
	ssize_t (*get_data)(struct etr_buf *etr_buf, u64 offset, size_t len,
			    char **bufpp);
	void (*free)(struct etr_buf *etr_buf);
};
/**
 * struct tmc_pages - a set of pages together with their DMA addresses
 * @nr_pages: number of pages in the set.
 * @daddrs:   array of DMA addresses, one per page.
 * @pages:    array of page pointers.
 */
struct tmc_pages {
	int nr_pages;
	dma_addr_t *daddrs;
	struct page **pages;
};
/**
 * struct tmc_sg_table - scatter-gather table and data pages for the ETR
 * @dev:         device used for the DMA mappings.
 * @table_vaddr: CPU-visible mapping of the table pages.
 * @data_vaddr:  CPU-visible mapping of the data pages.
 * @table_daddr: DMA address of the table.
 * @node:        NUMA node used for page allocation.
 * @table_pages: pages holding the SG table entries.
 * @data_pages:  pages holding the trace data.
 */
struct tmc_sg_table {
	struct device *dev;
	void *table_vaddr;
	void *data_vaddr;
	dma_addr_t table_daddr;
	int node;
	struct tmc_pages table_pages;
	struct tmc_pages data_pages;
};
/* Helpers common to all TMC configurations */
int tmc_wait_for_tmcready(struct tmc_drvdata *drvdata);
void tmc_flush_and_stop(struct tmc_drvdata *drvdata);
void tmc_enable_hw(struct tmc_drvdata *drvdata);
void tmc_disable_hw(struct tmc_drvdata *drvdata);
u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata);
int tmc_read_prepare_crashdata(struct tmc_drvdata *drvdata);

/* ETB/ETF functions */
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata);
int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata);
extern const struct coresight_ops tmc_etb_cs_ops;
extern const struct coresight_ops tmc_etf_cs_ops;
ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
				loff_t pos, size_t len, char **bufpp);

/* ETR functions */
int tmc_read_prepare_etr(struct tmc_drvdata *drvdata);
int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata);
void tmc_etr_disable_hw(struct tmc_drvdata *drvdata);
extern const struct coresight_ops tmc_etr_cs_ops;
ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
				loff_t pos, size_t len, char **bufpp);
/*
 * TMC_REG_PAIR - generate 64-bit accessors, tmc_read_<name>() and
 * tmc_write_<name>(), for a register split across a LO/HI pair.
 * Accesses go through the csdev_access layer as relaxed pair operations.
 */
#define TMC_REG_PAIR(name, lo_off, hi_off) \
static inline u64 \
tmc_read_##name(struct tmc_drvdata *drvdata) \
{ \
	return csdev_access_relaxed_read_pair(&drvdata->csdev->access, lo_off, hi_off); \
} \
static inline void \
tmc_write_##name(struct tmc_drvdata *drvdata, u64 val) \
{ \
	csdev_access_relaxed_write_pair(&drvdata->csdev->access, val, lo_off, hi_off); \
}

/* 64-bit read pointer, write pointer and data buffer address */
TMC_REG_PAIR(rrp, TMC_RRP, TMC_RRPHI)
TMC_REG_PAIR(rwp, TMC_RWP, TMC_RWPHI)
TMC_REG_PAIR(dba, TMC_DBALO, TMC_DBAHI)
/*
 * Seed the ETR capability mask from the device configuration. This must
 * happen exactly once per device; a non-zero mask here indicates a
 * programming error, hence the WARN_ON().
 */
static inline void tmc_etr_init_caps(struct tmc_drvdata *drvdata, u32 dev_caps)
{
	/* Capabilities must not have been populated already */
	WARN_ON(drvdata->etr_caps != 0);

	drvdata->etr_caps = dev_caps;
}
/* Mark one additional TMC_ETR_* capability as supported. */
static inline void tmc_etr_set_cap(struct tmc_drvdata *drvdata, u32 cap)
{
	/* Capabilities only accumulate; bits are never cleared here */
	drvdata->etr_caps = drvdata->etr_caps | cap;
}
/* Test whether this ETR advertises the given TMC_ETR_* capability. */
static inline bool tmc_etr_has_cap(struct tmc_drvdata *drvdata, u32 cap)
{
	return (drvdata->etr_caps & cap) != 0;
}
/* ETR scatter-gather table management */
struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev,
					int node,
					int nr_tpages,
					int nr_dpages,
					void **pages);
void tmc_free_sg_table(struct tmc_sg_table *sg_table);
/* Sync helpers: keep table/data coherent between CPU and device */
void tmc_sg_table_sync_table(struct tmc_sg_table *sg_table);
void tmc_sg_table_sync_data_range(struct tmc_sg_table *table,
				  u64 offset, u64 size);
ssize_t tmc_sg_table_get_data(struct tmc_sg_table *sg_table,
			      u64 offset, size_t len, char **bufpp);
/*
 * Total size in bytes of the trace data area backing this SG table
 * (number of data pages times the page size).
 */
static inline unsigned long
tmc_sg_table_buf_size(struct tmc_sg_table *sg_table)
{
	unsigned long npages = sg_table->data_pages.nr_pages;

	return npages << PAGE_SHIFT;
}
/* True when a usable (mapped, non-empty) reserved trace region exists. */
static inline bool tmc_has_reserved_buffer(struct tmc_drvdata *drvdata)
{
	return drvdata->resrv_buf.vaddr && drvdata->resrv_buf.size;
}
/* True when a usable (mapped, non-empty) crash metadata region exists. */
static inline bool tmc_has_crash_mdata_buffer(struct tmc_drvdata *drvdata)
{
	return drvdata->crash_mdata.vaddr && drvdata->crash_mdata.size;
}
/*
 * Invalidate any previously retained crashdata snapshot by clearing the
 * "valid" flag in the metadata header. No-op when no metadata region
 * was reserved for this device.
 */
static inline void tmc_crashdata_set_invalid(struct tmc_drvdata *drvdata)
{
	struct tmc_crash_metadata *mdata = drvdata->crash_mdata.vaddr;

	if (!tmc_has_crash_mdata_buffer(drvdata))
		return;

	mdata->valid = false;
}
/*
 * Compute the CRC protecting the crash metadata block. The CRC covers
 * every field from crc32_tdata to the end of the structure, i.e.
 * everything except the crc32_mdata field holding the result itself.
 */
static inline uint32_t find_crash_metadata_crc(struct tmc_crash_metadata *md)
{
	const unsigned long covered = sizeof(*md) -
		offsetof(struct tmc_crash_metadata, crc32_tdata);

	return crc32_le(0, (void *)&md->crc32_tdata, covered);
}
/*
 * Compute the CRC of the trace data retained in the reserved buffer.
 *
 * @md->tmc_ram_size appears to hold the TMC RAM size in 32-bit words
 * (hence the << 2 conversion to bytes) — confirm against the writer.
 * The operand is widened to unsigned long *before* the shift: shifting
 * the uint32_t directly would overflow 32-bit arithmetic for RAM sizes
 * of 1G words or more on 64-bit kernels.
 */
static inline uint32_t find_crash_tracedata_crc(struct tmc_drvdata *drvdata,
						struct tmc_crash_metadata *md)
{
	unsigned long crc_size;

	/* Widen before shifting to avoid 32-bit overflow of the byte count */
	crc_size = (unsigned long)md->tmc_ram_size << 2;

	return crc32_le(0, (void *)drvdata->resrv_buf.vaddr, crc_size);
}
/* CATU backend hooks (ops set/removed by the CATU backend) */
struct coresight_device *tmc_etr_get_catu_device(struct tmc_drvdata *drvdata);
void tmc_etr_set_catu_ops(const struct etr_buf_operations *catu);
void tmc_etr_remove_catu_ops(void);
/* Obtain the ETR buffer to use for the given mode and trace path */
struct etr_buf *tmc_etr_get_buffer(struct coresight_device *csdev,
				   enum cs_mode mode,
				   struct coresight_path *path);
extern const struct attribute_group coresight_etr_group;
#endif