mem_id
.id = mem_id,
.mem_id = HL_MMAP_TYPE_CB,
u64 mem_id;
.mem_id = HL_MMAP_TYPE_TS_BUFF,
buf->handle = (((u64)rc | buf->behavior->mem_id) << PAGE_SHIFT);
static void hl_mem_mgr_fini_stats_inc(u64 mem_id, struct hl_mem_mgr_fini_stats *stats)
switch (mem_id) {
u64 mem_id;
mem_id = buf->behavior->mem_id;
hl_mem_mgr_fini_stats_inc(mem_id, stats);
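The habanalabs matches above come from the unified memory manager: each buffer behavior carries a mem_id type tag (HL_MMAP_TYPE_CB, HL_MMAP_TYPE_TS_BUFF), the mmap handle is built by ORing the allocator-returned id with that tag and shifting by PAGE_SHIFT, and teardown statistics are bumped per type in a switch on mem_id. Below is a minimal userspace sketch of that encode/decode and counting scheme; all sketch_* names and bit values are hypothetical, not the driver's.

/* Illustrative sketch only: hypothetical names and bit values, not the habanalabs API. */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12           /* assume 4 KiB pages */
#define SKETCH_TYPE_CB    0x1000000ull /* hypothetical type bits, in the spirit of HL_MMAP_TYPE_* */
#define SKETCH_TYPE_TS    0x2000000ull

struct sketch_fini_stats { unsigned long cb; unsigned long ts; };

/* Pack an allocator id and a type tag into one mmap-style handle whose low
 * PAGE_SHIFT bits are zero, mirroring "(((u64)rc | mem_id) << PAGE_SHIFT)". */
static uint64_t sketch_encode_handle(uint32_t id, uint64_t type)
{
	return ((uint64_t)id | type) << SKETCH_PAGE_SHIFT;
}

static uint64_t sketch_handle_type(uint64_t handle)
{
	return (handle >> SKETCH_PAGE_SHIFT) & (SKETCH_TYPE_CB | SKETCH_TYPE_TS);
}

/* Count teardown per buffer type, in the spirit of hl_mem_mgr_fini_stats_inc(). */
static void sketch_fini_stats_inc(uint64_t type, struct sketch_fini_stats *stats)
{
	switch (type) {
	case SKETCH_TYPE_CB:
		stats->cb++;
		break;
	case SKETCH_TYPE_TS:
		stats->ts++;
		break;
	default:
		break;
	}
}

int main(void)
{
	struct sketch_fini_stats stats = { 0 };
	uint64_t handle = sketch_encode_handle(42, SKETCH_TYPE_CB);

	sketch_fini_stats_inc(sketch_handle_type(handle), &stats);
	printf("handle=0x%llx cb=%lu ts=%lu\n",
	       (unsigned long long)handle, stats.cb, stats.ts);
	return 0;
}

Shifting by PAGE_SHIFT keeps the low bits of the handle zero, so the same value can be handed back to mmap() as a page-aligned offset while still identifying both the buffer and its type.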
s8 mem_id = KFD_XCP_MEM_ID(adev, xcp_id);
if (adev->gmc.num_mem_partitions && xcp_id >= 0 && mem_id >= 0) {
tmp = adev->gmc.mem_partitions[mem_id].size;
(adev)->xcp_mgr->xcp[(xcp_id)].mem_id : -1)
int8_t mem_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);
if (adev->gmc.mem_partitions && mem_id >= 0) {
places[c].fpfn = adev->gmc.mem_partitions[mem_id].range.fpfn;
places[c].lpfn = adev->gmc.mem_partitions[mem_id].range.lpfn + 1;
int8_t mem_id;
uint8_t mem_id;
xcp_mgr, &xcp_mgr->xcp[i], &mem_id);
xcp_mgr->xcp[i].mem_id = mem_id;
fpriv->vm.mem_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? -1 :
adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id;
uint8_t mem_id;
struct amdgpu_xcp *xcp, uint8_t *mem_id);
int xcc_id, uint8_t *mem_id)
*mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;
*mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;
struct amdgpu_xcp *xcp, uint8_t *mem_id)
*mem_id = 0;
return __aqua_vanjaram_get_xcp_mem_id(adev, xcc_id, mem_id);
*mem_id = i;
KFD_XCP_MEM_ID(adev, bo->xcp_id) == vm->mem_id);
if (adev->gmc.is_app_apu && vm->mem_id >= 0) {
local_node = adev->gmc.mem_partitions[vm->mem_id].numa.node;
vm->mem_id, local_node, nid);
int xcc_id, uint8_t *mem_id)
*mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;
*mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;
struct amdgpu_xcp *xcp, uint8_t *mem_id)
*mem_id = 0;
return __soc_v1_0_get_xcp_mem_id(adev, xcc_id, mem_id);
*mem_id = i;
node->node_id, node->xcp->mem_id,
(!bo_node->xcp || !node->xcp || bo_node->xcp->mem_id == node->xcp->mem_id))
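The amdgpu/KFD matches show how a compute partition resolves its memory partition: KFD_XCP_MEM_ID() returns the xcp's mem_id or -1 when no partition applies, and the __*_get_xcp_mem_id() helpers derive it from the XCC index with two integer divisions (XCC to XCP, then XCP to memory partition). A small sketch of that mapping follows, with hypothetical partition counts.

/* Illustrative sketch only: hypothetical values, not the amdgpu XCP manager code. */
#include <stdio.h>

/* Map a compute die (XCC) index to a memory partition the way the fragments
 * above do it: first XCC -> XCP, then XCP -> memory partition, both by
 * integer division. */
static int sketch_xcc_to_mem_id(int xcc_id, int num_xcc_per_xcp,
				int num_xcp_per_mem_partition)
{
	int xcp_id = xcc_id / num_xcc_per_xcp;        /* which partition the XCC is in */

	return xcp_id / num_xcp_per_mem_partition;    /* which memory range backs it */
}

int main(void)
{
	/* Assume 8 XCCs, 2 XCCs per XCP, 2 XCPs per memory partition. */
	for (int xcc = 0; xcc < 8; xcc++)
		printf("xcc %d -> mem_id %d\n",
		       xcc, sketch_xcc_to_mem_id(xcc, 2, 2));
	return 0;
}

With those assumed counts, XCCs 0-3 land on mem_id 0 and XCCs 4-7 on mem_id 1; the -1 sentinel from KFD_XCP_MEM_ID() covers the no-partition case.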
req = &alive_path->reqs[fail_req->permit->mem_id];
buf_id = req->permit->mem_id;
buf_id = req->permit->mem_id;
permit->mem_id = i;
if (WARN_ON(!test_bit(permit->mem_id, clt->permits_map)))
WARN_ON(permit->mem_id != bit);
clear_bit_unlock(permit->mem_id, clt->permits_map);
req = &clt_path->reqs[permit->mem_id];
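In the RTRS client fragments, a permit's mem_id is simply its slot index: it selects the matching entry in the per-path reqs[] array and names the bit to clear in clt->permits_map on release, with WARN_ONs guarding against releasing a permit whose bit is not set. A single-threaded sketch of that index-as-identity pattern follows; the real code uses atomic bitops, which are omitted here.

/* Illustrative sketch only: hypothetical allocator, not the RTRS client code. */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_NR_PERMITS 64

struct sketch_permit { unsigned int mem_id; };

static uint64_t permits_map;                        /* one bit per permit slot */
static struct sketch_permit permits[SKETCH_NR_PERMITS];

/* Grab a free slot; the permit remembers its index as mem_id, which is later
 * used to pick the matching request slot (reqs[permit->mem_id]). */
static struct sketch_permit *sketch_permit_get(void)
{
	for (unsigned int i = 0; i < SKETCH_NR_PERMITS; i++) {
		if (!(permits_map & (1ull << i))) {
			permits_map |= 1ull << i;
			permits[i].mem_id = i;
			return &permits[i];
		}
	}
	return NULL;
}

static void sketch_permit_put(struct sketch_permit *permit)
{
	/* Releasing a permit whose bit is not set is a bug. */
	if (!(permits_map & (1ull << permit->mem_id))) {
		fprintf(stderr, "double free of permit %u\n", permit->mem_id);
		return;
	}
	permits_map &= ~(1ull << permit->mem_id);
}

int main(void)
{
	struct sketch_permit *p = sketch_permit_get();

	if (p) {
		printf("got permit mem_id=%u\n", p->mem_id);
		sketch_permit_put(p);
	}
	return 0;
}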
unsigned int mem_id;
uint32_t mem_id;
if (t.mem_id == MEM_CM)
else if (t.mem_id == MEM_PMRX)
else if (t.mem_id == MEM_PMTX)
if (t->mem_id == MEM_CM)
else if (t->mem_id == MEM_PMRX)
else if (t->mem_id == MEM_PMTX)
static bool ipa_mem_id_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
switch (mem_id) {
static bool ipa_mem_id_required(struct ipa *ipa, enum ipa_mem_id mem_id)
switch (mem_id) {
enum ipa_mem_id mem_id = mem->id;
if (!ipa_mem_id_valid(ipa, mem_id)) {
dev_err(dev, "region id %u not valid\n", mem_id);
dev_err(dev, "empty memory region %u\n", mem_id);
size_multiple = mem_id == IPA_MEM_MODEM ? 4 : 8;
mem_id, size_multiple);
dev_err(dev, "region %u offset not 8-byte aligned\n", mem_id);
mem_id, mem->canary_count);
else if (mem_id == IPA_MEM_END_MARKER && mem->size)
enum ipa_mem_id mem_id;
for_each_clear_bit(mem_id, regions, IPA_MEM_COUNT) {
if (ipa_mem_id_required(ipa, mem_id))
mem_id);
const struct ipa_mem *ipa_mem_find(struct ipa *ipa, enum ipa_mem_id mem_id)
if (mem->id == mem_id)
ipa_mem_zero_region_add(struct gsi_trans *trans, enum ipa_mem_id mem_id)
const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
const struct ipa_mem *ipa_mem_find(struct ipa *ipa, enum ipa_mem_id mem_id);
enum ipa_mem_id mem_id;
mem_id = filter ? hashed ? ipv6 ? IPA_MEM_V6_FILTER_HASHED
return ipa_mem_find(ipa, mem_id);
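The IPA matches validate and look up shared-memory regions by id: ipa_mem_id_valid() and ipa_mem_id_required() switch on the enum, and ipa_mem_find() scans the platform's region array for a matching id because regions are stored in platform order rather than indexed by id. A hedged sketch of that lookup follows, with made-up region ids, offsets and sizes.

/* Illustrative sketch only: hypothetical regions, not the IPA driver tables. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum sketch_mem_id {
	SKETCH_MEM_UC_INFO,
	SKETCH_MEM_V4_FILTER,
	SKETCH_MEM_V6_FILTER,
	SKETCH_MEM_COUNT,
};

struct sketch_mem {
	enum sketch_mem_id id;
	uint32_t offset;   /* byte offset within shared memory */
	uint16_t size;     /* region size in bytes */
};

/* Regions are listed in platform order, not indexed by id, so lookup scans
 * for a matching id, as ipa_mem_find() does in the fragments above. */
static const struct sketch_mem sketch_mems[] = {
	{ .id = SKETCH_MEM_UC_INFO,   .offset = 0x0000, .size = 0x200 },
	{ .id = SKETCH_MEM_V4_FILTER, .offset = 0x0288, .size = 0x078 },
};

static const struct sketch_mem *sketch_mem_find(enum sketch_mem_id mem_id)
{
	for (size_t i = 0; i < sizeof(sketch_mems) / sizeof(sketch_mems[0]); i++)
		if (sketch_mems[i].id == mem_id)
			return &sketch_mems[i];
	return NULL;       /* region not defined for this platform */
}

int main(void)
{
	const struct sketch_mem *mem = sketch_mem_find(SKETCH_MEM_V6_FILTER);

	if (!mem)
		printf("region %u not present on this platform\n", SKETCH_MEM_V6_FILTER);
	return 0;
}

Returning NULL for an absent id is what lets optional regions be skipped on platforms that do not define them, while ipa_mem_id_required() flags the ones that must exist.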
int pruss_request_mem_region(struct pruss *pruss, enum pruss_mem mem_id,
if (!pruss || !region || mem_id >= PRUSS_MEM_MAX)
if (pruss->mem_in_use[mem_id]) {
*region = pruss->mem_regions[mem_id];
pruss->mem_in_use[mem_id] = region;
int pruss_request_mem_region(struct pruss *pruss, enum pruss_mem mem_id,
enum pruss_mem mem_id,
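pruss_request_mem_region() in the PRUSS matches bounds-checks the requested mem_id, refuses a region that is already claimed, copies the region description to the caller and records the caller as the owner. A minimal sketch of that claim/own pattern follows, with hypothetical types; the driver's locking is left out.

/* Illustrative sketch only: hypothetical types, not the PRUSS driver API. */
#include <errno.h>
#include <stdio.h>

enum sketch_mem { SKETCH_MEM_DRAM0, SKETCH_MEM_DRAM1, SKETCH_MEM_SHRD, SKETCH_MEM_MAX };

struct sketch_region { unsigned long pa; unsigned long size; };

struct sketch_pruss {
	struct sketch_region mem_regions[SKETCH_MEM_MAX];
	struct sketch_region *mem_in_use[SKETCH_MEM_MAX]; /* owner cookie, NULL if free */
};

/* Same shape as the pruss_request_mem_region() fragments: bounds-check the id,
 * refuse a region that is already claimed, copy out its description and record
 * the caller's region pointer as the owner. */
static int sketch_request_mem_region(struct sketch_pruss *pruss, enum sketch_mem mem_id,
				     struct sketch_region *region)
{
	if (!pruss || !region || mem_id >= SKETCH_MEM_MAX)
		return -EINVAL;

	if (pruss->mem_in_use[mem_id])
		return -EBUSY;

	*region = pruss->mem_regions[mem_id];
	pruss->mem_in_use[mem_id] = region;
	return 0;
}

int main(void)
{
	struct sketch_pruss pruss = { .mem_regions = { { 0x4a300000, 0x2000 } } };
	struct sketch_region region;

	printf("request: %d\n", sketch_request_mem_region(&pruss, SKETCH_MEM_DRAM0, &region));
	printf("again:   %d\n", sketch_request_mem_region(&pruss, SKETCH_MEM_DRAM0, &region));
	return 0;
}

The second request fails with -EBUSY because the first caller still owns the region; releasing it would clear the mem_in_use slot again.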
__field(u32, mem_id)
__entry->mem_id = xa->mem.id;
__entry->mem_id,
__field(u32, mem_id)
__entry->mem_id = xa->mem.id;
__entry->mem_id,
u32 mem_id = *(u32 *)arg->key;
return xa->mem.id != mem_id;
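The final trace/XDP matches record xa->mem.id in tracepoint fields and use the same id as a lookup key: the compare callback dereferences the bare u32 key and returns non-zero when the candidate allocator's mem.id differs, so zero means "match" (the arg->key/object shape matches a hashtable compare hook). A sketch of that compare convention over a plain array follows, with hypothetical structs.

/* Illustrative sketch only: hypothetical structures, not the kernel's hashtable API. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sketch_mem_info { uint32_t id; uint32_t type; };
struct sketch_allocator { struct sketch_mem_info mem; void *pool; };

/* Same convention as the last fragment: the key is a bare u32 mem_id and the
 * callback returns non-zero when the candidate object does NOT match. */
static int sketch_mem_id_cmp(const void *key, const struct sketch_allocator *xa)
{
	uint32_t mem_id = *(const uint32_t *)key;

	return xa->mem.id != mem_id;
}

int main(void)
{
	struct sketch_allocator allocs[] = { { { .id = 7 } }, { { .id = 42 } } };
	uint32_t key = 42;

	for (size_t i = 0; i < 2; i++)
		if (!sketch_mem_id_cmp(&key, &allocs[i]))
			printf("mem_id %u found at slot %zu\n", key, i);
	return 0;
}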