pcl
u32 pcl = pc & GENMASK(31, 2);
u32 u_offset = target - pcl;
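/*
 * Minimal stand-alone sketch of the arithmetic above, assuming "pcl" is the
 * program counter with its two low bits cleared (a 32-bit-aligned PC base)
 * and the branch-target offset is measured from that base.
 */
#include <stdint.h>

static inline uint32_t pc_lower_aligned(uint32_t pc)
{
	return pc & 0xfffffffcu;	/* same bits as GENMASK(31, 2) */
}

static inline uint32_t offset_from_pcl(uint32_t pc, uint32_t target)
{
	return target - pc_lower_aligned(pc);
}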
pgste_init.pcl = uses_skeys && init.h.i;
if (!pgste_init.pcl)
if (!pgstes[i].pcl)
unsigned long pcl : 1;
if (old_pgste.pcl)
old_pgste.pcl = 1;
pgste.pcl = 0;
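/*
 * Sketch of the pattern in the fragments above: "pcl" is a one-bit flag in a
 * wider status word, modelled here as a bitfield. The struct name, layout and
 * helper are assumptions for illustration, not the actual s390 PGSTE layout.
 */
struct status_bits {
	unsigned long pcl  : 1;
	unsigned long rest : 63;
};

static void pcl_flag_example(struct status_bits *cur, struct status_bits old)
{
	if (old.pcl)		/* flag already set in the snapshot */
		return;
	old.pcl = 1;		/* set it in the copy ... */
	*cur = old;		/* ... and publish the updated word */
	cur->pcl = 0;		/* later, clear the flag again */
}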
dma_free_coherent(&lynx->pci_device->dev, sizeof(struct pcl),
dma_free_coherent(&lynx->pci_device->dev, sizeof(struct pcl),
sizeof(struct pcl),
sizeof(struct pcl),
dma_free_coherent(&lynx->pci_device->dev, sizeof(struct pcl),
dma_free_coherent(&lynx->pci_device->dev, sizeof(struct pcl),
struct pcl *rcv_start_pcl, *rcv_pcl;
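/*
 * Sketch of the allocation side that pairs with the dma_free_coherent()
 * calls above: each struct pcl lives in its own coherent DMA buffer. Field
 * names such as rcv_pcl/rcv_pcl_bus follow the driver's style but are
 * assumptions here.
 */
lynx->rcv_pcl = dma_alloc_coherent(&lynx->pci_device->dev,
				   sizeof(struct pcl),
				   &lynx->rcv_pcl_bus, GFP_KERNEL);
if (!lynx->rcv_pcl)
	goto fail;	/* unwind earlier buffers with dma_free_coherent() */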
unsigned long pcl = ULONG_MAX;
f25.t * i < pcl) {
pcl = f25.t * i;
f32.t * i < pcl) {
pcl = f32.t * i;
fext.t * i < pcl) {
pcl = fext.t * i;
plen = pcl / pclock->t;
if (!f->pcl) {
f->pcl->besteffort |= !ra;
static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
return PAGE_ALIGN(pcl->pageofs_in + pcl->pclustersize) >> PAGE_SHIFT;
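/*
 * Worked example of z_erofs_pclusterpages() above, assuming 4 KiB pages:
 * a 16384-byte compressed cluster that starts 1024 bytes into its first page
 * covers PAGE_ALIGN(1024 + 16384) = 20480 bytes, i.e. 20480 >> PAGE_SHIFT = 5
 * pages.
 */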
if (f->pcl->length < offset + end - map->m_la) {
f->pcl->length = offset + end - map->m_la;
f->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
f->pcl->length == map->m_llen)
f->pcl->partial = false;
struct z_erofs_pcluster *pcl;
int poff = bvec->offset + be->pcl->pageofs_out;
bvec->offset + bvec->end == be->pcl->length)) {
unsigned int off0 = be->pcl->pageofs_out;
end = min_t(unsigned int, be->pcl->length - bvi->bvec.offset,
struct z_erofs_pcluster *pcl = be->pcl;
z_erofs_bvec_iter_begin(&biter, &pcl->bvset, Z_EROFS_INLINE_BVECS, 0);
for (i = 0; i < pcl->vcnt; ++i) {
struct z_erofs_pcluster *pcl = be->pcl;
unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i];
if (pcl->from_meta ||
struct z_erofs_pcluster *pcl = be->pcl;
unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
z_erofs_decomp[pcl->algorithmformat];
mutex_lock(&pcl->lock);
be->nr_pages = PAGE_ALIGN(pcl->length + pcl->pageofs_out) >> PAGE_SHIFT;
.pageofs_in = pcl->pageofs_in,
.pageofs_out = pcl->pageofs_out,
.inputsize = pcl->pclustersize,
.outputsize = pcl->length,
.alg = pcl->algorithmformat,
.partial_decoding = pcl->partial,
.gfp = pcl->besteffort ? GFP_KERNEL :
if (pcl->besteffort || reason != ERR_PTR(-ENOMEM))
alg->name, reason, pcl->pos,
pcl->pclustersize, pcl->length);
alg->name, reason, pcl->pos,
pcl->pclustersize, pcl->length);
if (pcl->from_meta) {
folio_put(page_folio(pcl->compressed_bvecs[0].page));
WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL);
WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
if (pcl->algorithmformat != Z_EROFS_COMPRESSION_LZ4) {
pcl->length = 0;
pcl->partial = true;
pcl->besteffort = false;
pcl->bvset.nextpage = NULL;
pcl->vcnt = 0;
WRITE_ONCE(pcl->next, NULL);
mutex_unlock(&pcl->lock);
if (pcl->from_meta)
z_erofs_free_pcluster(pcl);
z_erofs_put_pcluster(sbi, pcl, try_free);
.pcl = io->head,
for (; be.pcl != Z_EROFS_PCLUSTER_TAIL; be.pcl = next) {
DBG_BUGON(!be.pcl);
next = READ_ONCE(be.pcl->next);
struct z_erofs_pcluster *pcl,
spin_lock(&pcl->lockref.lock);
zbv = pcl->compressed_bvecs[nr];
spin_unlock(&pcl->lockref.lock);
folio_attach_private(folio, pcl);
if (likely(folio->private == pcl)) {
spin_lock(&pcl->lockref.lock);
if (unlikely(pcl->compressed_bvecs[nr].page != zbv.page)) {
spin_unlock(&pcl->lockref.lock);
pcl->compressed_bvecs[nr].page = page ? page : ERR_PTR(-ENOMEM);
spin_unlock(&pcl->lockref.lock);
filemap_add_folio(mc, folio, (pcl->pos >> PAGE_SHIFT) + nr, gfp)) {
folio_attach_private(folio, pcl);
static void z_erofs_move_to_bypass_queue(struct z_erofs_pcluster *pcl,
WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL);
WRITE_ONCE(*qtail[JQ_BYPASS], pcl);
qtail[JQ_BYPASS] = &pcl->next;
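/*
 * The WRITE_ONCE() pair above is a tail-pointer append on a singly linked
 * chain of pclusters. Generic sketch of the same pattern (names are
 * illustrative, not the erofs ones):
 */
struct node { struct node *next; };

static struct node **append_tail(struct node **qtail, struct node *n,
				 struct node *tail_marker)
{
	n->next = tail_marker;	/* new node terminates the chain */
	*qtail = n;		/* old tail slot now points at it */
	return &n->next;	/* becomes the new tail slot */
}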
struct z_erofs_pcluster *pcl, *next;
pcl = next;
next = READ_ONCE(pcl->next);
if (pcl->from_meta) {
z_erofs_move_to_bypass_queue(pcl, next, qtail);
.m_pa = round_down(pcl->pos, sb->s_blocksize),
end = round_up(cur + pcl->pageofs_in + pcl->pclustersize,
z_erofs_fill_bio_vec(&bvec, f, pcl, i++, mc);
qtail[JQ_SUBMIT] = &pcl->next;
z_erofs_move_to_bypass_queue(pcl, next, qtail);
struct z_erofs_pcluster *pcl;
pcl = kmem_cache_zalloc(pcs->slab, GFP_KERNEL);
if (!pcl)
return pcl;
static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
kmem_cache_free(pcs->slab, pcl);
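/*
 * Sketch of how the allocation above plausibly selects a slab: pclusters are
 * drawn from per-size caches, using the smallest class whose page budget
 * covers the requested size. The pcluster_pool/maxpages names are assumptions
 * based on the pcs->slab usage above.
 */
static struct z_erofs_pcluster *alloc_pcluster_sketch(unsigned int size)
{
	unsigned int nrpages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct z_erofs_pcluster_slab *pcs;

	for (pcs = pcluster_pool;
	     pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
		struct z_erofs_pcluster *pcl;

		if (nrpages > pcs->maxpages)
			continue;	/* class too small, try the next one */
		pcl = kmem_cache_zalloc(pcs->slab, GFP_KERNEL);
		if (!pcl)
			return ERR_PTR(-ENOMEM);
		return pcl;
	}
	return ERR_PTR(-EINVAL);	/* larger than every size class */
}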
struct z_erofs_pcluster *pcl, *head;
struct z_erofs_pcluster *pcl = fe->pcl;
unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
pgoff_t poff = pcl->pos >> PAGE_SHIFT;
if (READ_ONCE(pcl->compressed_bvecs[i].page))
spin_lock(&pcl->lockref.lock);
if (!pcl->compressed_bvecs[i].page) {
pcl->compressed_bvecs[i].page =
spin_unlock(&pcl->lockref.lock);
spin_unlock(&pcl->lockref.lock);
struct z_erofs_pcluster *pcl)
unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
DBG_BUGON(pcl->from_meta);
if (pcl->compressed_bvecs[i].page) {
folio = page_folio(pcl->compressed_bvecs[i].page);
pcl->compressed_bvecs[i].page = NULL;
struct z_erofs_pcluster *pcl = folio_get_private(folio);
struct z_erofs_bvec *bvec = pcl->compressed_bvecs;
struct z_erofs_bvec *end = bvec + z_erofs_pclusterpages(pcl);
spin_lock(&pcl->lockref.lock);
if (pcl->lockref.count <= 0) {
DBG_BUGON(pcl->from_meta);
spin_unlock(&pcl->lockref.lock);
struct z_erofs_pcluster *pcl = fe->pcl;
if (pcl->algorithmformat < Z_EROFS_COMPRESSION_MAX ||
spin_lock(&pcl->lockref.lock);
if (pcl->compressed_bvecs[--fe->icur].page)
pcl->compressed_bvecs[fe->icur] = *bvec;
spin_unlock(&pcl->lockref.lock);
spin_unlock(&pcl->lockref.lock);
fe->pcl->vcnt += (ret >= 0);
static bool z_erofs_get_pcluster(struct z_erofs_pcluster *pcl)
if (lockref_get_not_zero(&pcl->lockref))
spin_lock(&pcl->lockref.lock);
if (__lockref_is_dead(&pcl->lockref)) {
spin_unlock(&pcl->lockref.lock);
if (!pcl->lockref.count++)
spin_unlock(&pcl->lockref.lock);
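/*
 * Consolidated sketch of the "get unless dead" lockref pattern in the
 * fragments above (an approximation of z_erofs_get_pcluster(), with the
 * zero-to-one bookkeeping reduced to a comment):
 */
static bool get_pcluster_sketch(struct z_erofs_pcluster *pcl)
{
	if (lockref_get_not_zero(&pcl->lockref))
		return true;			/* fast path: count was > 0 */

	spin_lock(&pcl->lockref.lock);
	if (__lockref_is_dead(&pcl->lockref)) {
		spin_unlock(&pcl->lockref.lock);
		return false;			/* being freed, caller retries lookup */
	}
	if (!pcl->lockref.count++)
		;	/* resurrected from zero: adjust shrinker accounting here */
	spin_unlock(&pcl->lockref.lock);
	return true;
}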
struct z_erofs_pcluster *pcl, *pre;
pcl = z_erofs_alloc_pcluster(pageofs_in + map->m_plen);
if (IS_ERR(pcl))
return PTR_ERR(pcl);
lockref_init(&pcl->lockref); /* one ref for this request */
pcl->algorithmformat = map->m_algorithmformat;
pcl->pclustersize = map->m_plen;
pcl->length = 0;
pcl->partial = true;
pcl->next = fe->head;
pcl->pos = map->m_pa;
pcl->pageofs_in = pageofs_in;
pcl->pageofs_out = map->m_la & ~PAGE_MASK;
pcl->from_meta = map->m_flags & EROFS_MAP_META;
mutex_init(&pcl->lock);
DBG_BUGON(!mutex_trylock(&pcl->lock));
if (!pcl->from_meta) {
pre = __xa_cmpxchg(&sbi->managed_pslots, pcl->pos,
NULL, pcl, GFP_KERNEL);
fe->pcl = pre;
fe->head = fe->pcl = pcl;
mutex_unlock(&pcl->lock);
z_erofs_free_pcluster(pcl);
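/*
 * Simplified sketch of the register-or-reuse step the lines above build up
 * to: publish the new pcluster at its position in managed_pslots, or fall
 * back to the entry another thread already published (the locking and retry
 * details of the real code are omitted).
 */
pre = xa_cmpxchg(&sbi->managed_pslots, pcl->pos, NULL, pcl, GFP_KERNEL);
if (xa_is_err(pre)) {
	mutex_unlock(&pcl->lock);
	z_erofs_free_pcluster(pcl);
	return xa_err(pre);		/* allocation failed inside the xarray */
}
if (pre) {				/* lost the race: reuse the published pcluster */
	mutex_unlock(&pcl->lock);
	z_erofs_free_pcluster(pcl);
	fe->pcl = pre;
} else {
	fe->head = fe->pcl = pcl;	/* ours is now the managed entry */
}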
struct z_erofs_pcluster *pcl = NULL;
DBG_BUGON(fe->pcl);
pcl = xa_load(&EROFS_SB(sb)->managed_pslots, map->m_pa);
if (!pcl || z_erofs_get_pcluster(pcl)) {
DBG_BUGON(pcl && map->m_pa != pcl->pos);
if (pcl) {
fe->pcl = pcl;
mutex_lock(&fe->pcl->lock);
if (!cmpxchg(&fe->pcl->next, NULL, fe->head)) {
fe->head = fe->pcl;
z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset,
Z_EROFS_INLINE_BVECS, fe->pcl->vcnt);
if (!fe->pcl->from_meta) {
WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, ptr);
fe->pcl->pageofs_in = map->m_pa & ~PAGE_MASK;
fe->icur = z_erofs_pclusterpages(fe->pcl);
struct z_erofs_pcluster *pcl)
if (pcl->lockref.count)
if (erofs_try_to_free_all_cached_folios(sbi, pcl))
DBG_BUGON(__xa_erase(&sbi->managed_pslots, pcl->pos) != pcl);
lockref_mark_dead(&pcl->lockref);
struct z_erofs_pcluster *pcl)
spin_lock(&pcl->lockref.lock);
free = __erofs_try_to_release_pcluster(sbi, pcl);
spin_unlock(&pcl->lockref.lock);
call_rcu(&pcl->rcu, z_erofs_rcu_callback);
struct z_erofs_pcluster *pcl;
xa_for_each(&sbi->managed_pslots, index, pcl) {
if (!erofs_try_to_release_pcluster(sbi, pcl))
struct z_erofs_pcluster *pcl, bool try_free)
if (lockref_put_or_lock(&pcl->lockref))
DBG_BUGON(__lockref_is_dead(&pcl->lockref));
if (!--pcl->lockref.count) {
free = __erofs_try_to_release_pcluster(sbi, pcl);
spin_unlock(&pcl->lockref.lock);
call_rcu(&pcl->rcu, z_erofs_rcu_callback);
struct z_erofs_pcluster *pcl = fe->pcl;
if (!pcl)
mutex_unlock(&pcl->lock);
z_erofs_put_pcluster(EROFS_I_SB(fe->inode), pcl, false);
fe->pcl = NULL;
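/*
 * Consolidated sketch of the put path above: drop one reference; when the
 * last reference goes away, try to unpublish the pcluster and free it after
 * an RCU grace period (simplified relative to z_erofs_put_pcluster()).
 */
static void put_pcluster_sketch(struct erofs_sb_info *sbi,
				struct z_erofs_pcluster *pcl)
{
	bool free = false;

	if (lockref_put_or_lock(&pcl->lockref))
		return;				/* count stayed above zero */

	/* the lockref spinlock is held here */
	if (!--pcl->lockref.count)
		free = __erofs_try_to_release_pcluster(sbi, pcl);
	spin_unlock(&pcl->lockref.lock);
	if (free)
		call_rcu(&pcl->rcu, z_erofs_rcu_callback);
}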
extern void pcl_free(struct svc_rdma_pcl *pcl);
struct svc_rdma_pcl *pcl, __be32 *p);
extern int pcl_process_nonpayloads(const struct svc_rdma_pcl *pcl,
static inline void pcl_init(struct svc_rdma_pcl *pcl)
INIT_LIST_HEAD(&pcl->cl_chunks);
static inline bool pcl_is_empty(const struct svc_rdma_pcl *pcl)
return list_empty(&pcl->cl_chunks);
pcl_first_chunk(const struct svc_rdma_pcl *pcl)
if (pcl_is_empty(pcl))
return list_first_entry(&pcl->cl_chunks, struct svc_rdma_chunk,
pcl_next_chunk(const struct svc_rdma_pcl *pcl, struct svc_rdma_chunk *chunk)
if (list_is_last(&chunk->ch_list, &pcl->cl_chunks))
#define pcl_for_each_chunk(pos, pcl) \
for (pos = list_first_entry(&(pcl)->cl_chunks, struct svc_rdma_chunk, ch_list); \
&pos->ch_list != &(pcl)->cl_chunks; \
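/*
 * Usage sketch for the chunk-list helpers declared above: walk every chunk
 * in a parsed chunk list. The ch_position field used in the loop body is an
 * assumption made for illustration.
 */
static void dump_chunks(const struct svc_rdma_pcl *pcl)
{
	struct svc_rdma_chunk *chunk;

	if (pcl_is_empty(pcl))
		return;

	pcl_for_each_chunk(chunk, pcl)
		pr_debug("chunk at position %u\n", chunk->ch_position);
}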
struct svc_rdma_pcl *pcl = &rctxt->rc_call_pcl;
unsigned int i, segcount = pcl->cl_count;
pcl->cl_count = 0;
if (pcl_is_empty(pcl)) {
pcl_insert_position(pcl, chunk);
chunk = list_first_entry(&pcl->cl_chunks,
struct svc_rdma_pcl *pcl = &rctxt->rc_read_pcl;
unsigned int i, segcount = pcl->cl_count;
pcl->cl_count = 0;
void pcl_free(struct svc_rdma_pcl *pcl)
chunk = pcl_lookup_position(pcl, position);
pcl_insert_position(pcl, chunk);
while (!list_empty(&pcl->cl_chunks)) {
struct svc_rdma_pcl *pcl, __be32 *p)
for (i = 0; i < pcl->cl_count; i++) {
list_add_tail(&chunk->ch_list, &pcl->cl_chunks);
chunk = pcl_first_chunk(pcl);
int pcl_process_nonpayloads(const struct svc_rdma_pcl *pcl,
chunk = pcl_first_chunk(pcl);
while ((next = pcl_next_chunk(pcl, chunk))) {
pcl_lookup_position(struct svc_rdma_pcl *pcl, u32 position)
pcl_for_each_chunk(pos, pcl) {
static void pcl_insert_position(struct svc_rdma_pcl *pcl,
pcl_for_each_chunk(pos, pcl) {
pcl->cl_count++;
const struct svc_rdma_pcl *pcl = &head->rc_read_pcl;
if (pcl_is_empty(pcl))
chunk = pcl_first_chunk(pcl);
pcl_for_each_chunk(chunk, pcl) {
next = pcl_next_chunk(pcl, chunk);
const struct svc_rdma_pcl *pcl = &head->rc_read_pcl;
chunk = pcl_first_chunk(pcl);
pcl_for_each_chunk(chunk, pcl) {
next = pcl_next_chunk(pcl, chunk);