need_flush: usage sites across the kernel tree
unsigned int subregions, bool need_flush)
if (need_flush)
bool need_flush = region == PMSAv7_RAM_REGION;
xip[i].subreg, need_flush);
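These first lines look like the ARM PMSAv7 MPU setup path: the caller computes need_flush once per region, true only for the RAM region whose contents are live, and the region-programming helper flushes the caches before rewriting that mapping. A minimal sketch of the shape; everything except PMSAv7_RAM_REGION and need_flush is an invented stand-in:

#include <stdbool.h>

#define PMSAv7_RAM_REGION 0	/* placeholder value for the sketch */
#define NR_MPU_REGIONS    4

static void flush_caches_sketch(void) { /* stand-in for flush_cache_all() */ }
static void write_region_regs(unsigned int nr, unsigned int subregions)
{ (void)nr; (void)subregions; }

static void setup_region(unsigned int number, unsigned int subregions,
			 bool need_flush)
{
	/* The RAM region is in active use, so clean its cache lines
	 * before the mapping that covers them is reprogrammed. */
	if (need_flush)
		flush_caches_sketch();

	write_region_regs(number, subregions);
}

static void setup_all_regions(void)
{
	for (unsigned int region = 0; region < NR_MPU_REGIONS; region++) {
		bool need_flush = region == PMSAv7_RAM_REGION;

		setup_region(region, 0, need_flush);
	}
}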
bool need_flush = false;
need_flush = !cpus_have_final_cap(ARM64_HAS_STAGE2_FWB);
if (need_flush && mm_ops->dcache_clean_inval_poc)
if (need_flush)
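The next four lines look like the arm64 KVM stage-2 idiom: without FEAT_S2FWB (the ARM64_HAS_STAGE2_FWB capability) pages being mapped into a guest must first be cleaned and invalidated to the point of coherency, while with FWB the hardware forces write-back cacheability and the maintenance can be skipped. A hedged, kernel-context sketch; the surrounding function and struct are invented, cpus_have_final_cap() and the callback name come from the lines above:

/* Illustrative only; not the real stage-2 page-table walker. */
struct stage2_mm_ops_sketch {
	void (*dcache_clean_inval_poc)(void *addr, size_t size);
};

static void stage2_map_page_sketch(struct stage2_mm_ops_sketch *mm_ops,
				   void *page, size_t size)
{
	/* FEAT_S2FWB makes guest memory write-back cacheable by construction,
	 * so explicit cache maintenance is only needed when it is absent. */
	bool need_flush = !cpus_have_final_cap(ARM64_HAS_STAGE2_FWB);

	if (need_flush && mm_ops->dcache_clean_inval_poc)
		mm_ops->dcache_clean_inval_poc(page, size);

	/* ... install the stage-2 PTE for 'page' ... */
}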
bool need_flush = false;
get_new_mmu_context(mm, cpu, &need_flush);
if (need_flush)
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, bool *need_flush)
*need_flush = true; /* start new asid cycle */
bool need_flush = false;
get_new_mmu_context(next, cpu, &need_flush);
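This group shows the ASID-allocator contract: get_new_mmu_context() hands the mm a fresh ASID and, when the counter wraps and a new ASID cycle starts, reports through *need_flush that stale translations may alias the recycled ASIDs, so the caller flushes the local TLB before switching. A self-contained sketch of both sides; the helper names and the 8-bit ASID width are invented:

#include <stdbool.h>

#define ASID_MASK 0xffUL		/* placeholder ASID width */

static unsigned long asid_cache;	/* last ASID handed out on this CPU */

static void local_tlb_flush_sketch(void) { /* stand-in for the real flush */ }

/* Allocate the next ASID; report a wrap so the caller can flush. */
static unsigned long get_new_asid(bool *need_flush)
{
	unsigned long asid = ++asid_cache;

	if ((asid & ASID_MASK) == 0)
		*need_flush = true;	/* start new asid cycle */

	return asid;
}

static void switch_context_sketch(void)
{
	bool need_flush = false;
	unsigned long asid = get_new_asid(&need_flush);

	if (need_flush)
		local_tlb_flush_sketch();

	/* ... program 'asid' into the context/ASID register ... */
	(void)asid;
}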
(n < pool->hint || need_flush(iommu))) {
need_flush = true;
need_flush = true;
need_flush = true;
if (need_flush) {
need_flush = false;
static bool need_flush; /* global flush state. set for each gart wrap */
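Here the same deferral happens at an IOMMU address allocator: on the pool->hint line need_flush is a small predicate helper, while the GART keeps a file-global flag that is set every time the allocation cursor wraps around the aperture, and the expensive IOTLB flush is issued once, just before a recycled slot becomes visible to the device. A self-contained sketch of that bookkeeping with invented names:

#include <stdbool.h>
#include <stddef.h>

#define APERTURE_ENTRIES 1024

static bool need_flush;		/* global flush state, set for each wrap */
static size_t next_entry;	/* allocation cursor into the aperture */

static void iotlb_flush_sketch(void) { /* stand-in for the hardware flush */ }

/* Allocate one aperture slot; only mark the table dirty on wrap. */
static size_t alloc_aperture_entry(void)
{
	if (next_entry >= APERTURE_ENTRIES) {
		next_entry = 0;
		need_flush = true;	/* recycled slots may still be cached */
	}
	return next_entry++;
}

/* Called just before the device can observe the new mapping. */
static void publish_mapping(void)
{
	if (need_flush) {
		iotlb_flush_sketch();
		need_flush = false;
	}
}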
unsigned int need_flush : 1;
ns.need_flush = 1;
ns.need_flush = 0;
ns.need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) < next_tlb_gen);
ns.need_flush = true;
bool need_flush)
if (need_flush) {
ns.need_flush = true;
if (ns.need_flush) {
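The bitfield group is the x86-style ASID reuse check: every context slot remembers the TLB generation it was last brought up to date with, and when an old ASID is picked again need_flush is set exactly when that remembered generation is behind the mm's next_tlb_gen, which then decides whether CR3 is written with or without the no-flush bit. A compilable sketch of the comparison; struct and array names are illustrative:

#include <stdbool.h>

#define NR_ASIDS 6

struct new_asid_sketch {
	unsigned int asid	: 8;
	unsigned int need_flush	: 1;
};

/* Per-slot record of the TLB generation last flushed on this CPU. */
static unsigned long ctx_tlb_gen[NR_ASIDS];

static struct new_asid_sketch choose_asid(unsigned int asid,
					  unsigned long next_tlb_gen)
{
	struct new_asid_sketch ns;

	ns.asid = asid;
	/* Stale entries remain iff this slot last saw an older generation. */
	ns.need_flush = ctx_tlb_gen[asid] < next_tlb_gen;
	return ns;
}

static void load_context_sketch(struct new_asid_sketch ns,
				unsigned long next_tlb_gen)
{
	if (ns.need_flush) {
		ctx_tlb_gen[ns.asid] = next_tlb_gen;
		/* write CR3 without the no-flush bit: old entries are dropped */
	} else {
		/* write CR3 with the no-flush bit: entries for this ASID live on */
	}
}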
bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;
if (need_flush || switch_context) {
if (need_flush) {
if (need_flush) {
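The GPU lines compute the flush decision up front so the right amount of command-stream space can be reserved: a TLB-flush preamble is needed either when the submit switches to a different GPU MMU context or when the mapping sequence counter (flush_seq) has moved since the last submit. A sketch of that decision; the structure is invented, the flush_seq comparison mirrors the line above:

#include <stdbool.h>

struct gpu_sketch {
	unsigned int mmu_context;	/* context currently live on the GPU */
	unsigned int flush_seq;		/* mapping generation already flushed */
};

/* Decide once whether this submit must start with a TLB-flush preamble. */
static bool submit_needs_flush(struct gpu_sketch *gpu,
			       unsigned int new_context,
			       unsigned int new_flush_seq)
{
	bool switch_mmu_context = gpu->mmu_context != new_context;
	bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;

	if (need_flush) {
		/* emit the flush commands, then record what was flushed */
		gpu->mmu_context = new_context;
		gpu->flush_seq = new_flush_seq;
	}

	return need_flush;
}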
unsigned int need_flush;
err = i915_gem_object_prepare_write(obj, &need_flush);
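In the i915 pair, need_flush is an out-parameter: i915_gem_object_prepare_write() pins the object for CPU access and reports back whether the caller has to flush CPU caches around its writes because the object is not coherent. A hedged usage fragment, assuming obj, vaddr, data and len are already set up; the CLFLUSH_BEFORE/CLFLUSH_AFTER bits and i915_gem_object_finish_access() are written from memory and should be checked against the i915 headers:

	unsigned int need_flush;
	int err;

	err = i915_gem_object_prepare_write(obj, &need_flush);
	if (err)
		return err;

	/* Drop stale CPU cache lines before writing, if asked to. */
	if (need_flush & CLFLUSH_BEFORE)
		drm_clflush_virt_range(vaddr, len);

	memcpy(vaddr, data, len);

	/* Push the new data out to memory for the GPU, if asked to. */
	if (need_flush & CLFLUSH_AFTER)
		drm_clflush_virt_range(vaddr, len);

	i915_gem_object_finish_access(obj);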
int need_flush = 0;
need_flush |= !cancel_delayed_work(&map->timeout);
if (need_flush)
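This fragment is the delayed-work teardown idiom: cancel_delayed_work() returns false when the work was no longer pending and may already be running, so the caller ORs that into need_flush and only then waits for in-flight handlers before freeing the object. A sketch of the idiom; flushing the system workqueue is one common fallback, and modern code often just uses cancel_delayed_work_sync() instead:

	int need_flush = 0;

	/* False means the timer already fired and the handler may be
	 * executing right now, so cancelling alone is not enough. */
	need_flush |= !cancel_delayed_work(&map->timeout);

	if (need_flush)
		flush_scheduled_work();	/* wait for any in-flight handler */

	/* only now is it safe to free 'map' */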
need_flush = true;
need_flush = true;
if (need_flush) {
bool need_flush = false;
int need_flush = 0;
need_flush = 1;
need_flush = 1;
if (need_flush ||
bool need_flush = false;
need_flush |= act == XDP_REDIRECT;
need_flush |= act == XDP_REDIRECT;
if (need_flush)
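The network-driver group is the XDP batching rule: XDP_REDIRECT only queues frames on per-CPU bulk queues, so the poll loop merely records that at least one verdict was a redirect and calls xdp_do_flush() once after the batch instead of flushing per packet. A sketch of a receive loop built around that rule; the ring helpers are invented, while bpf_prog_run_xdp(), XDP_REDIRECT and xdp_do_flush() are the real kernel names:

	bool need_flush = false;
	int budget_left = budget;

	while (budget_left--) {
		struct xdp_buff xdp;
		u32 act;

		if (!my_ring_next_xdp_buff(ring, &xdp))	/* invented helper */
			break;

		act = bpf_prog_run_xdp(prog, &xdp);
		/* invented helper: runs xdp_do_redirect() etc. for 'act' */
		my_ring_complete(ring, &xdp, act);

		/* Redirected frames sit in per-CPU queues until flushed. */
		need_flush |= act == XDP_REDIRECT;
	}

	if (need_flush)
		xdp_do_flush();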
if (capsnap->need_flush) {
BUG_ON(!capsnap->need_flush);
if (!capsnap->need_flush &&
capsnap->need_flush = true;
ceph_cap_string(dirty), capsnap->need_flush ? "" : "no_flush");
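The Ceph lines use need_flush as persistent per-object state: when an inode's snapshot context changes, its dirty cap metadata is captured into a cap snapshot, and need_flush records whether that snapshot has anything that must later be written back to the metadata server (the debug line prints "no_flush" otherwise). A structural sketch; apart from need_flush, the field and function names are invented:

#include <stdbool.h>

struct cap_snap_sketch {
	unsigned int dirty;	/* dirty metadata captured at snapshot time */
	bool need_flush;	/* must this snapshot be written back? */
};

/* Capture the inode's dirty state into a snapshot record. */
static void queue_cap_snap_sketch(struct cap_snap_sketch *capsnap,
				  unsigned int dirty)
{
	capsnap->dirty = dirty;
	/* Only snapshots that captured dirty state need flushing later. */
	capsnap->need_flush = dirty != 0;
}

/* Later pass: write back only the snapshots that were marked. */
static void flush_cap_snap_sketch(struct cap_snap_sketch *capsnap)
{
	if (!capsnap->need_flush)
		return;

	/* ... send the captured metadata to the server ... */
	capsnap->need_flush = false;
}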
bool need_flush;
int need_flush = 0;
need_flush = 1;
if (need_flush)
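Taken together, nearly every hit above is the same defensive idiom: note cheaply that an expensive flush will be needed, keep going, and issue the flush exactly once where it is safe and amortized, typically after the loop or after the lock is dropped. A generic, self-contained sketch of the lock-then-flush variant with invented names:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static void expensive_flush(void) { /* e.g. a TLB, IOTLB or cache flush */ }
static bool update_entry(int i) { return (i & 1) != 0; /* "entry changed" */ }

static void update_table(int n)
{
	bool need_flush = false;
	int i;

	pthread_mutex_lock(&table_lock);
	for (i = 0; i < n; i++) {
		/* Remember that something changed; never flush per entry. */
		if (update_entry(i))
			need_flush = true;
	}
	pthread_mutex_unlock(&table_lock);

	/* One flush, outside the lock, covers the whole batch. */
	if (need_flush)
		expensive_flush();
}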