cancel_work
cancel_work(&ap->deferred_qc_work);
bool cancel_work;
cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
if (cancel_work)
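In the three lines above, cancel_work is a local boolean flag rather than the workqueue helper; inside that scope the variable shadows the function, so the actual cancellation has to go through a different helper. A minimal sketch of the pattern, assuming a hypothetical hwp_notify_work delayed-work member on cpudata (cpumask_test_and_clear_cpu() and cancel_delayed_work_sync() are real cpumask/workqueue interfaces; the member name is an assumption):

    bool cancel_work;

    /* Was interrupt-driven notification enabled for this CPU? */
    cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
    if (cancel_work)
            /* hypothetical delayed_work member; the flag shadows cancel_work() here */
            cancel_delayed_work_sync(&cpudata->hwp_notify_work);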
cancel_work(&adev->reset_work);
cancel_work(&adev->userq_reset_work);
cancel_work(&adev->kfd.reset_work);
cancel_work(&adev->virt.flr_work);
cancel_work(&con->recovery_work);
cancel_work(&display->hotplug.poll_init_work);
cancel_work(&group->tiler_oom_work);
cancel_work(&dp->hpd_work);
cancel_work(&si->rx_mode_task);
cancel_work(&fep->timeout_work);
cancel_work(&tracer->update_db_work);
goto cancel_work;
cancel_work:
cancel_work(&iod->work);
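The goto cancel_work / cancel_work: pair above is an error-path label that merely shares the identifier; C keeps labels in a separate namespace, so the label and the cancel_work() call it leads to can coexist. A rough sketch of that shape (the surrounding function, struct my_iod, my_wq and the setup_resources() helper are hypothetical; queue_work() and cancel_work() are the real workqueue interfaces):

    struct my_iod {                         /* hypothetical request type */
            struct work_struct work;
    };

    static int start_request(struct my_iod *iod)
    {
            int ret;

            queue_work(my_wq, &iod->work);  /* my_wq is assumed */

            ret = setup_resources(iod);     /* hypothetical helper */
            if (ret)
                    goto cancel_work;

            return 0;

    cancel_work:
            cancel_work(&iod->work);
            return ret;
    }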
cancel_work(&ap_scan_bus_work);
if (cancel_work(&sd_dp->ew.work))
static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
if (cancel_work)
bool cancel_work)
sbp_session_release(sess, cancel_work);
cancel_work(&server->destroyer);
struct ksmbd_work *cancel_work;
list_for_each_entry(cancel_work, &fp->blocked_works,
cancel_work->state = KSMBD_WORK_CLOSED;
cancel_work->cancel_fn(cancel_work->cancel_argv);
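Here cancel_work is a struct ksmbd_work * cursor: the loop walks the work items blocked against a file, marks each closed and invokes its registered cancel callback. A sketch of the complete loop (the state constant and the cancel_fn/cancel_argv fields come from the lines above; the list member name is an assumption):

    struct ksmbd_work *cancel_work;

    list_for_each_entry(cancel_work, &fp->blocked_works,
                        fp_entry) {         /* member name is an assumption */
            cancel_work->state = KSMBD_WORK_CLOSED;
            cancel_work->cancel_fn(cancel_work->cancel_argv);
    }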
extern bool cancel_work(struct work_struct *work);
if (!cancel_work(&w->work) && work_busy(&w->work))
cancel_work(&w->work);
EXPORT_SYMBOL(cancel_work);
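The extern declaration and EXPORT_SYMBOL() lines are the workqueue helper itself: cancel_work() removes a work item that is still pending and returns true if it was, but unlike cancel_work_sync() it does not wait for a callback that is already running; the if (!cancel_work(...) && work_busy(...)) line above pairs it with work_busy() to detect exactly that case. A minimal usage sketch with a hypothetical my_dev device and handler (INIT_WORK(), schedule_work(), cancel_work(), work_busy() and flush_work() are the real workqueue interfaces):

    #include <linux/workqueue.h>

    struct my_dev {                         /* hypothetical */
            struct work_struct work;
    };

    static void my_handler(struct work_struct *work)
    {
            /* deferred processing */
    }

    static void my_dev_start(struct my_dev *dev)
    {
            INIT_WORK(&dev->work, my_handler);
            schedule_work(&dev->work);
    }

    static void my_dev_stop(struct my_dev *dev)
    {
            /*
             * Drop the item if it is still queued.  If it was not pending
             * but a handler is already running, wait for it to finish;
             * cancel_work() alone never sleeps on a running callback.
             */
            if (!cancel_work(&dev->work) && work_busy(&dev->work))
                    flush_work(&dev->work);
    }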
cancel_work(&hdev->dump.dump_rx);
cancel_work(&conn->processor);
cancel_work(&call->work);