mtx_initialized
if (mtx_initialized(&sc->emac_mtx))
if (mtx_initialized(&group_mode->lock) == 0)
if (mtx_initialized(&group_mode->lock) == 0)
KASSERT(mtx_initialized(&sc->mtx), ("gpio mutex not initialized"));
KASSERT(mtx_initialized(&sc->mtx), ("gpio mutex not initialized"));
KASSERT(mtx_initialized(&sc->sc_mtx), ("gpio mutex not initialized"));
if (mtx_initialized(&sc->mtx))
if (mtx_initialized(&idr->lock) == 0)
if (mtx_initialized(&sc->sc_mtx)) {
if (!mtx_initialized(&xgbe_phy_comm_lock))
if (mtx_initialized(&sc->bce_mtx))
if (mtx_initialized(&sc->bge_mtx)) /* XXX */
if (mtx_initialized(&fp->tx_mtx)) {
if (mtx_initialized(&fp->rx_mtx)) {
if (mtx_initialized(&sc->core_mtx)) {
if (mtx_initialized(&sc->sp_mtx)) {
if (mtx_initialized(&sc->dmae_mtx)) {
if (mtx_initialized(&sc->port.phy_mtx)) {
if (mtx_initialized(&sc->fwmb_mtx)) {
if (mtx_initialized(&sc->print_mtx)) {
if (mtx_initialized(&sc->stats_mtx)) {
if (mtx_initialized(&sc->mcast_mtx)) {
if (mtx_initialized(&fp->tx_mtx)) {
if (!mtx_initialized(&sc->tids.ftid_lock)) {
if (!mtx_initialized(&sc->tids.ftid_lock)) {
if (mtx_initialized(&t->hftid_lock)) {
if (mtx_initialized(&sc->tids.ftid_lock)) {
if (mtx_initialized(&sc->tids.atid_lock))
if (mtx_initialized(&sc->ifp_lock))
if (mtx_initialized(&t->atid_lock))
if (mtx_initialized(&sc->tc_lock))
if (mtx_initialized(&t->etid_lock))
if (mtx_initialized(&t->stid_lock))
if (mtx_initialized(&td->unsent_wr_lock))
if (mtx_initialized(&td->lctx_hash_lock))
if (mtx_initialized(&td->toep_list_lock))
KASSERT(mtx_initialized(&sc->dc_mtx), ("dc mutex not initialized"));
KASSERT(mtx_initialized(&sc->sc_mtx),
if (mtx_initialized(&tx->ring_mtx))
if (mtx_initialized(&lock->mutex))
if (mtx_initialized(&lock->mutex))
if (mtx_initialized(&lock->mutex))
if (mtx_initialized(&lock->mutex))
if (mtx_initialized(&sc->mfi_io_lock)) {
if (mtx_initialized(&sc->admin_reply_lock))
if (mtx_initialized(&op_reply_q->q_lock))
if (mtx_initialized(&op_req_q->q_lock))
if (mtx_initialized(&sc->init_cmds.completion.lock))
if (mtx_initialized(&sc->cfg_cmds.completion.lock))
if (mtx_initialized(&sc->ioctl_cmds.completion.lock))
if (mtx_initialized(&sc->host_tm_cmds.completion.lock))
if (mtx_initialized(&sc->dev_rmhs_cmds[i].completion.lock))
if (mtx_initialized(&sc->reset_mutex))
if (mtx_initialized(&sc->target_lock))
if (mtx_initialized(&sc->fwevt_lock))
if (mtx_initialized(&sc->cmd_pool_lock))
if (mtx_initialized(&sc->reply_free_q_lock))
if (mtx_initialized(&sc->sense_buf_q_lock))
if (mtx_initialized(&sc->chain_buf_lock))
if (mtx_initialized(&sc->admin_req_lock))
if (mtx_initialized(&sc->mpi3mr_mtx))
KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx),
KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized"));
if (mtx_initialized(&sc->mtx))
if (mtx_initialized(&pq->lock)) {
KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
if (!mtx_initialized(&ctrlr->adminq.lock))
if (!mtx_initialized(&ns->lock))
if (mtx_initialized(&qpair->recovery))
if (mtx_initialized(&qpair->lock))
if (mtx_initialized(&qpair->recovery))
if (mtx_initialized(&qpair->lock))
if (!mtx_initialized(&qpair->lock))
if (mtx_initialized(&qpair->lock))
if (mtx_initialized(&qpair->recovery))
if (mtx_initialized(&(lock)->mutex))\
if (mtx_initialized(&ocs->sim_lock) == 0) {
if (mtx_initialized(&ocs->sim_lock))
if (mtx_initialized(&ocs->sim_lock))
if (!mtx_initialized(&timer->lock)) {
if (mtx_initialized(&(lock)->lock)) {
if (mtx_initialized(&(lock)->lock)) {
if (mtx_initialized(&(lock)->lock)) {
if (mtx_initialized(&(lock)->lock)) {
if (mtx_initialized(&(lock)->lock)) {
if (mtx_initialized(&(lock)->lock)) {
if(mtx_initialized(&xport->io_pending_lock.lock))
KASSERT(mtx_initialized(&sc->gpio_mtx), ("gpio mutex not initialized"));
if (mtx_initialized(&fp->tx_mtx)) {
if (mtx_initialized(&qlnx_rdma_dev_lock) && (qlnx_host_list == NULL)) {
if (mtx_initialized(&qlnx_rdma_dev_lock)) {
if (mtx_initialized(&qlnx_rdma_dev_lock)) {
if (!mtx_initialized(&qlnx_rdma_dev_lock)) {
if (mtx_initialized(&dev->idr_lock))
if (mtx_initialized(&dev->sgid_lock))
if (mtx_initialized(&fp->tx_mtx)) {
KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized"));
KASSERT(mtx_initialized(&sc->rl_mtx), ("rl mutex not initialized"));
/* Convenience predicates: nonzero once the rtwn softc's command-queue /
 * node-table mutexes have been set up via mtx_init(). Fully parenthesized
 * argument use, safe in any expression context. */
#define RTWN_CMDQ_LOCK_INITIALIZED(sc) mtx_initialized(&(sc)->cmdq_mtx)
#define RTWN_NT_LOCK_INITIALIZED(sc) mtx_initialized(&(sc)->nt_mtx)
KASSERT(mtx_initialized(&sc->sis_mtx), ("sis mutex not initialized"));
KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx),
KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized"));
if (mtx_initialized(&sc->smc_mtx))
KASSERT(mtx_initialized(&sc->ste_mtx), ("ste mutex not initialized"));
KASSERT(mtx_initialized(&adapter->lock), ("SUME mutex not "
/* Nonzero once the sym(4) driver mutex has been initialized.
 * NOTE(review): relies on a file-scope `np` being in scope at every use
 * site — confirm against the surrounding sym driver source. */
#define SYM_LOCK_INITIALIZED() mtx_initialized(&np->mtx)
KASSERT(mtx_initialized(&sc->ti_mtx), ("ti mutex not initialized"));
if (!mtx_initialized(&tsec_phy_mtx))
if (!mtx_initialized(&hwq->qlock))
if (mtx_initialized(&hwq->recovery_lock))
if (mtx_initialized(&hwq->qlock))
if (mtx_initialized(&hwq->recovery_lock))
if (mtx_initialized(&hwq->qlock))
if (mtx_initialized(&hwq->recovery_lock))
if (mtx_initialized(&hwq->qlock))
KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized"));
if (mtx_initialized(&rxq->vtnrx_mtx) != 0)
if (mtx_initialized(&txq->vtntx_mtx) != 0)
if mtx_initialized(&sc->vmci_spinlock) {
if mtx_initialized(&sc->vmci_spinlock)
return mtx_initialized(lock);
return mtx_initialized(mutex);
if (mtx_initialized(lock))
KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized"));
KASSERT(mtx_initialized(&sc->mtx), ("%s: mutex not initialized",
KASSERT(mtx_initialized(&sc->xl_mtx), ("xl mutex not initialized"));
if (mtx_initialized(&cnputs_mtx)) {
if (!mtx_initialized(&devsoftc.mtx)) {
if (mtx_initialized(&sc->sc_mtx) == 0) {
if (!mtx_initialized(&of_bounce_mtx))
if (!mtx_initialized(&opalcons_buffer.mtx))