Example #1
static void
rtwn_usb_reset_tx_list(struct rtwn_usb_softc *uc,
    rtwn_datahead *head, struct ieee80211vap *vap)
{
	struct rtwn_vap *uvp = RTWN_VAP(vap);
	struct rtwn_data *dp, *tmp;
	int id;

	id = (uvp != NULL ? uvp->id : RTWN_VAP_ID_INVALID);

	STAILQ_FOREACH_SAFE(dp, head, next, tmp) {
		if (vap == NULL || (dp->ni == NULL &&
		    (dp->id == id || id == RTWN_VAP_ID_INVALID)) ||
		    (dp->ni != NULL && dp->ni->ni_vap == vap)) {
			if (dp->ni != NULL) {
				ieee80211_free_node(dp->ni);
				dp->ni = NULL;
			}

			if (dp->m != NULL) {
				m_freem(dp->m);
				dp->m = NULL;
			}

			STAILQ_REMOVE(head, dp, rtwn_data, next);
			STAILQ_INSERT_TAIL(&uc->uc_tx_inactive, dp, next);
		}
	}
}
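
Nearly every example in this collection follows the shape of Example #1: walk a singly-linked tail queue, unlink elements with STAILQ_REMOVE, and release them. The following is a minimal self-contained sketch of that shape (hypothetical struct item and field names, assuming a BSD-derived <sys/queue.h> that provides STAILQ_FOREACH_SAFE):

#include <stdlib.h>
#include <sys/queue.h>

struct item {
	int value;
	STAILQ_ENTRY(item) entries;	/* list linkage */
};

STAILQ_HEAD(item_head, item);

int
main(void)
{
	struct item_head head = STAILQ_HEAD_INITIALIZER(head);
	struct item *it, *tmp;
	int i;

	/* Insert a few items at the tail. */
	for (i = 0; i < 3; i++) {
		it = malloc(sizeof(*it));
		if (it == NULL)
			abort();
		it->value = i;
		STAILQ_INSERT_TAIL(&head, it, entries);
	}

	/*
	 * Drain the queue.  The _SAFE variant caches the next pointer
	 * in tmp, so the current element may be unlinked and freed
	 * during the traversal.
	 */
	STAILQ_FOREACH_SAFE(it, &head, entries, tmp) {
		STAILQ_REMOVE(&head, it, item, entries);
		free(it);
	}
	return (0);
}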
Example #2
int
ctl_frontend_deregister(struct ctl_frontend *fe)
{
	struct ctl_io_pool *pool;
	int port_num;
	int retval;

	retval = 0;

	pool = (struct ctl_io_pool *)fe->ctl_pool_ref;

	if (fe->targ_port == -1) {
		retval = 1;
		goto bailout;
	}

	mtx_lock(&control_softc->ctl_lock);
	STAILQ_REMOVE(&control_softc->fe_list, fe, ctl_frontend, links);
	control_softc->num_frontends--;
	port_num = (fe->targ_port < CTL_MAX_PORTS) ? fe->targ_port :
	                                             fe->targ_port - CTL_MAX_PORTS;
	ctl_clear_mask(&control_softc->ctl_port_mask, port_num);
	control_softc->ctl_ports[port_num] = NULL;
	mtx_unlock(&control_softc->ctl_lock);

	ctl_pool_free(pool);

bailout:
	return (retval);
}
Example #3
static void idle_backend_handler(struct ns_connection *nc, int ev,
                                 void *ev_data) {
    (void) ev_data; /* Unused. */
    struct be_conn *bec = nc->user_data;
    const time_t now = time(NULL);
#ifdef DEBUG
    write_log("%d idle bec=%p nc=%p ev=%d deadline=%d\n", now, bec, nc, ev,
              bec->idle_deadline);
#endif
    switch (ev) {
    case NS_POLL: {
        if (bec->idle_deadline > 0 && now > bec->idle_deadline) {
#ifdef DEBUG
            write_log("bec=%p nc=%p closing due to idleness\n", bec, bec->nc);
#endif
            bec->nc->flags |= NSF_CLOSE_IMMEDIATELY;
        }
        break;
    }

    case NS_CLOSE: {
#ifdef DEBUG
        write_log("bec=%p closed\n", bec);
#endif
        if (bec->idle_deadline > 0) {
            STAILQ_REMOVE(&bec->be->conns, bec, be_conn, conns);
        }
        free(bec);
        break;
    }
    }
}
Example #4
/*
 * Allocate a single object of specified size with specified flags (either
 * M_WAITOK or M_NOWAIT).
 */
void *
memguard_alloc(unsigned long size, int flags)
{
	void *obj;
	struct memguard_entry *e = NULL;
	int numpgs;

	numpgs = size / PAGE_SIZE;
	if ((size % PAGE_SIZE) != 0)
		numpgs++;
	if (numpgs > MAX_PAGES_PER_ITEM)
		panic("MEMGUARD: You must increase MAX_PAGES_PER_ITEM " \
		    "in memguard.c (requested: %d pages)", numpgs);
	if (numpgs == 0)
		return NULL;

	/*
	 * If we haven't exhausted the memguard_map yet, allocate from
	 * it and grab a new page, even if we have recycled pages in our
	 * FIFO.  This is because we wish to allow recycled pages to live
	 * guarded in the FIFO for as long as possible in order to catch
	 * even very late tamper-after-frees, even though it means that
	 * we end up wasting more memory, this is only a DEBUGGING allocator
	 * after all.
	 */
	MEMGUARD_CRIT_SECTION_ENTER;
	if (memguard_mapused >= memguard_mapsize) {
		e = STAILQ_FIRST(&memguard_fifo_pool[numpgs - 1]);
		if (e != NULL) {
			STAILQ_REMOVE(&memguard_fifo_pool[numpgs - 1], e,
			    memguard_entry, entries);
			MEMGUARD_CRIT_SECTION_EXIT;
			obj = e->ptr;
			free(e, M_TEMP);
			memguard_unguard(obj, numpgs);
			if (flags & M_ZERO)
				bzero(obj, PAGE_SIZE * numpgs);
			return obj;
		}
		MEMGUARD_CRIT_SECTION_EXIT;
		if (flags & M_WAITOK)
			panic("MEMGUARD: Failed with M_WAITOK: " \
			    "memguard_map too small");
		return NULL;
	}
	memguard_mapused += (PAGE_SIZE * numpgs);
	MEMGUARD_CRIT_SECTION_EXIT;

	obj = (void *)kmem_malloc(memguard_map, PAGE_SIZE * numpgs, flags);
	if (obj != NULL) {
		vsetmgfifo((vm_offset_t)obj, &memguard_fifo_pool[numpgs - 1]);
		if (flags & M_ZERO)
			bzero(obj, PAGE_SIZE * numpgs);
	} else {
		MEMGUARD_CRIT_SECTION_ENTER;
		memguard_mapused -= (PAGE_SIZE * numpgs);
		MEMGUARD_CRIT_SECTION_EXIT;
	}
	return obj;
}
Example #5
void net_habitue_device_SC101::completeIO(outstanding_io *io)
{
  STAILQ_REMOVE(&_outstandingHead, io, outstanding_io, entries);
  _outstandingCount--;
  
  dequeueAndSubmitIO();
}
Example #6
/*! \fn void *thread_io_device(void *parg)
 *  \brief I/O device thread
 *  \details This function is the body of the thread that models the I/O
 *  device. The thread suspends its execution as long as no requests to
 *  access the device are pending. It terminates once the maximum number
 *  of memory accesses (performed by the MMU) has been made.
 *  \param parg         unused
 *  \return             unused
 */
static void *
thread_io_device(void *parg)
{
    io_entry_t *req;
    struct timespec timeout;
    int num;

    printf("--> Thread DEVICE I/O avviato [Tmin=%d, Tmax=%d]\n",
           io_dev.Tmin, io_dev.Tmax);

    while (!io_device_should_exit) {
        pthread_mutex_lock(&wait_lock);
        while ((ioreq_count == 0) && !io_device_should_exit) {
            pthread_cond_wait(&wait_cond, &wait_lock);
        }

        if (io_device_should_exit && (ioreq_count == 0)) {
            pthread_mutex_unlock(&wait_lock);
            break;
        }

        /*
         *  Take the first request and unlink it from the queue, updating
         *  the "ioreq_count" variable that tracks how many requests are
         *  still pending.  Reading the queue head and removing the entry
         *  both happen under fifo_lock to avoid racing with enqueuers.
         */
        pthread_mutex_lock(&fifo_lock);
        req = STAILQ_FIRST(&io_request_head);
        assert(req);
        STAILQ_REMOVE(&io_request_head, req, io_entry, entries);
        ioreq_count--;
        pthread_mutex_unlock(&fifo_lock);

        /*
         *  Draw a random number in the closed interval [Tmin,Tmax] to
         *  simulate the time needed to fetch the data from the device.
         */
        num = bounded_rand(io_dev.Tmin, io_dev.Tmax);
        timeout.tv_sec = 0;
        timeout.tv_nsec = num * 1000000;
        nanosleep(&timeout, NULL);
        fprintf(LOG_FILE(req->procnum),
                "Access request served in %d ms\n", num);

        /*
         *  Update the statistics and wake up the process that issued
         *  the request.
         */
        io_dev.req_count++;
        proc_table[req->procnum]->stats.io_requests++;
        proc_table[req->procnum]->stats.time_elapsed += num;
        pthread_mutex_unlock(&wait_lock);
        pthread_cond_signal(&proc_table[req->procnum]->io_cond);
        XFREE(req);
    }
    printf("<-- Thread DEVICE I/O terminato\n");
    pthread_exit(NULL);
}
Example #7
int timer_remove(long timer_id)
{
  int rc = 0;
  struct timer_elm_s *timer_p;

  TMR_DEBUG("Removing timer 0x%lx\n", timer_id);

  pthread_mutex_lock(&timer_desc.timer_list_mutex);
  TIMER_SEARCH(timer_p, timer, ((timer_t)timer_id), &timer_desc.timer_queue);

  /* We didn't find the timer in list */
  if (timer_p == NULL) {
    pthread_mutex_unlock(&timer_desc.timer_list_mutex);
    TMR_ERROR("Didn't find timer 0x%lx in list\n", timer_id);
    return -1;
  }

  STAILQ_REMOVE(&timer_desc.timer_queue, timer_p, timer_elm_s, entries);
  pthread_mutex_unlock(&timer_desc.timer_list_mutex);

  if (timer_delete(timer_p->timer) < 0) {
    TMR_ERROR("Failed to delete timer 0x%lx\n", (long)timer_p->timer);
    rc = -1;
  }

  free(timer_p);
  timer_p = NULL;
  return rc;
}
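
Examples #7, #20, #26, and #27 share a locking discipline worth noting: the element is unlinked from the queue while the mutex is held, but freed (or handed to the caller) only after the mutex is released, keeping the critical section short. A minimal sketch of that pattern, with hypothetical node and lock names:

#include <pthread.h>
#include <stdlib.h>
#include <sys/queue.h>

struct node {
	int key;
	STAILQ_ENTRY(node) entries;
};

static STAILQ_HEAD(, node) nodeq = STAILQ_HEAD_INITIALIZER(nodeq);
static pthread_mutex_t nodeq_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Unlink the first node under the lock; the caller frees it outside
 * the critical section, so other threads are not serialized behind
 * free().
 */
static struct node *
nodeq_pop(void)
{
	struct node *n;

	pthread_mutex_lock(&nodeq_lock);
	n = STAILQ_FIRST(&nodeq);
	if (n != NULL)
		STAILQ_REMOVE(&nodeq, n, node, entries);
	pthread_mutex_unlock(&nodeq_lock);

	return n;
}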
Example #8
struct os_event *
os_eventq_get(struct os_eventq *evq)
{
    struct os_event *ev;
    os_sr_t sr;

    OS_ENTER_CRITICAL(sr);
pull_one:
    ev = STAILQ_FIRST(&evq->evq_list);
    if (ev) {
        STAILQ_REMOVE(&evq->evq_list, ev, os_event, ev_next);
        ev->ev_queued = 0;
    } else {
        evq->evq_task = os_sched_get_current_task();
        os_sched_sleep(evq->evq_task, OS_TIMEOUT_NEVER);
        OS_EXIT_CRITICAL(sr);

        os_sched(NULL, 0);

        OS_ENTER_CRITICAL(sr);
        evq->evq_task = NULL;
        goto pull_one;
    }
    OS_EXIT_CRITICAL(sr);

    return (ev);
}
Example #9
int
linker_file_unload(linker_file_t file)
{
    module_t mod, next;
    struct common_symbol* cp;
    int error = 0;
    int i;

    KLD_DPF(FILE, ("linker_file_unload: lf->refs=%d\n", file->refs));
    lockmgr(&lock, LK_EXCLUSIVE|LK_RETRY, 0, curproc);
    if (file->refs == 1) {
	KLD_DPF(FILE, ("linker_file_unload: file is unloading, informing modules\n"));
	/*
	 * Inform any modules associated with this file.
	 */
	for (mod = TAILQ_FIRST(&file->modules); mod; mod = next) {
	    next = module_getfnext(mod);

	    /*
	     * Give the module a chance to veto the unload.
	     */
	    if ((error = module_unload(mod)) != 0) {
		KLD_DPF(FILE, ("linker_file_unload: module %p vetoes unload\n",
			       mod));
		lockmgr(&lock, LK_RELEASE, 0, curproc);
		goto out;
	    }

	    module_release(mod);
	}
    }

    file->refs--;
    if (file->refs > 0) {
	lockmgr(&lock, LK_RELEASE, 0, curproc);
	goto out;
    }

    linker_file_sysuninit(file);

    TAILQ_REMOVE(&files, file, link);
    lockmgr(&lock, LK_RELEASE, 0, curproc);

    for (i = 0; i < file->ndeps; i++)
	linker_file_unload(file->deps[i]);
    free(file->deps, M_LINKER);

    for (cp = STAILQ_FIRST(&file->common); cp;
	 cp = STAILQ_FIRST(&file->common)) {
	STAILQ_REMOVE(&file->common, cp, common_symbol, link);
	free(cp, M_LINKER);
    }

    file->ops->unload(file);
    free(file, M_LINKER);

out:
    return error;
}
Example #10
/*
 * Remove mbuf from the mhdr Q
 */
void
mbuf_remove(struct mhdr *mhdr, struct mbuf *mbuf)
{
    log_debug(LOG_VVERB, "remove mbuf %p len %d", mbuf, mbuf->last - mbuf->pos);

    STAILQ_REMOVE(mhdr, mbuf, mbuf, next);
    STAILQ_NEXT(mbuf, next) = NULL;
}
Example #11
static int
smu_run_cmd(device_t dev, struct smu_cmd *cmd, int wait)
{
	struct smu_softc *sc;
	uint8_t cmd_code;
	int error;

	sc = device_get_softc(dev);
	cmd_code = cmd->cmd;

	mtx_lock(&sc->sc_mtx);
	if (sc->sc_cur_cmd != NULL) {
		STAILQ_INSERT_TAIL(&sc->sc_cmdq, cmd, cmd_q);
	} else
		smu_send_cmd(dev, cmd);
	mtx_unlock(&sc->sc_mtx);

	if (!wait)
		return (0);

	if (sc->sc_doorbellirqid < 0) {
		/* Poll if the IRQ has not been set up yet */
		do {
			DELAY(50);
			smu_doorbell_intr(dev);
		} while (sc->sc_cur_cmd != NULL);
	} else {
		/* smu_doorbell_intr will wake us when the command is ACK'ed */
		error = tsleep(cmd, 0, "smu", 800 * hz / 1000);
		if (error != 0)
			smu_doorbell_intr(dev);	/* One last chance */
		
		if (error != 0) {
		    mtx_lock(&sc->sc_mtx);
		    if (cmd->cmd == cmd_code) {	/* Never processed */
			/* Abort this command if we timed out */
			if (sc->sc_cur_cmd == cmd)
				sc->sc_cur_cmd = NULL;
			else
				STAILQ_REMOVE(&sc->sc_cmdq, cmd, smu_cmd,
				    cmd_q);
			mtx_unlock(&sc->sc_mtx);
			return (error);
		    }
		    error = 0;
		    mtx_unlock(&sc->sc_mtx);
		}
	}

	/* SMU acks the command by inverting the command bits */
	if (cmd->cmd == ((~cmd_code) & 0xff))
		error = 0;
	else
		error = EIO;

	return (error);
}
Example #12
int __recvpath
psmi_mq_handle_data(psm_mq_req_t req, psm_epaddr_t epaddr,
		    uint32_t egrid, uint32_t offset,
		    const void *buf, uint32_t nbytes)
{
    psm_mq_t mq;
    int rc;
    
    if (req == NULL) goto no_req;

    mq = req->mq;
    if (req->state == MQ_STATE_MATCHED)
	rc = MQ_RET_MATCH_OK;
    else {
	psmi_assert(req->state == MQ_STATE_UNEXP);
	rc = MQ_RET_UNEXP_OK;
    }

    psmi_assert(req->egrid.egr_data == egrid);
    psmi_mq_req_copy(req, epaddr, offset, buf, nbytes);

    if (req->send_msgoff == req->send_msglen) {
	if (req->type & MQE_TYPE_EGRLONG) {
	    STAILQ_REMOVE(&epaddr->mctxt_master->egrlong,
				req, psm_mq_req, nextq);
	}
	    
	if (req->state == MQ_STATE_MATCHED) {
	    req->state = MQ_STATE_COMPLETE;
	    mq_qq_append(&mq->completed_q, req);
	}
	else { /* MQ_STATE_UNEXP */
	    req->state = MQ_STATE_COMPLETE;
	}
	_IPATH_VDBG("epaddr=%s completed %d byte send, state=%d\n", 
		    psmi_epaddr_get_name(epaddr->epid),
		    (int)req->send_msglen, req->state);
    }

    return rc;

no_req:
    mq = epaddr->ep->mq;
    req = psmi_mq_req_alloc(mq, MQE_TYPE_RECV);
    psmi_assert(req != NULL);

    req->egrid.egr_data = egrid;
    req->recv_msgoff = offset;
    req->recv_msglen = nbytes;
    req->buf = psmi_mq_sysbuf_alloc(mq, nbytes);
    psmi_mq_mtucpy(req->buf, buf, nbytes);

    STAILQ_INSERT_TAIL(&epaddr->mctxt_master->egrdata, req, nextq);

    return MQ_RET_UNEXP_OK;
}
Example #13
rstatus_t
cursor_destory(cursor_t *cursor)
{
    leveldb_iter_destroy(cursor->iter);
    STAILQ_REMOVE(&cursorq, cursor, cursor_s, next);
    nc_free(cursor);
    ncursor--;

    return NC_OK;
}
Example #14
gdp_event_t *
gdp_event_next(gdp_gcl_t *gcl, EP_TIME_SPEC *timeout)
{
	gdp_event_t *gev;
	EP_TIME_SPEC *abs_to = NULL;
	EP_TIME_SPEC tv;

	if (timeout != NULL)
	{
		ep_time_deltanow(timeout, &tv);
		abs_to = &tv;
	}

	ep_thr_mutex_lock(&ActiveListMutex);
	for (;;)
	{
		int err;

		while ((gev = STAILQ_FIRST(&ActiveList)) == NULL)
		{
			// wait until we have at least one thing to try
			err = ep_thr_cond_wait(&ActiveListSig, &ActiveListMutex, abs_to);
			if (err == ETIMEDOUT)
				goto fail0;
		}
		while (gev != NULL)
		{
			// if this isn't the GCL we want, keep searching the list
			if (gcl == NULL || gev->gcl == gcl)
				break;

			// not the event we want
			gev = STAILQ_NEXT(gev, queue);
		}

		if (gev != NULL)
		{
			// found a match!
			break;
		}

		// if there is no match, wait until something is added and try again
		err = ep_thr_cond_wait(&ActiveListSig, &ActiveListMutex, abs_to);
		if (err == ETIMEDOUT)
			break;
	}

	if (gev != NULL)
		STAILQ_REMOVE(&ActiveList, gev, gdp_event, queue);
fail0:
	ep_thr_mutex_unlock(&ActiveListMutex);

	// the caller must call gdp_event_free(gev)
	return gev;
}
Example #15
void rte_vmbus_chan_close(struct vmbus_channel *chan)
{
	const struct rte_vmbus_device *device = chan->device;
	struct vmbus_channel *primary = device->primary;

	if (chan != primary)
		STAILQ_REMOVE(&primary->subchannel_list, chan,
			      vmbus_channel, next);

	rte_free(chan);
}
Example #16
void net_habitue_device_SC101::dequeueAndSubmitIO()
{
  outstanding_io *io = STAILQ_FIRST(&_pendingHead);
  
  if (io)
  {
    STAILQ_REMOVE(&_pendingHead, io, outstanding_io, entries);
    _pendingCount--;
    
    submitIO(io);
  }
}
Example #17
static void
nvmf_deactive_tx_desc(struct nvme_qp_tx_desc *tx_desc)
{
	struct spdk_nvmf_conn *conn;

	RTE_VERIFY(tx_desc != NULL);
	conn = tx_desc->conn;
	RTE_VERIFY(tx_desc->conn != NULL);

	STAILQ_REMOVE(&conn->qp_tx_active_desc, tx_desc, nvme_qp_tx_desc, link);
	STAILQ_INSERT_TAIL(&conn->qp_tx_desc, tx_desc, link);
}
Example #18
void
os_eventq_remove(struct os_eventq *evq, struct os_event *ev)
{
    os_sr_t sr;

    OS_ENTER_CRITICAL(sr);
    if (OS_EVENT_QUEUED(ev)) {
        STAILQ_REMOVE(&evq->evq_list, ev, os_event, ev_next);
    }
    ev->ev_queued = 0;
    OS_EXIT_CRITICAL(sr);
}
Example #19
/**
 * mutt_regexlist_free - Free a RegexList object
 * @param rl RegexList to free
 */
void mutt_regexlist_free(struct RegexList *rl)
{
  if (!rl)
    return;

  struct RegexListNode *np = NULL, *tmp = NULL;
  STAILQ_FOREACH_SAFE(np, rl, entries, tmp)
  {
    STAILQ_REMOVE(rl, np, RegexListNode, entries);
    mutt_regex_free(&np->regex);
    FREE(&np);
  }
}
Example #20
struct conn *
conn_cq_pop(struct conn_q *cq)
{
    struct conn *c;

    pthread_mutex_lock(&cq->lock);
    c = STAILQ_FIRST(&cq->hdr);
    if (c != NULL) {
        STAILQ_REMOVE(&cq->hdr, c, conn, c_tqe);
    }
    pthread_mutex_unlock(&cq->lock);

    return c;
}
Example #21
void
taskqueue_free(struct taskqueue *queue)
{
	int s = splhigh();
	queue->tq_draining = 1;
	splx(s);

	taskqueue_run(queue);

	s = splhigh();
	STAILQ_REMOVE(&taskqueue_queues, queue, taskqueue, tq_link);
	splx(s);

	free(queue, M_TASKQUEUE);
}
Example #22
static inline void
hv_put_rndis_request(rndis_device *device, rndis_request *request)
{
	mtx_lock_spin(&device->req_lock);
	/* Fixme:  Has O(n) performance */
	/*
	 * XXXKYS: Use Doubly linked lists.
	 */
	STAILQ_REMOVE(&device->myrequest_list, request, rndis_request_,
	    mylist_entry);
	mtx_unlock_spin(&device->req_lock);

	sema_destroy(&request->wait_sema);
	free(request, M_DEVBUF);
}
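
The Fixme in Example #22 is about STAILQ_REMOVE itself: a singly-linked tail queue keeps no back pointers, so removing an arbitrary element means walking from the head to find its predecessor. The sketch below shows roughly what the macro does internally (modeled on the FreeBSD <sys/queue.h> definition, not a verbatim copy; it assumes STAILQ_REMOVE_AFTER is available, as it is on FreeBSD):

#include <sys/queue.h>

struct elem {
	int v;
	STAILQ_ENTRY(elem) link;
};

STAILQ_HEAD(elem_head, elem);

/*
 * Equivalent of STAILQ_REMOVE(head, e, elem, link): find the
 * predecessor by walking from the head (the O(n) part), then splice
 * the element out.  A TAILQ keeps back pointers and removes in O(1),
 * which is what the XXXKYS comment suggests switching to.
 */
static void
elem_remove(struct elem_head *head, struct elem *e)
{
	if (STAILQ_FIRST(head) == e) {
		STAILQ_REMOVE_HEAD(head, link);		/* O(1) at the head */
	} else {
		struct elem *cur = STAILQ_FIRST(head);

		while (STAILQ_NEXT(cur, link) != e)	/* O(n) walk */
			cur = STAILQ_NEXT(cur, link);
		STAILQ_REMOVE_AFTER(head, cur, link);	/* unlink e */
	}
}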
Example #23
static int
flowadv_thread_cont(int err)
{
#pragma unused(err)
	for (;;) {
		lck_mtx_assert(&fadv_lock, LCK_MTX_ASSERT_OWNED);
		while (STAILQ_EMPTY(&fadv_list)) {
			VERIFY(!fadv_active);
			(void) msleep0(&fadv_list, &fadv_lock, (PSOCK | PSPIN),
			    "flowadv_cont", 0, flowadv_thread_cont);
			/* NOTREACHED */
		}

		fadv_active = 1;
		for (;;) {
			struct flowadv_fcentry *fce;

			VERIFY(!STAILQ_EMPTY(&fadv_list));
			fce = STAILQ_FIRST(&fadv_list);
			STAILQ_REMOVE(&fadv_list, fce,
			    flowadv_fcentry, fce_link);
			STAILQ_NEXT(fce, fce_link) = NULL;

			lck_mtx_unlock(&fadv_lock);
			switch (fce->fce_flowsrc) {
			case FLOWSRC_INPCB:
				inp_flowadv(fce->fce_flowid);
				break;

			case FLOWSRC_IFNET:
				ifnet_flowadv(fce->fce_flowid);
				break;

			case FLOWSRC_PF:
			default:
				break;
			}
			flowadv_free_entry(fce);
			lck_mtx_lock_spin(&fadv_lock);

			/* if there's no pending request, we're done */
			if (STAILQ_EMPTY(&fadv_list))
				break;
		}
		fadv_active = 0;
	}
}
Example #24
/* Push a new entry onto the slow log.
 * This function also trims the slow log according to the configured
 * maximum length.
 * The unit of duration is microseconds. */
void slowlog_push_entry_if_needed(struct msg *r, long long duration) {
    if (slowlog_log_slower_than < 0) return; /* Slowlog disabled */
    if (duration >= slowlog_log_slower_than) {
        slowlog_entry *se = slowlog_create_entry(r,duration);
        pthread_rwlock_wrlock(&rwlocker);
        se->id = slowlog_entry_id++;
        STAILQ_INSERT_HEAD(&slowlog, se, next);

        if (slowlog_len >= slowlog_max_len) {
            se = STAILQ_LAST(&slowlog, slowlog_entry, next);
            STAILQ_REMOVE(&slowlog, se, slowlog_entry, next);
            slowlog_free_entry(se);
        } else {
            slowlog_len ++;
        }
        pthread_rwlock_unlock(&rwlocker);

        slowlog_statistics_input();
    }
}
Example #25
/*
 * mpipe support thread for request failures when mpipe_alloc_callback()
 * is called.
 */
static void
mpipe_thread(void *arg)
{
    malloc_pipe_t mpipe = arg;
    struct mpipe_callback *mcb;

    lwkt_gettoken(&mpipe->token);
    while ((mpipe->mpflags & MPF_EXITING) == 0) {
	while (mpipe->free_count &&
	       (mcb = STAILQ_FIRST(&mpipe->queue)) != NULL) {
		STAILQ_REMOVE(&mpipe->queue, mcb, mpipe_callback, entry);
		mcb->func(mcb->arg1, mcb->arg2);
		kfree(mcb, M_MPIPEARY);
	}
	mpipe->mpflags |= MPF_QUEUEWAIT;
	tsleep(&mpipe->queue, 0, "wait", 0);
    }
    mpipe->thread = NULL;
    wakeup(mpipe);
    lwkt_reltoken(&mpipe->token);
}
Example #26
static struct conn *
_conn_get(void)
{
    struct conn *c;

    pthread_mutex_lock(&free_connq_mutex);

    if (!STAILQ_EMPTY(&free_connq)) {
        ASSERT(nfree_connq > 0);

        c = STAILQ_FIRST(&free_connq);
        nfree_connq--;
        STAILQ_REMOVE(&free_connq, c, conn, c_tqe);
    } else {
        c = NULL;
    }

    pthread_mutex_unlock(&free_connq_mutex);

    return c;
}
Example #27
static void
apmdtor(void *data)
{
	struct	apm_clone_data *clone;
	struct	acpi_softc *acpi_sc;

	clone = data;
	acpi_sc = clone->acpi_sc;

	/* We are about to lose a reference so check if suspend should occur */
	if (acpi_sc->acpi_next_sstate != 0 &&
	    clone->notify_status != APM_EV_ACKED)
		acpi_AckSleepState(clone, 0);

	/* Remove this clone's data from the list and free it. */
	ACPI_LOCK(acpi);
	STAILQ_REMOVE(&acpi_sc->apm_cdevs, clone, apm_clone_data, entries);
	seldrain(&clone->sel_read);
	knlist_destroy(&clone->sel_read.si_note);
	ACPI_UNLOCK(acpi);
	free(clone, M_APMDEV);
}
Example #28
static int
apmclose(struct cdev *dev, int flag, int fmt, d_thread_t *td)
{
	struct	apm_clone_data *clone;
	struct	acpi_softc *acpi_sc;

	clone = dev->si_drv1;
	acpi_sc = clone->acpi_sc;

	/* We are about to lose a reference so check if suspend should occur */
	if (acpi_sc->acpi_next_sstate != 0 &&
	    clone->notify_status != APM_EV_ACKED)
		acpi_AckSleepState(clone, 0);

	/* Remove this clone's data from the list and free it. */
	ACPI_LOCK(acpi);
	STAILQ_REMOVE(&acpi_sc->apm_cdevs, clone, apm_clone_data, entries);
	knlist_destroy(&clone->sel_read.si_note);
	ACPI_UNLOCK(acpi);
	free(clone, M_APMDEV);
	destroy_dev_sched(dev);
	return (0);
}
Example #29
void
remove_backend(struct Backend_head *head, struct Backend *backend) {
    STAILQ_REMOVE(head, backend, Backend, entries);
    free_backend(backend);
}
Example #30
/*
 * Note, epaddr is the master.
 */
int __recvpath
psmi_mq_handle_outoforder_queue(psm_epaddr_t epaddr)
{
    psm_mq_t mq = epaddr->ep->mq;
    psm_mq_req_t ureq, ereq;
    uint32_t msglen;

    next_ooo:
    ureq = mq_ooo_match(&epaddr->outoforder_q, epaddr->mctxt_recv_seqnum);
    if (ureq == NULL) return 0;
    epaddr->mctxt_recv_seqnum++;
    epaddr->outoforder_c--;

    ereq = mq_req_match(&(mq->expected_q), ureq->tag, 1);
    if (ereq == NULL) {
	mq_sq_append(&mq->unexpected_q, ureq);
	if (epaddr->outoforder_c) goto next_ooo;
	return 0;
    }

    psmi_assert(MQE_TYPE_IS_RECV(ereq->type));
    ereq->tag = ureq->tag;
    msglen = mq_set_msglen(ereq, ereq->buf_len, ureq->send_msglen);

    switch (ureq->state) {
    case MQ_STATE_COMPLETE:
	if (ureq->buf != NULL) { /* 0-byte don't alloc a sysbuf */
	    psmi_mq_mtucpy(ereq->buf,
		(const void *)ureq->buf, msglen);
	    psmi_mq_sysbuf_free(mq, ureq->buf);
	}
	ereq->state = MQ_STATE_COMPLETE;
	mq_qq_append(&mq->completed_q, ereq);
	break;
    case MQ_STATE_UNEXP: /* not done yet */
	ereq->type = ureq->type;
	ereq->egrid = ureq->egrid;
	ereq->epaddr = ureq->epaddr;
	ereq->send_msgoff = ureq->send_msgoff;
	ereq->recv_msgoff = min(ureq->recv_msgoff, msglen);
	psmi_mq_mtucpy(ereq->buf,
	    (const void *)ureq->buf, ereq->recv_msgoff);
	psmi_mq_sysbuf_free(mq, ureq->buf);
	ereq->state = MQ_STATE_MATCHED;
	STAILQ_INSERT_AFTER(&ureq->epaddr->mctxt_master->egrlong,
			ureq, ereq, nextq);
	STAILQ_REMOVE(&ureq->epaddr->mctxt_master->egrlong,
			ureq, psm_mq_req, nextq);
	break;
    case MQ_STATE_UNEXP_RV: /* rendez-vous ... */
	ereq->state = MQ_STATE_MATCHED;
	ereq->rts_peer = ureq->rts_peer;
	ereq->rts_sbuf = ureq->rts_sbuf;
	ereq->send_msgoff = 0;
	ereq->rts_callback = ureq->rts_callback;
	ereq->rts_reqidx_peer = ureq->rts_reqidx_peer;
	ereq->type = ureq->type;
	ereq->rts_callback(ereq, 0);
	break;
    default:
	fprintf(stderr, "Unexpected state %d in req %p\n", ureq->state, ureq);
	fprintf(stderr, "type=%d, mq=%p, tag=%p\n",
			ureq->type, ureq->mq, (void *)(uintptr_t)ureq->tag);
	abort();
    }

    psmi_mq_req_free(ureq);
    if (epaddr->outoforder_c) goto next_ooo;
    return 0;
}