Example #1
void
pktref_insert_head(struct th_pktref_queue *q, th_pkt_t *pkt)
{
  th_pktref_t *pr;

  pr = pktref_create(pkt);
  TAILQ_INSERT_HEAD(q, pr, pr_link);
}
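
Before the rest of the examples, here is a minimal self-contained sketch of the queue(3) TAILQ pattern they all share: TAILQ_ENTRY embeds the prev/next links inside the element, TAILQ_HEAD declares the list head type, and TAILQ_INSERT_HEAD pushes a new element to the front. The struct item type and its field names are hypothetical, chosen only for illustration.

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	int value;
	TAILQ_ENTRY(item) link;			/* embedded prev/next links */
};

TAILQ_HEAD(item_head, item);			/* declares the head type */

int
main(void)
{
	struct item_head head = TAILQ_HEAD_INITIALIZER(head);
	struct item *it, *tmp;
	int i;

	for (i = 0; i < 3; i++) {
		it = malloc(sizeof(*it));
		if (it == NULL)
			abort();
		it->value = i;
		TAILQ_INSERT_HEAD(&head, it, link);	/* push to front */
	}

	/* Prints 2 1 0: each insert displaced the previous head. */
	TAILQ_FOREACH(it, &head, link)
		printf("%d\n", it->value);

	for (it = TAILQ_FIRST(&head); it != NULL; it = tmp) {
		tmp = TAILQ_NEXT(it, link);	/* grab next before freeing */
		free(it);
	}
	return 0;
}
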
Example #2
void
bioq_insert_head(struct bio_queue_head *head, struct bio *bp)
{

	if (head->insert_point == NULL)
		head->last_offset = bp->bio_offset;
	TAILQ_INSERT_HEAD(&head->queue, bp, bio_queue);
}
Example #3
int
pmclog_deconfigure_log(struct pmc_owner *po)
{
	int error;
	struct pmclog_buffer *lb;

	PMCDBG(LOG,CFG,1, "de-config po=%p", po);

	if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
		return (EINVAL);

	KASSERT(po->po_sscount == 0,
	    ("[pmclog,%d] po=%p still owning SS PMCs", __LINE__, po));
	KASSERT(po->po_file != NULL,
	    ("[pmclog,%d] po=%p no log file", __LINE__, po));

	/* stop the kthread, this will reset the 'OWNS_LOGFILE' flag */
	pmclog_stop_kthread(po);

	KASSERT(po->po_kthread == NULL,
	    ("[pmclog,%d] po=%p kthread not stopped", __LINE__, po));

	/* return all queued log buffers to the global pool */
	while ((lb = TAILQ_FIRST(&po->po_logbuffers)) != NULL) {
		TAILQ_REMOVE(&po->po_logbuffers, lb, plb_next);
		PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);
		mtx_lock_spin(&pmc_bufferlist_mtx);
		TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
		mtx_unlock_spin(&pmc_bufferlist_mtx);
	}

	/* return the 'current' buffer to the global pool */
	if ((lb = po->po_curbuf) != NULL) {
		PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);
		mtx_lock_spin(&pmc_bufferlist_mtx);
		TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
		mtx_unlock_spin(&pmc_bufferlist_mtx);
	}

	/* drop a reference to the fd */
	error = fdrop(po->po_file, curthread);
	po->po_file  = NULL;
	po->po_error = 0;

	return (error);
}
Example #4
struct genlist_entry *
genlist_insert (struct genlist *head, void *data)
{
	struct genlist_entry *entry = calloc(1, sizeof(struct genlist_entry));

	if (entry == NULL)	/* don't dereference or insert a failed allocation */
		return NULL;
	entry->data = data;
	TAILQ_INSERT_HEAD(head, entry, chain);
	return entry;
}
Example #5
/* Function add_to_queue
 * Adds the given string, which represents a graph, to the queue.
 * graph: the string representing the graph
 */
void add_to_queue(char *graph){
    struct entry *elem;
    elem = malloc(sizeof(struct entry));
    if(elem == NULL)
        return;    /* a failed allocation must not be inserted */
    elem->graph_text = graph;
    TAILQ_INSERT_HEAD(&head, elem, entries);
}
Example #6
static void gg_dialog_cls(gg_dialog_t *dialog)
{
	if ((dialog->flags & GG_DIALOG_AUTOHIDE_PARENT) && dialog->parent_dialog)
		dialog->parent_dialog->flags &= ~GG_DIALOG_HIDDEN;

	TAILQ_REMOVE(&dialogs, dialog, entries);
	TAILQ_INSERT_HEAD(&closed_dialogs, dialog, entries);
}
Example #7
void
dmsg_put(struct dmsg *dmsg)
{
    log_debug(LOG_VVVERB, "put dmsg %p id %"PRIu64"", dmsg, dmsg->id);

    nfree_dmsgq++;
    TAILQ_INSERT_HEAD(&free_dmsgq, dmsg, m_tqe);
}
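
dmsg_put() above is an instance of a recurring idiom in this list (compare conn_put() and usdf_msg_recv_complete() further down): a freed object is pushed onto the head of a global free queue, so the next allocation reuses the most recently freed, still cache-warm object. A hedged sketch of the matching get/put pair; struct obj, free_objq and nfree_objq are hypothetical names modeled on the fields above.

#include <sys/queue.h>
#include <stdlib.h>

struct obj {
	TAILQ_ENTRY(obj) o_tqe;
	/* ... payload ... */
};

static TAILQ_HEAD(, obj) free_objq = TAILQ_HEAD_INITIALIZER(free_objq);
static unsigned int nfree_objq;

/* Pop the most recently freed object, or allocate when the list is empty. */
static struct obj *
obj_get(void)
{
	struct obj *o = TAILQ_FIRST(&free_objq);

	if (o != NULL) {
		TAILQ_REMOVE(&free_objq, o, o_tqe);
		nfree_objq--;
		return o;
	}
	return malloc(sizeof(*o));
}

/* Recycle: push to the head so obj_get() hands back a cache-warm object. */
static void
obj_put(struct obj *o)
{
	nfree_objq++;
	TAILQ_INSERT_HEAD(&free_objq, o, o_tqe);
}
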
Example #8
enum cmd_retval
cmd_break_pane_exec(struct cmd *self, struct cmd_q *cmdq)
{
	struct args		*args = self->args;
	struct winlink		*wl;
	struct session		*s;
	struct window_pane	*wp;
	struct window		*w;
	char			*name;
	char			*cause;
	int			 base_idx;
	struct client		*c;
	struct format_tree	*ft;
	const char		*template;
	char			*cp;

	if ((wl = cmd_find_pane(cmdq, args_get(args, 't'), &s, &wp)) == NULL)
		return (CMD_RETURN_ERROR);

	if (window_count_panes(wl->window) == 1) {
		cmdq_error(cmdq, "can't break with only one pane");
		return (CMD_RETURN_ERROR);
	}

	w = wl->window;
	server_unzoom_window(w);

	TAILQ_REMOVE(&w->panes, wp, entry);
	if (wp == w->active) {
		w->active = w->last;
		w->last = NULL;
		if (w->active == NULL) {
			w->active = TAILQ_PREV(wp, window_panes, entry);
			if (w->active == NULL)
				w->active = TAILQ_NEXT(wp, entry);
		}
	} else if (wp == w->last)
		w->last = NULL;
	layout_close_pane(wp);

	w = wp->window = window_create1(s->sx, s->sy);
	TAILQ_INSERT_HEAD(&w->panes, wp, entry);
	w->active = wp;
	name = default_window_name(w);
	window_set_name(w, name);
	free(name);
	layout_init(w, wp);

	base_idx = options_get_number(&s->options, "base-index");
	wl = session_attach(s, w, -1 - base_idx, &cause); /* can't fail */
	if (!args_has(self->args, 'd'))
		session_select(s, wl->idx);

	server_redraw_session(s);
	server_status_session_group(s);

	if (args_has(args, 'P')) {
		if ((template = args_get(args, 'F')) == NULL)
Example #9
enum cmd_retval
cmd_break_pane_exec(struct cmd *self, struct cmd_q *cmdq)
{
#ifdef TMATE_SLAVE
    return (CMD_RETURN_ERROR);
#else
    struct args		*args = self->args;
    struct winlink		*wl = cmdq->state.sflag.wl;
    struct session		*src_s = cmdq->state.sflag.s;
    struct session		*dst_s = cmdq->state.tflag.s;
    struct window_pane	*wp = cmdq->state.sflag.wp;
    struct window		*w = wl->window;
    char			*name;
    char			*cause;
    int			 idx = cmdq->state.tflag.idx;
    struct format_tree	*ft;
    const char		*template;
    char			*cp;

    if (idx != -1 && winlink_find_by_index(&dst_s->windows, idx) != NULL) {
        cmdq_error(cmdq, "index %d already in use", idx);
        return (CMD_RETURN_ERROR);
    }

    if (window_count_panes(w) == 1) {
        cmdq_error(cmdq, "can't break with only one pane");
        return (CMD_RETURN_ERROR);
    }
    server_unzoom_window(w);

    TAILQ_REMOVE(&w->panes, wp, entry);
    window_lost_pane(w, wp);
    layout_close_pane(wp);

    w = wp->window = window_create1(dst_s->sx, dst_s->sy);
    TAILQ_INSERT_HEAD(&w->panes, wp, entry);
    w->active = wp;
    name = default_window_name(w);
    window_set_name(w, name);
    free(name);
    layout_init(w, wp);
    wp->flags |= PANE_CHANGED;

    if (idx == -1)
        idx = -1 - options_get_number(dst_s->options, "base-index");
    wl = session_attach(dst_s, w, idx, &cause); /* can't fail */
    if (!args_has(self->args, 'd'))
        session_select(dst_s, wl->idx);

    server_redraw_session(src_s);
    if (src_s != dst_s)
        server_redraw_session(dst_s);
    server_status_session_group(src_s);
    if (src_s != dst_s)
        server_status_session_group(dst_s);

    if (args_has(args, 'P')) {
        if ((template = args_get(args, 'F')) == NULL)
Example #10
File: move.c Project: Chr1stoph/i3
/*
 * This function detaches 'con' from its parent and inserts it either before or
 * after 'target'.
 *
 */
static void insert_con_into(Con *con, Con *target, position_t position) {
    Con *parent = target->parent;
    /* We need to preserve the old con->parent. While it might still be used to
     * insert the entry before/after it, we call the on_remove_child callback
     * afterwards which might then close the con if it is empty. */
    Con *old_parent = con->parent;

    con_detach(con);
    con_fix_percent(con->parent);

    /* When moving to a workspace, we respect the user’s configured
     * workspace_layout */
    if (parent->type == CT_WORKSPACE) {
        Con *split = workspace_attach_to(parent);
        if (split != parent) {
            DLOG("Got a new split con, using that one instead\n");
            con->parent = split;
            con_attach(con, split, false);
            DLOG("attached\n");
            con->percent = 0.0;
            con_fix_percent(split);
            con = split;
            DLOG("ok, continuing with con %p instead\n", con);
            con_detach(con);
        }
    }

    con->parent = parent;

    if (position == BEFORE) {
        TAILQ_INSERT_BEFORE(target, con, nodes);
        TAILQ_INSERT_HEAD(&(parent->focus_head), con, focused);
    } else if (position == AFTER) {
        TAILQ_INSERT_AFTER(&(parent->nodes_head), target, con, nodes);
        TAILQ_INSERT_HEAD(&(parent->focus_head), con, focused);
    }

    /* Pretend the con was just opened with regards to size percent values.
     * Since the con is moved to a completely different con, the old value
     * does not make sense anyways. */
    con->percent = 0.0;
    con_fix_percent(parent);

    CALL(old_parent, on_remove_child);
}
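
The i3 example also shows an asymmetry of the API worth remembering: TAILQ_INSERT_BEFORE takes no head argument, since each entry's back pointer already locates its predecessor, while TAILQ_INSERT_AFTER does need the head. A small sketch of positional insertion under those assumptions; struct node and insert_relative are hypothetical.

#include <sys/queue.h>

struct node {
	int val;
	TAILQ_ENTRY(node) link;
};
TAILQ_HEAD(node_head, node);

/* Insert n next to target, mirroring insert_con_into() above. */
static void
insert_relative(struct node_head *head, struct node *target,
    struct node *n, int before)
{
	if (before)
		TAILQ_INSERT_BEFORE(target, n, link);		/* no head argument */
	else
		TAILQ_INSERT_AFTER(head, target, n, link);	/* head required */
}
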
Example #11
void
spifi_free_scb(struct spifi_softc *sc, struct spifi_scb *scb)
{
	int s;

	s = splbio();
	TAILQ_INSERT_HEAD(&sc->free_scb, scb, chain);
	splx(s);
}
Example #12
static void
dev_free_devlocked(struct cdev *cdev)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);
	cdp = cdev2priv(cdev);
	TAILQ_INSERT_HEAD(&cdevp_free_list, cdp, cdp_list);
}
Example #13
static __inline void
amr_requeue_ccb(struct amr_softc *sc, union ccb *ccb)
{
    int		s;

    s = splbio();
    TAILQ_INSERT_HEAD(&sc->amr_cam_ccbq, &ccb->ccb_h, sim_links.tqe);
    splx(s);
}
Example #14
void rd_lru_push (rd_lru_t *rlru, void *ptr) {
	rd_lru_elm_t *rlrue;

	rlrue = calloc(1, sizeof(*rlrue));
	rlrue->rlrue_ptr = ptr;

	TAILQ_INSERT_HEAD(&rlru->rlru_elms, rlrue, rlrue_link);
	rlru->rlru_cnt++;
}
Example #15
void thread_init_function(void)
{
	if(!threadList.isInitialized)
	{

		threadList.isInitialized = TRUE;
		TAILQ_INIT(&threadList.list);
		TAILQ_INIT(&threadList.list_sleeping);
		TAILQ_INIT(&threadList.list_dead);

		atexit(threads_destroy);

		// retrieve the current context, store it in threadList.mainThread, and add it to the list
		thread_t thread = malloc(sizeof(struct thread_t_));
		if(thread == NULL)
		{
			perror("malloc");
			return;
		}

		#ifdef DEBUG_MODE
		thread->id = 0;
		thread->nb_calls = 0;
		#endif

		thread->state = READY;
		thread->retval = NULL;
		thread->default_priority = DEFAULT_PRIORITY;
		thread->current_priority = DEFAULT_PRIORITY;

		getcontext(&(thread->context));

		thread->valgrind_stackid = VALGRIND_STACK_REGISTER((thread->context).uc_stack.ss_sp,
								   (thread->context).uc_stack.ss_sp +
								   (thread->context).uc_stack.ss_size);


		threadList.max_priority = 1;
		threadList.mainThread = thread;
		threadList.currentThread = thread;
		TAILQ_INSERT_HEAD(&(threadList.list), thread, entries);


		getcontext(&return_t);
		return_t.uc_stack.ss_size = STACK_SIZE;
		return_t.uc_stack.ss_sp = malloc(STACK_SIZE);

		if(return_t.uc_stack.ss_sp == NULL)
		{
			perror("malloc");
			return;
		}

		return_t.uc_link = NULL;
		makecontext(&return_t, (void (*)(void))thread_return, 0);
	}
}
Example #16
void
arm_add_smallalloc_pages(void *list, void *mem, int bytes, int pagetable)
{
	struct arm_small_page *pg;
	
	bytes &= ~PAGE_MASK;
	while (bytes > 0) {
		pg = (struct arm_small_page *)list;
		pg->addr = mem;
		if (pagetable)
			TAILQ_INSERT_HEAD(&pages_wt, pg, pg_list);
		else
			TAILQ_INSERT_HEAD(&pages_normal, pg, pg_list);
		list = (char *)list + sizeof(*pg);
		mem = (char *)mem + PAGE_SIZE;
		bytes -= PAGE_SIZE;
	}
}
Example #17
static void
recover_pts(tsfix_t *tf, tfstream_t *tfs, th_pkt_t *pkt)
{
  th_pktref_t *pr, *srch;

  pktref_enqueue(&tf->tf_ptsq, pkt);

  while((pr = TAILQ_FIRST(&tf->tf_ptsq)) != NULL) {
    
    pkt = pr->pr_pkt;
    TAILQ_REMOVE(&tf->tf_ptsq, pr, pr_link);

    tfs = tfs_find(tf, pkt);

    switch(tfs->tfs_type) {

    case SCT_MPEG2VIDEO:

      switch(pkt->pkt_frametype) {
      case PKT_B_FRAME:
	/* B-frames have same PTS as DTS, pass them on */
	pkt->pkt_pts = pkt->pkt_dts;
	tsfixprintf("TSFIX: %-12s PTS b-frame set to %"PRId64"\n",
		    streaming_component_type2txt(tfs->tfs_type),
		    pkt->pkt_dts);
	break;
      
      case PKT_I_FRAME:
      case PKT_P_FRAME:
	/* Presentation occurs at the DTS of the next I or P frame,
	   try to find it */
	TAILQ_FOREACH(srch, &tf->tf_ptsq, pr_link)
	  if (tfs_find(tf, srch->pr_pkt) == tfs &&
	      srch->pr_pkt->pkt_frametype <= PKT_P_FRAME) {
	    pkt->pkt_pts = srch->pr_pkt->pkt_dts;
	    tsfixprintf("TSFIX: %-12s PTS *-frame set to %"PRId64"\n",
			streaming_component_type2txt(tfs->tfs_type),
			pkt->pkt_pts);
	    break;
	  }
	if (srch == NULL) {
	  /* return packet back to tf_ptsq */
	  TAILQ_INSERT_HEAD(&tf->tf_ptsq, pr, pr_link);
	  return; /* not arrived yet, wait */
        }
      }
      break;

    default:
      break;
    }

    free(pr);
    normalize_ts(tf, tfs, pkt);
  }
}
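
recover_pts() exploits the fact that TAILQ_INSERT_HEAD exactly undoes a TAILQ_FIRST/TAILQ_REMOVE pop, so a "pop, inspect, maybe push back" loop leaves the queue order intact when an item is not yet processable. A minimal sketch of that shape; the struct work type and drain_ready are hypothetical.

#include <sys/queue.h>
#include <stdbool.h>
#include <stdlib.h>

struct work {
	bool ready;
	TAILQ_ENTRY(work) link;
};
TAILQ_HEAD(work_head, work);

/* Drain ready items; on the first unready one, push it back at the
 * head so the queue looks untouched for the next attempt. */
static void
drain_ready(struct work_head *pending)
{
	struct work *w;

	while ((w = TAILQ_FIRST(pending)) != NULL) {
		TAILQ_REMOVE(pending, w, link);
		if (!w->ready) {
			TAILQ_INSERT_HEAD(pending, w, link);	/* undo the pop */
			return;
		}
		free(w);	/* item processed */
	}
}
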
Example #18
static struct mbuf_block *
_mbuf_block_get(struct mbuf_pool *pool)
{
	struct mbuf_block *mbuf_block;
	char *buf;

	if (!STAILQ_EMPTY(&pool->free_mbuf_blockq)) {
		assert(pool->nfree_mbuf_blockq > 0);

		mbuf_block = STAILQ_FIRST(&pool->free_mbuf_blockq);
		pool->nfree_mbuf_blockq--;
		STAILQ_REMOVE_HEAD(&pool->free_mbuf_blockq, next);

		assert(mbuf_block->magic == MBUF_BLOCK_MAGIC);
		goto done;
	}

	buf = (char *) malloc(pool->mbuf_block_chunk_size);
	if (OXT_UNLIKELY(buf == NULL)) {
		return NULL;
	}

	/*
	 * mbuf_block header is at the tail end of the mbuf_block. This enables us to catch
	 * buffer overrun early by asserting on the magic value during get or
	 * put operations
	 *
	 *   <------------- mbuf_block_chunk_size ------------------->
	 *   +-------------------------------------------------------+
	 *   |       mbuf_block data          |  mbuf_block header   |
	 *   |     (mbuf_block_offset)        | (struct mbuf_block)  |
	 *   +-------------------------------------------------------+
	 *   ^                                ^^
	 *   |                                ||
	 *   \                                |\
	 * block->start                       | block->end (one byte past valid bound)
	 *                                    \
	 *                                    block
	 *
	 */
	mbuf_block = (struct mbuf_block *)(buf + pool->mbuf_block_offset);
	mbuf_block->magic = MBUF_BLOCK_MAGIC;
	mbuf_block->pool  = pool;
	mbuf_block->refcount = 1;

done:
	STAILQ_NEXT(mbuf_block, next) = NULL;
	#ifdef MBUF_ENABLE_DEBUGGING
		TAILQ_INSERT_HEAD(&pool->active_mbuf_blockq, mbuf_block, active_q);
	#endif
	#ifdef MBUF_ENABLE_BACKTRACES
		mbuf_block->backtrace = strdup(oxt::thread::current_backtrace().c_str());
	#endif
	pool->nactive_mbuf_blockq++;
	return mbuf_block;
}
Example #19
/* 
 * Insertion is O(n) due to the priority scan, but optimises to O(1)
 * if all priorities are identical.
 *
 * MPSAFE
 */
eventhandler_tag
eventhandler_register(struct eventhandler_list *list, const char *name, 
		      void *func, void *arg, int priority)
{
    struct eventhandler_entry_generic	*eg;
    struct eventhandler_entry		*ep;
    
    lwkt_gettoken(&evlist_token);

    /*
     * find/create the list as needed
     */
    while (list == NULL) {
	list = eventhandler_find_list(name);
	if (list)
		break;
	list = kmalloc(sizeof(struct eventhandler_list) + strlen(name) + 1,
		       M_EVENTHANDLER, M_INTWAIT);
	if (eventhandler_find_list(name)) {
	    kfree(list, M_EVENTHANDLER);
	    list = NULL;
	} else {
	    list->el_flags = 0;
	    list->el_name = (char *)list + sizeof(struct eventhandler_list);
	    strcpy(list->el_name, name);
	    TAILQ_INSERT_HEAD(&eventhandler_lists, list, el_link);
	}
    }

    if (!(list->el_flags & EHE_INITTED)) {
	TAILQ_INIT(&list->el_entries);
	list->el_flags = EHE_INITTED;
    }
    
    /* allocate an entry for this handler, populate it */
    eg = kmalloc(sizeof(struct eventhandler_entry_generic),
		M_EVENTHANDLER, M_INTWAIT);
    eg->func = func;
    eg->ee.ee_arg = arg;
    eg->ee.ee_priority = priority;
    
    /* sort it into the list */
    for (ep = TAILQ_FIRST(&list->el_entries);
	 ep != NULL; 
	 ep = TAILQ_NEXT(ep, ee_link)) {
	if (eg->ee.ee_priority < ep->ee_priority) {
	    TAILQ_INSERT_BEFORE(ep, &eg->ee, ee_link);
	    break;
	}
    }
    if (ep == NULL)
	TAILQ_INSERT_TAIL(&list->el_entries, &eg->ee, ee_link);
    lwkt_reltoken(&evlist_token);

    return(&eg->ee);
}
Example #20
File: usdf_msg.c Project: ORNL/ompi
static void inline
usdf_msg_recv_complete(struct usdf_ep *ep, struct usdf_msg_qe *rqe)
{
	struct usdf_cq_hard *hcq;

	hcq = ep->ep_rx->r.msg.rx_hcq;
	hcq->cqh_post(hcq, rqe->ms_context, rqe->ms_length);

	TAILQ_INSERT_HEAD(&ep->ep_rx->r.msg.rx_free_rqe, rqe, ms_link);
}
Example #21
void
dmsg_put(struct dmsg *dmsg)
{
#ifdef DN_DEBUG_LOG
    log_debug(LOG_VVERB, "put dmsg %p id %"PRIu64"", dmsg, dmsg->id);
#endif

    nfree_dmsgq++;
    TAILQ_INSERT_HEAD(&free_dmsgq, dmsg, m_tqe);
}
Example #22
int
dpaa2_create_dpbp_device(
		int dpbp_id)
{
	struct dpaa2_dpbp_dev *dpbp_node;
	int ret;

	if (!dpbp_dev_list) {
		dpbp_dev_list = malloc(sizeof(struct dpbp_device_list));
		if (!dpbp_dev_list) {
			PMD_INIT_LOG(ERR, "Memory alloc failed in DPBP list\n");
			return -1;
		}
		/* Initialize the DPBP List */
		TAILQ_INIT(dpbp_dev_list);
	}

	/* Allocate DPAA2 dpbp handle */
	dpbp_node = (struct dpaa2_dpbp_dev *)
			malloc(sizeof(struct dpaa2_dpbp_dev));
	if (!dpbp_node) {
		PMD_INIT_LOG(ERR, "Memory allocation failed for DPBP Device");
		return -1;
	}

	/* Open the dpbp object */
	dpbp_node->dpbp.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
	ret = dpbp_open(&dpbp_node->dpbp,
			CMD_PRI_LOW, dpbp_id, &dpbp_node->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Resource alloc failure with err code: %d",
			     ret);
		free(dpbp_node);
		return -1;
	}

	/* Clean the device first */
	ret = dpbp_reset(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_node->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure cleaning dpbp device with"
					" error code %d\n", ret);
		dpbp_close(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_node->token);
		free(dpbp_node);
		return -1;
	}

	dpbp_node->dpbp_id = dpbp_id;
	rte_atomic16_init(&dpbp_node->in_use);

	TAILQ_INSERT_HEAD(dpbp_dev_list, dpbp_node, next);

	PMD_INIT_LOG(DEBUG, "Buffer pool resource initialized %d", dpbp_id);

	return 0;
}
Example #23
/*
 * Allocation cache slot and memory
 */
int nvmed_cache_alloc(NVMED* nvmed, unsigned int size, bool lazy_init) {
	int i;
	unsigned int req_size;
	NVMED_CACHE_SLOT *slot;
	NVMED_CACHE *info;
	u64 *paList;

	pthread_spin_lock(&nvmed->mngt_lock);

	/* every early exit must drop the management lock taken above */
	if(size == 0) {
		pthread_spin_unlock(&nvmed->mngt_lock);
		return -NVMED_FAULT;
	}
	if(size == nvmed->num_cache_size) {
		pthread_spin_unlock(&nvmed->mngt_lock);
		return 0;
	}
	if(size < nvmed->num_cache_size) {
		nvmed_printf("%s: Cache shrinking is not supported\n", nvmed->ns_path);
		pthread_spin_unlock(&nvmed->mngt_lock);
		return -NVMED_FAULT;
	}
	
	req_size = size - nvmed->num_cache_size;
	slot = malloc(sizeof(NVMED_CACHE_SLOT));
	
	slot->cache_info = malloc(sizeof(NVMED_CACHE) * req_size);
	slot->cache_ptr = mmap(NULL, PAGE_SIZE * req_size, PROT_READ | PROT_WRITE, 
			MAP_ANONYMOUS | MAP_LOCKED | MAP_SHARED, -1, 0);
	slot->size = req_size;
	LIST_INSERT_HEAD(&nvmed->slot_head, slot, slot_list);
	
	/* Initialize memory and translate virt to phys addr */
	if(!lazy_init) {
		paList = malloc(sizeof(u64) * req_size);
		virt_to_phys(nvmed, slot->cache_ptr, paList, PAGE_SIZE * req_size);
	}

	/* fill cache info and add to free list */
	for(i=0; i<req_size; i++) {
		info = slot->cache_info + i;
		info->lpaddr = 0;
		info->ref = 0;
		if(lazy_init == false) {
			info->paddr = paList[i];
			FLAG_SET(info, CACHE_FREE);
		}
		else {
			info->paddr = 0;
			FLAG_SET(info, CACHE_UNINIT | CACHE_FREE);
		}
		info->ptr = slot->cache_ptr + (i*PAGE_SIZE);

		TAILQ_INSERT_HEAD(&nvmed->free_head, info, cache_list);
	}
	
	nvmed->num_cache_size = size;

	pthread_spin_unlock(&nvmed->mngt_lock);

	return req_size;
}
Example #24
void
viomb_deflate(struct viomb_softc *sc)
{
	struct virtio_softc *vsc = (struct virtio_softc *)sc->sc_virtio;
	struct balloon_req *b;
	struct vm_page *p;
	struct virtqueue *vq = &sc->sc_vq[VQ_DEFLATE];
	u_int64_t nvpages;
	int i, slot;

	nvpages = sc->sc_actual - sc->sc_npages;
	if (nvpages > PGS_PER_REQ)
		nvpages = PGS_PER_REQ;
	b = &sc->sc_req;
	b->bl_nentries = nvpages;

	TAILQ_INIT(&b->bl_pglist);
	for (i = 0; i < nvpages; i++) {
		p = TAILQ_FIRST(&sc->sc_balloon_pages);
		if (p == NULL) {
			/* i pages were dequeued before the list ran dry */
			b->bl_nentries = i;
			break;
		}
		TAILQ_REMOVE(&sc->sc_balloon_pages, p, pageq);
		TAILQ_INSERT_TAIL(&b->bl_pglist, p, pageq);
		b->bl_pages[i] = p->phys_addr / VIRTIO_PAGE_SIZE;
	}

	if (virtio_enqueue_prep(vq, &slot)) {
		printf("%s:virtio_get_slot(def) vq_num %d\n",
		       DEVNAME(sc), vq->vq_num);
		goto err;
	}
	if (virtio_enqueue_reserve(vq, slot, 1)) {
		printf("%s:virtio_enqueue_reserve() vq_num %d\n",
		       DEVNAME(sc), vq->vq_num);
		goto err;
	}
	bus_dmamap_sync(vsc->sc_dmat, b->bl_dmamap, 0,
		    sizeof(u_int32_t) * nvpages,
		    BUS_DMASYNC_PREWRITE);
	virtio_enqueue_p(vq, slot, b->bl_dmamap, 0,
			 sizeof(u_int32_t) * nvpages, VRING_READ);

	if (!(vsc->sc_features & VIRTIO_BALLOON_F_MUST_TELL_HOST))
		uvm_pglistfree(&b->bl_pglist);
	virtio_enqueue_commit(vsc, vq, slot, VRING_NOTIFY);
	return;
err:
	while ((p = TAILQ_LAST(&b->bl_pglist, pglist))) {
		TAILQ_REMOVE(&b->bl_pglist, p, pageq);
		TAILQ_INSERT_HEAD(&sc->sc_balloon_pages, p, pageq);
	}
	return;
}
Example #25
void *
uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	void *ret;
	struct arm_small_page *sp;
	TAILQ_HEAD(,arm_small_page) *head;
	vm_page_t m;

	*flags = UMA_SLAB_PRIV;
	/*
	 * For CPUs where we setup page tables as write back, there's no
	 * need to maintain two separate pools.
	 */
	if (zone == l2zone && pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt)
		head = (void *)&pages_wt;
	else
		head = (void *)&pages_normal;

	mtx_lock(&smallalloc_mtx);
	sp = TAILQ_FIRST(head);

	if (!sp) {
		int pflags;

		mtx_unlock(&smallalloc_mtx);
		if (zone == l2zone &&
		    pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt) {
			*flags = UMA_SLAB_KMEM;
			ret = ((void *)kmem_malloc(kmem_arena, bytes,
			    M_NOWAIT));
			return (ret);
		}
		pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;
		for (;;) {
			m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
			if (m == NULL) {
				if (wait & M_NOWAIT)
					return (NULL);
				VM_WAIT;
			} else
				break;
		}
		ret = (void *)arm_ptovirt(VM_PAGE_TO_PHYS(m));
		if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
			bzero(ret, PAGE_SIZE);
		return (ret);
	}
	TAILQ_REMOVE(head, sp, pg_list);
	TAILQ_INSERT_HEAD(&free_pgdesc, sp, pg_list);
	ret = sp->addr;
	mtx_unlock(&smallalloc_mtx);
	if ((wait & M_ZERO))
		bzero(ret, bytes);
	return (ret);
}
Example #26
static void
dev_free_devlocked(struct cdev *cdev)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);
	cdp = cdev2priv(cdev);
	KASSERT((cdp->cdp_flags & CDP_UNREF_DTR) == 0,
	    ("destroy_dev() was not called after delist_dev(%p)", cdev));
	TAILQ_INSERT_HEAD(&cdevp_free_list, cdp, cdp_list);
}
Example #27
int qw_push(void *handle, void *data)
{
    struct qw_node *node = NULL;
    QW *qw_p = (QW *)handle;

    node = (struct qw_node *) malloc(sizeof(struct qw_node));
    if (node == NULL)    /* report allocation failure instead of crashing */
        return (-1);
    node->d = data;
    TAILQ_INSERT_HEAD(&qw_p->h, node, entries);

    return (0);
}
Example #28
void
conn_put(struct conn *conn)
{
    ASSERT(conn->sd < 0);
    ASSERT(conn->owner == NULL);

    log_debug(LOG_VVERB, "put conn %p", conn);

    nfree_connq++;
    TAILQ_INSERT_HEAD(&free_connq, conn, conn_tqe);
}
Example #29
void
send_buffer_list_delete(struct send_buffer_list *sblist,
    struct send_buffer_list_entry *sblist_entry)
{

	/*
	 * Move item to free list
	 */
	TAILQ_REMOVE(&sblist->list, sblist_entry, entries);
	TAILQ_INSERT_HEAD(&sblist->free_list, sblist_entry, entries);
}
Example #30
static void
nkn_pool_page_free(nkn_page_pool_t *pool, void *page)
{
	nkn_pp_entry_t *ppe = (nkn_pp_entry_t *)page;
	
	pthread_mutex_lock(&pool->lock);
	TAILQ_INSERT_HEAD(&pool->flist, ppe, plist);
	pool->freepages++;
	pool->stats.free++;
	pthread_mutex_unlock(&pool->lock);
}
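
Several examples (spifi_free_scb, amr_requeue_ccb, dev_free_devlocked, nkn_pool_page_free) bracket the insert with a lock: the queue(3) macros do no locking of their own, so any queue shared across threads or interrupt context needs external serialization. A minimal pthread-based sketch; the names page_ent, flist and flist_lock are hypothetical, echoing the last example.

#include <sys/queue.h>
#include <pthread.h>

struct page_ent {
	TAILQ_ENTRY(page_ent) plist;
};

static TAILQ_HEAD(, page_ent) flist = TAILQ_HEAD_INITIALIZER(flist);
static pthread_mutex_t flist_lock = PTHREAD_MUTEX_INITIALIZER;

static void
page_free_locked(struct page_ent *ppe)
{
	pthread_mutex_lock(&flist_lock);	/* the macros do no locking */
	TAILQ_INSERT_HEAD(&flist, ppe, plist);
	pthread_mutex_unlock(&flist_lock);
}
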