Example 1
static PKT_LIST *
select_best(dhcp_smach_t *dsmp)
{
	PKT_LIST	*current = dsmp->dsm_recv_pkt_list;
	PKT_LIST	*next, *best = NULL;
	int		points, best_points = -1;

	/*
	 * pick out the best offer.  point system.
	 * what's important for IPv4?
	 *
	 *	0) DHCP (30 points)
	 *	1) no option overload
	 *	2) encapsulated vendor option (80 points)
	 *	3) non-null sname and siaddr fields
	 *	4) non-null file field
	 *	5) hostname (5 points)
	 *	6) subnetmask (1 point)
	 *	7) router (1 point)
	 */

	for (; current != NULL; current = next) {
		next = current->next;

		points = current->isv6 ?
		    compute_points_v6(current, dsmp) :
		    compute_points_v4(current);

		/*
		 * Just discard any unacceptable entries we encounter.
		 */
		if (points == -1) {
			remque(current);
			free_pkt_entry(current);
			continue;
		}

		dhcpmsg(MSG_DEBUG, "select_best: OFFER had %d points", points);

		/* Special case: stop now and select */
		if (points == -2) {
			best = current;
			break;
		}

		if (points >= best_points) {
			best_points = points;
			best = current;
		}
	}

	if (best != NULL) {
		dhcpmsg(MSG_DEBUG, "select_best: most points: %d", best_points);
		remque(best);
	} else {
		dhcpmsg(MSG_DEBUG, "select_best: no valid OFFER/BOOTP reply");
	}

	return (best);
}
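The scoring helpers compute_points_v4() and compute_points_v6() are not shown above. As a rough illustration of the point system listed in the comment, here is a hypothetical, much-simplified scorer; the struct and field names are invented for this sketch and are not taken from the dhcpagent sources.

/*
 * Hypothetical scorer illustrating the point system above.
 * Not dhcpagent code; offer_summary and its fields are made up.
 */
struct offer_summary {
	int	is_dhcp;		/* DHCPOFFER rather than a plain BOOTP reply */
	int	has_vendor_opt;		/* encapsulated vendor option present */
	int	has_hostname;
	int	has_subnet_mask;
	int	has_router;
};

int
score_offer(const struct offer_summary *o)
{
	int points = 0;

	if (o->is_dhcp)
		points += 30;
	if (o->has_vendor_opt)
		points += 80;
	if (o->has_hostname)
		points += 5;
	if (o->has_subnet_mask)
		points += 1;
	if (o->has_router)
		points += 1;

	return (points);
}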
Example 2
int lck$deqlock(struct _lkb *lck, int flags, unsigned int lkid) {
  vmslock(&SPIN_SCS,IPL$_SCS);
  struct _rsb * res = lck->lkb$l_rsb;
  int newmode;

  remque(&lck->lkb$l_ownqfl,0);
  remque(&lck->lkb$l_sqfl,0);

  // if no locks remain on the resource, the resource itself is freed below

  newmode=find_highest(lck,res);

  res->rsb$b_fgmode=newmode;
  res->rsb$b_ggmode=newmode;
  res->rsb$b_cgmode=newmode;

  grant_queued(res,0,1,1);

  if (lck->lkb$b_state) {
  }
  kfree(lck);
  lockidtbl[lkid] = lkid + 1;
  if (aqempty(&res->rsb$l_grqfl) && aqempty(&res->rsb$l_cvtqfl) && aqempty(&res->rsb$l_wtqfl) && aqempty(&res->rsb$l_rrsfl) && aqempty(&res->rsb$l_srsfl)) {
    remque(res, 0);
    kfree(res);
  }
  vmsunlock(&SPIN_SCS,IPL$_ASTDEL);
}
Example 3
void
m_free(struct mbuf *m)
{

  DEBUG_CALL("m_free");
  DEBUG_ARG("m = %lx", (long )m);

  if(m) {
	/* Remove from m_usedlist */
	if (m->m_flags & M_USEDLIST)
	   remque(m);

	/* If it's M_EXT, free() it */
	if (m->m_flags & M_EXT)
	   free(m->m_ext);

	/* Either free() it or put it on the free list */
	if (m->m_flags & M_DOFREE) {
		free(m);
		mbuf_alloced--;
	} else if ((m->m_flags & M_FREELIST) == 0) {
		insque(m,&m_freelist);
		m->m_flags = M_FREELIST; 
	}
  } 
}
Example 4
static int istgt_ktarget_destroy(int tid)
{
	int err;
	struct target* target;

	if (!(target = target_find_by_id(tid)))
		return -ENOENT;

	if (target->nr_sessions)
		return -EBUSY;

	if (!list_empty(&target->sessions_list)) {
		eprintf("bug still have sessions %d\n", tid);
		exit(-1);
	}

	err = ktarget_destroy(tid);
	if (err < 0)
		return err;

	remque(&target->tlist);

	free(target);

	return 0;
}
Example 5
/* FUNCTION: tcp_close()
 *
 * Close a TCP control block:
 *   discard all space held by the tcp
 *   discard internet protocol block
 *   wake up any sleepers
 *
 * 
 * PARAM1: struct tcpcb *tp
 *
 * RETURNS: NULL; the TCP control block has been freed.
 */
struct tcpcb * 
tcp_close(struct tcpcb *tp)
{
   struct tcpiphdr *t;
   struct inpcb *inp = tp->t_inpcb;
   struct socket *so = inp->inp_socket;

   t = tp->seg_next;
   while (t != (struct tcpiphdr *)tp) 
   {
      struct mbuf *m;

      t = (struct tcpiphdr *)t->ti_next;
      m = dtom(t->ti_prev);
      remque(t->ti_prev);
      M_FREEM(m);
   }
   if (tp->t_template)
      TPH_FREE(tp->t_template);
   TCB_FREE(tp);
   inp->inp_ppcb = (char *)NULL;
   soisdisconnected(so);
   in_pcbdetach(inp);
   tcpstat.tcps_closed++;
   return ((struct tcpcb *)NULL);
}
Example 6
mpqueue_head_t *
timer_call_dequeue_unlocked(
	timer_call_t 		call)
{
	call_entry_t	entry = CE(call);
	mpqueue_head_t	*old_queue;

	DBG("timer_call_dequeue_unlocked(%p)\n", call);

	simple_lock(&call->lock);
	old_queue = MPQUEUE(entry->queue);
	if (old_queue != NULL) {
		timer_call_lock_spin(old_queue);
		if (call->async_dequeue) {
			/* collision (1c): null queue pointer and reset flag */
			call->async_dequeue = FALSE;
#if TIMER_ASSERT
			timer_call_dequeue_unlocked_async1++;
#endif
		} else {
			(void)remque(qe(entry));
#if TIMER_ASSERT
			timer_call_dequeue_unlocked_async2++;
#endif
		}
		entry->queue = NULL;
		timer_call_unlock(old_queue);
	}
	simple_unlock(&call->lock);
	return (old_queue);
}
Example 7
void
m_free(struct mbuf *m)
{

  DEBUG_CALL("m_free");
  DEBUG_ARG("m = %p", m);

  if(m) {
	/* Remove from m_usedlist */
	if (m->m_flags & M_USEDLIST)
	   remque(m);

	/* If it's M_EXT, free() it */
	if (m->m_flags & M_EXT)
	   free(m->m_ext);

	/*
	 * Either free() it or put it on the free list
	 */
	if (m->m_flags & M_DOFREE) {
		m->slirp->mbuf_alloced--;
		free(m);
	} else if ((m->m_flags & M_FREELIST) == 0) {
		insque(m,&m->slirp->m_freelist);
		m->m_flags = M_FREELIST; /* Clobber other flags */
	}
  } /* if(m) */
}
Example 8
File: mbuf.c Project: Pradeo/Slirp
/*
 * Get an mbuf from the free list; if there are none, malloc one.
 *
 * Because fragmentation can occur if we alloc new mbufs and
 * free old mbufs, we mark all mbufs above mbuf_thresh as M_DOFREE,
 * which tells m_free to actually free() it
 */
struct mbuf *
m_get()
{
	register struct mbuf *m;
	int flags = 0;
	
	DEBUG_CALL("m_get");
	
	if (m_freelist.m_next == &m_freelist) {
		m = (struct mbuf *)malloc(msize);
		if (m == NULL) goto end_error;
		mbuf_alloced++;
		if (mbuf_alloced > mbuf_thresh)
			flags = M_DOFREE;
		if (mbuf_alloced > mbuf_max)
			mbuf_max = mbuf_alloced;
	} else {
		m = m_freelist.m_next;
		remque(m);
	}
	
	/* Insert it in the used list */
	insque(m,&m_usedlist);
	m->m_flags = (flags | M_USEDLIST);
	
	/* Initialise it */
	m->m_size = msize - sizeof(struct m_hdr);
	m->m_data = m->m_dat;
	m->m_len = 0;
	m->m_nextpkt = 0;
	m->m_prevpkt = 0;
end_error:
	DEBUG_ARG("m = %lx", (long )m);
	return m;
}
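The empty-list test above, m_freelist.m_next == &m_freelist, works because the list head is itself a queue element that points at itself when nothing is queued. Below is a minimal standalone sketch of that sentinel idiom, assuming the POSIX insque()/remque() declared in <search.h> and a structure that begins with a forward link followed by a back link, which is the layout those functions expect.

/*
 * Minimal sketch of the self-linked "sentinel head" idiom.
 * Not Slirp code; the node type is invented for this example.
 */
#define _GNU_SOURCE		/* for the insque()/remque() prototypes on glibc */
#include <stdio.h>
#include <search.h>

struct node {
	struct node *next;	/* forward link: must be the first member */
	struct node *prev;	/* back link: must be the second member */
	int          value;
};

int
main(void)
{
	struct node head, a = { 0 }, b = { 0 };

	/* An empty circular list: the head points at itself. */
	head.next = head.prev = &head;
	printf("empty? %d\n", head.next == &head);	/* 1 */

	insque(&a, &head);	/* head <-> a */
	insque(&b, &a);		/* head <-> a <-> b */
	printf("empty? %d\n", head.next == &head);	/* 0 */

	remque(&a);
	remque(&b);
	printf("empty? %d\n", head.next == &head);	/* 1 again */
	return 0;
}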
Example 9
static uintptr_t
iopa_allocinpage(io_pagealloc_t * pa, uint32_t count, uint64_t align)
{
    uint32_t n, s;
    uint64_t avail = pa->avail;

    assert(avail);

    // find strings of count 1 bits in avail
    for (n = count; n > 1; n -= s)
    {
    	s = n >> 1;
    	avail = avail & (avail << s);
    }
    // and aligned
    avail &= align;

    if (avail)
    {
	n = __builtin_clzll(avail);
	pa->avail &= ~((-1ULL << (64 - count)) >> n);
	if (!pa->avail && pa->link.next)
	{
	    remque(&pa->link);
	    pa->link.next = 0;
	}
	return (n * kIOPageAllocChunkBytes + trunc_page((uintptr_t) pa));
    }

    return (0);
}
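The halving loop above is a standard bit trick: repeatedly ANDing avail with a left-shifted copy of itself leaves a bit set only where it tops a run of count consecutive one bits. Here is a small standalone illustration of the same trick; it is ordinary user-space C, not the kernel code, and __builtin_clzll assumes GCC or Clang.

/*
 * Find the top bit of the highest run of `count` consecutive ones,
 * using the same shift-and-AND narrowing as iopa_allocinpage().
 */
#include <stdio.h>
#include <stdint.h>

static int
find_run(uint64_t avail, uint32_t count)
{
	uint32_t n, s;

	for (n = count; n > 1; n -= s) {
		s = n >> 1;
		avail &= avail << s;
	}
	if (avail == 0)
		return -1;			/* no run that long */
	return 63 - __builtin_clzll(avail);	/* bit index, 63 = MSB */
}

int
main(void)
{
	uint64_t mask = (0x1FULL << 5) | (1ULL << 20);	/* bits 5..9 and 20 set */

	printf("%d\n", find_run(mask, 4));	/* 9: bits 6..9 are four ones */
	printf("%d\n", find_run(mask, 6));	/* -1: longest run is five ones */
	return 0;
}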
Example 10
/*
 * Simple removal of the log record from the log buffer queue.
 * Make sure to manage the count of records queued.
 */
static struct nfslog_lr *
remove_lrp_from_lb(struct nfslog_buf *lbp, struct nfslog_lr *lrp)
{
	if (lbp->lrps == lrp) {
		if (lbp->lrps == lbp->lrps->next) {
			lbp->lrps = NULL;
		} else {
			lbp->lrps = lrp->next;
			remque(lrp);
		}
	} else {
		remque(lrp);
	}
	lbp->num_lrps--;
	return (lrp);
}
Example 11
File: socket.c Project: m000/panda
/*
 * remque and free a socket, clobber cache
 */
void
sofree(struct socket *so)
{
  Slirp *slirp = so->slirp;

  soqfree(so, &slirp->if_fastq);
  soqfree(so, &slirp->if_batchq);

  if (so->so_emu==EMU_RSH && so->extra) {
	sofree(so->extra);
	so->extra=NULL;
  }
  if (so == slirp->tcp_last_so) {
      slirp->tcp_last_so = &slirp->tcb;
  } else if (so == slirp->udp_last_so) {
      slirp->udp_last_so = &slirp->udb;
  } else if (so == slirp->icmp_last_so) {
      slirp->icmp_last_so = &slirp->icmp;
  }
  m_free(so->so_m);

  if(so->so_next && so->so_prev)
    remque(so);  /* crashes if so is not in a queue */

  free(so);
}
Example 12
/*
 * Remove a packet descriptor from the in-use descriptor list,
 * called by mmd_rempdesc or during free.
 */
static pdesc_t *
mmd_destroy_pdesc(multidata_t *mmd, pdesc_t *pd)
{
	pdesc_t *pd_next;

	pd_next = Q2PD(pd->pd_next);
	remque(&(pd->pd_next));

	/* remove all local attributes */
	if (pd->pd_pattbl != NULL)
		mmd_destroy_pattbl(&(pd->pd_pattbl));

	/* don't decrease counts for a removed descriptor */
	if (!(pd->pd_flags & PDESC_REM_DEFER)) {
		if (pd->pd_flags & PDESC_HBUF_REF) {
			ASSERT(mmd->mmd_hbuf_ref > 0);
			mmd->mmd_hbuf_ref--;
		}
		if (pd->pd_flags & PDESC_PBUF_REF) {
			ASSERT(mmd->mmd_pbuf_ref > 0);
			mmd->mmd_pbuf_ref -= pd->pd_pdi.pld_cnt;
		}
		ASSERT(mmd->mmd_pd_cnt > 0);
		mmd->mmd_pd_cnt--;
	}
	return (pd_next);
}
Example 13
/*
 * Remove an attribute from a Multidata.
 */
void
mmd_rempattr(pattr_t *pa)
{
	kmutex_t *pat_lock = pa->pat_lock;

	ASSERT(pa->pat_magic == PATTR_MAGIC);

	/* ignore if attribute was marked as persistent */
	if ((pa->pat_flags & PATTR_PERSIST) != 0)
		return;

	mutex_enter(pat_lock);
	/*
	 * We can't deallocate the associated resources if the Multidata
	 * is shared with other threads, because it's possible that the
	 * attribute handle value is held by those threads.  That's why
	 * we simply mark the entry as "removed".  If there are no other
	 * threads, then we free the attribute.
	 */
	if (pa->pat_mmd->mmd_dp->db_ref > 1) {
		pa->pat_flags |= PATTR_REM_DEFER;
	} else {
		remque(&(pa->pat_next));
		kmem_free(pa, pa->pat_buflen);
	}
	mutex_exit(pat_lock);
}
Example 14
/*
 * Destroy an attribute hash table, called by mmd_rempdesc or during free.
 */
static void
mmd_destroy_pattbl(patbkt_t **tbl)
{
	patbkt_t *bkt;
	pattr_t *pa, *pa_next;
	uint_t i, tbl_sz;

	ASSERT(tbl != NULL);
	bkt = *tbl;
	tbl_sz = bkt->pbkt_tbl_sz;

	/* make sure caller passes in the first bucket */
	ASSERT(tbl_sz > 0);

	/* destroy the contents of each bucket */
	for (i = 0; i < tbl_sz; i++, bkt++) {
		/* we ought to be exclusive at this point */
		ASSERT(MUTEX_NOT_HELD(&(bkt->pbkt_lock)));

		pa = Q2PATTR(bkt->pbkt_pattr_q.ql_next);
		while (pa != Q2PATTR(&(bkt->pbkt_pattr_q))) {
			ASSERT(pa->pat_magic == PATTR_MAGIC);
			pa_next = Q2PATTR(pa->pat_next);
			remque(&(pa->pat_next));
			kmem_free(pa, pa->pat_buflen);
			pa = pa_next;
		}
	}

	kmem_cache_free(pattbl_cache, *tbl);
	*tbl = NULL;

	/* commit all previous stores */
	membar_producer();
}
Example 15
struct mbuf *
m_get(void)
{
	register struct mbuf *m;
	int flags = 0;

	DEBUG_CALL("m_get");

	if (m_freelist.m_next == &m_freelist) {
		m = (struct mbuf *)malloc(SLIRP_MSIZE);
		if (m == NULL) goto end_error;
		mbuf_alloced++;
		if (mbuf_alloced > MBUF_THRESH)
			flags = M_DOFREE;
		if (mbuf_alloced > mbuf_max)
			mbuf_max = mbuf_alloced;
	} else {
		m = m_freelist.m_next;
		remque(m);
	}

	/* Insert it in the used list */
	insque(m,&m_usedlist);
	m->m_flags = (flags | M_USEDLIST);

	/* Initialise it */
	m->m_size = SLIRP_MSIZE - sizeof(struct m_hdr);
	m->m_data = m->m_dat;
	m->m_len = 0;
	m->m_nextpkt = NULL;
	m->m_prevpkt = NULL;
end_error:
	DEBUG_ARG("m = %lx", (long )m);
	return m;
}
Example 16
static int
do_test (void)
{
  struct qelem elements[4];
  int ret = 0;

  /* Linear list.  */
  memset (elements, 0xff, sizeof (elements));
  insque (&elements[0], NULL);
  remque (&elements[0]);
  insque (&elements[0], NULL);
  insque (&elements[2], &elements[0]);
  insque (&elements[1], &elements[0]);
  insque (&elements[3], &elements[2]);
  remque (&elements[2]);
  insque (&elements[2], &elements[0]);
  CHECK (elements[0].q_back == NULL);
  CHECK (elements[0].q_forw == &elements[2]);
  CHECK (elements[1].q_back == &elements[2]);
  CHECK (elements[1].q_forw == &elements[3]);
  CHECK (elements[2].q_back == &elements[0]);
  CHECK (elements[2].q_forw == &elements[1]);
  CHECK (elements[3].q_back == &elements[1]);
  CHECK (elements[3].q_forw == NULL);

  /* Circular list.  */
  memset (elements, 0xff, sizeof (elements));
  elements[0].q_back = &elements[0];
  elements[0].q_forw = &elements[0];
  insque (&elements[2], &elements[0]);
  insque (&elements[1], &elements[0]);
  insque (&elements[3], &elements[2]);
  remque (&elements[2]);
  insque (&elements[2], &elements[0]);
  CHECK (elements[0].q_back == &elements[3]);
  CHECK (elements[0].q_forw == &elements[2]);
  CHECK (elements[1].q_back == &elements[2]);
  CHECK (elements[1].q_forw == &elements[3]);
  CHECK (elements[2].q_back == &elements[0]);
  CHECK (elements[2].q_forw == &elements[1]);
  CHECK (elements[3].q_back == &elements[1]);
  CHECK (elements[3].q_forw == &elements[0]);

  return ret;
}
Example 17
static __inline__
void
_delayed_call_dequeue(
	timer_call_t			call)
{
	(void)remque(qe(call));

	call->state = IDLE;
}
Example 18
void
lck_grp_free(
	lck_grp_t	*grp)
{
	lck_mtx_lock(&lck_grp_lock);
	lck_grp_cnt--;
	(void)remque((queue_entry_t)grp);
	lck_mtx_unlock(&lck_grp_lock);
	lck_grp_deallocate(grp);
}
Example 19
/*
 * Remove timer entry from its queue but don't change the queue pointer
 * and set the async_dequeue flag. This is locking case 2b.
 */
static __inline__ void
timer_call_entry_dequeue_async(
	timer_call_t		entry)
{
	mpqueue_head_t	*old_queue = MPQUEUE(CE(entry)->queue);
	if (old_queue) {
		old_queue->count--;
		(void) remque(qe(entry));
		entry->async_dequeue = TRUE;
	}
	return;
}
Example 20
void Lpx_PCB_detach(struct lpxpcb *lpxp )
{
    struct socket *so = lpxp->lpxp_socket;

    so->so_pcb = 0;
    sofree(so);
        
    if (lpxp->lpxp_route.ro_rt != NULL)
        rtfree(lpxp->lpxp_route.ro_rt);
    remque(lpxp);
    FREE(lpxp, M_PCB);
}
Example 21
/* Free all the data cache blocks, thus discarding all cached data.  */
static
void
dcache_flush ()
{
  register struct dcache_block *db;

  while ((db = dcache_valid.next) != &dcache_valid)
    {
      remque (db);
      insque (db, &dcache_free);
    }
}
Example 22
int grant_queued(struct _rsb * res, int ggmode_not, int docvt, int dowt) {
  struct _lkb * head, * tmp;
  int diff;
  int newmode;
  if (docvt) {
    head=&res->rsb$l_cvtqfl;
    tmp=res->rsb$l_cvtqfl;
    diff=((char*)&tmp->lkb$l_sqfl)-((char*)tmp);
    while (head!=tmp) {
      tmp=(struct _lkb *)(((char *)tmp)-diff);
      newmode=res->rsb$b_ggmode;
      if (test_bit(tmp->lkb$b_rqmode,&lck$ar_compat_tbl[res->rsb$b_ggmode])) {
	struct _lkb * lck=tmp;
	struct _lkb * next=tmp->lkb$l_sqfl;
	remque(((long)tmp)+diff,0);
	lck$grant_lock(lck ,res,lck->lkb$b_grmode,lck->lkb$b_rqmode,0,0,res->rsb$b_ggmode);
	tmp=next;
	continue;
      }
      newmode=find_highest(tmp,res);
      if (res->rsb$b_ggmode==tmp->lkb$b_grmode) {
	if (test_bit(tmp->lkb$b_rqmode,&lck$ar_compat_tbl[newmode])) {
	  struct _lkb * lck=tmp;
	  struct _lkb * next=tmp->lkb$l_sqfl;
	  remque(((long)tmp)+diff,0);
	  lck$grant_lock(lck ,res,lck->lkb$b_grmode,lck->lkb$b_rqmode,0,0,res->rsb$b_ggmode);
	  tmp=next;
	  continue;
	} else {
	  res->rsb$b_cgmode=newmode;
	}
      }
      tmp=tmp->lkb$l_sqfl;
    }
  }
  if (dowt) {
  }

}
Example 23
void
OSMalloc_Tagfree(
	 OSMallocTag		tag)
{
	if (!hw_compare_and_store(OSMT_VALID, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state))
		panic("OSMalloc_Tagfree():'%s' has bad state 0x%08X \n", tag->OSMT_name, tag->OSMT_state);

	if (hw_atomic_sub(&tag->OSMT_refcnt, 1) == 0) {
		OSMalloc_tag_spin_lock();
		(void)remque((queue_entry_t)tag);
		OSMalloc_tag_unlock();
		kfree((void*)tag, sizeof(*tag));
	}
}
Example 24
void Lpx_PCB_dispense(struct lpxpcb *lpxp )
{	
	struct stream_pcb *cb = NULL;

	DEBUG_PRINT(DEBUG_MASK_PCB_TRACE, ("Lpx_PCB_dispense: Entered.\n"));

	if (lpxp == 0) {
		return;
	}
	
	cb = (struct stream_pcb *)lpxp->lpxp_pcb;
	
	if (cb != 0) {
		
		register struct lpx_stream_q *q;
				
		for (q = cb->s_q.si_next; q != &cb->s_q; q = q->si_next) {
			q = q->si_prev;
			remque(q->si_next);
		}
		
		m_freem(dtom(cb->s_lpx));
		FREE(cb, M_PCB);
		lpxp->lpxp_pcb = 0;
	}	
	
    // Free Lock.
	if (lpxp->lpxp_mtx != NULL) {
		lck_mtx_free(lpxp->lpxp_mtx, lpxp->lpxp_mtx_grp);  
	}
				
	lck_rw_lock_exclusive(lpxp->lpxp_head->lpxp_list_rw);
	remque(lpxp);
	lck_rw_unlock_exclusive(lpxp->lpxp_head->lpxp_list_rw);
		
	FREE(lpxp, M_PCB);		
}
Example 25
/*
 * remque and free a socket, clobber cache
 */
void
sofree(PNATState pData, struct socket *so)
{
    LogFlowFunc(("ENTER:%R[natsock]\n", so));
    /*
     * We should not remove the socket while the polling routine is doing
     * the polling; instead we mark it for deletion.
     */
    if (so->fUnderPolling)
    {
        so->fShouldBeRemoved = 1;
        LogFlowFunc(("LEAVE:%R[natsock] postponed deletion\n", so));
        return;
    }
    /**
     * Check that we are not freeing a socket that still has a tcpcb attached
     */
    Assert(!sototcpcb(so));
    /* udp checks */
    Assert(!so->so_timeout);
    Assert(!so->so_timeout_arg);
    if (so == tcp_last_so)
        tcp_last_so = &tcb;
    else if (so == udp_last_so)
        udp_last_so = &udb;

    /* check whether the mbuf has already been freed */
    if (so->so_m != NULL)
    {
        m_freem(pData, so->so_m);
        so->so_m = NULL;
    }

    if (so->so_ohdr != NULL)
    {
        RTMemFree(so->so_ohdr);
        so->so_ohdr = NULL;
    }

    if (so->so_next && so->so_prev)
    {
        remque(pData, so);  /* crashes if so is not in a queue */
        NSOCK_DEC();
    }

    RTMemFree(so);
    LogFlowFuncLeave();
}
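The fUnderPolling / fShouldBeRemoved pair above implements a small deferred-deletion handshake: freeing is postponed while the polling code is walking the socket, and carried out once polling is done. A hypothetical, self-contained sketch of the same idea follows; the stub type and functions are invented here and are not the VirtualBox code.

/*
 * Sketch of a deferred-deletion handshake.  Invented stub types;
 * not the VirtualBox NAT implementation.
 */
#include <stdlib.h>

struct sock_stub {
	int fUnderPolling;	/* set while the poll loop is using this socket */
	int fShouldBeRemoved;	/* set when deletion had to be postponed */
};

static void
sofree_stub(struct sock_stub *so)
{
	if (so->fUnderPolling) {
		so->fShouldBeRemoved = 1;	/* only mark; the poller will free it */
		return;
	}
	free(so);
}

int
main(void)
{
	struct sock_stub *so = calloc(1, sizeof(*so));

	if (so == NULL)
		return 1;

	so->fUnderPolling = 1;
	sofree_stub(so);		/* called "from inside the poll": deferred */
	so->fUnderPolling = 0;

	if (so->fShouldBeRemoved)	/* poll finished: do the postponed free */
		sofree_stub(so);
	return 0;
}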
Example 26
void
sv_inactive(vnode_t *vp)
{
	svnode_t *svp;
	rnode4_t *rp;
	vnode_t *mvp;

	sv_stats.sv_inactive++;

	svp = VTOSV(vp);
	rp = VTOR4(vp);
	mvp = rp->r_vnode;

	ASSERT(mvp != vp);

	/*
	 * Remove the shadow vnode from the list.  The serialization
	 * is provided by the svnode list lock.  This could be done
	 * with the r_statelock, but that would require more locking
	 * in the activation path.
	 */

	mutex_enter(&rp->r_svlock);
	mutex_enter(&vp->v_lock);
	/* check if someone slipped in while locks were dropped */
	if (vp->v_count > 1) {
		vp->v_count--;
		mutex_exit(&vp->v_lock);
		mutex_exit(&rp->r_svlock);
		return;
	}
	remque(svp);
	mutex_exit(&vp->v_lock);
	mutex_exit(&rp->r_svlock);

	sv_uninit(svp);
	svp->sv_forw = svp->sv_back = NULL;
	kmem_cache_free(svnode_cache, svp);
	vn_invalid(vp);
	vn_free(vp);

	/* release the reference held by this shadow on the master */

	VN_RELE(mvp);
}
Example 27
void
timer_queue_shutdown(
	mpqueue_head_t		*queue)
{
	timer_call_t		call;
	mpqueue_head_t		*new_queue;
	spl_t			s;

	DBG("timer_queue_shutdown(%p)\n", queue);

	s = splclock();

	/* Note comma operator in while expression re-locking each iteration */
	while (timer_call_lock_spin(queue), !queue_empty(&queue->head)) {
		call = TIMER_CALL(queue_first(&queue->head));
		if (!simple_lock_try(&call->lock)) {
			/*
			 * case (2b) lock order inversion, dequeue and skip
			 * Don't change the call_entry queue back-pointer
			 * but set the async_dequeue field.
			 */
			timer_queue_shutdown_lock_skips++;
			(void) remque(qe(call));
			call->async_dequeue = TRUE;
			timer_call_unlock(queue);
			continue;
		}

		/* remove entry from old queue */
		timer_call_entry_dequeue(call);
		timer_call_unlock(queue);

		/* and queue it on new */
		new_queue = timer_queue_assign(CE(call)->deadline);
		timer_call_lock_spin(new_queue);
		timer_call_entry_enqueue_deadline(
			call, new_queue, CE(call)->deadline);
		timer_call_unlock(new_queue);

		simple_unlock(&call->lock);
	}

	timer_call_unlock(queue);
	splx(s);
}
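The "Note comma operator" remark above highlights a reusable pattern: the queue lock is retaken at the top of every iteration, and only the right-hand operand of the comma decides whether the loop continues, so the body must release the lock on every path that loops back. A minimal sketch of the same pattern, using a pthread mutex as a stand-in for the XNU simple lock:

/*
 * Lock-then-test loop using the comma operator, as in
 * timer_queue_shutdown() above.  Placeholder queue type; not XNU code.
 */
#include <pthread.h>

struct work_queue {
	pthread_mutex_t	lock;
	int		pending;	/* stand-in for the real queue contents */
};

static void
drain(struct work_queue *q)
{
	/* Re-lock each iteration; the emptiness test runs under the lock. */
	while (pthread_mutex_lock(&q->lock), q->pending != 0) {
		q->pending--;			/* "process" one entry */
		pthread_mutex_unlock(&q->lock);	/* drop the lock before looping */
	}
	pthread_mutex_unlock(&q->lock);		/* last acquisition saw it empty */
}

int
main(void)
{
	struct work_queue q;

	pthread_mutex_init(&q.lock, NULL);
	q.pending = 3;
	drain(&q);
	pthread_mutex_destroy(&q.lock);
	return 0;
}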
Example 28
/*
 * remque and free a socket, clobber cache
 */
void sofree(struct socket *so)
{
  if (so->so_emu==EMU_RSH && so->extra) {
	sofree((struct socket*)so->extra);
	so->extra=NULL;
  }
  if (so == tcp_last_so)
    tcp_last_so = &tcb;
  else if (so == udp_last_so)
    udp_last_so = &udb;
	
  m_free(so->so_m);
	
  if(so->so_next && so->so_prev) 
    remque(so);  /* crashes if so is not in a queue */

  free(so);
}
Example 29
/*
 * Assumes call_entry and queues unlocked, interrupts disabled.
 */
__inline__ mpqueue_head_t *
timer_call_enqueue_deadline_unlocked(
	timer_call_t 			call,
	mpqueue_head_t			*queue,
	uint64_t			deadline)
{
	call_entry_t	entry = CE(call);
	mpqueue_head_t	*old_queue;

	DBG("timer_call_enqueue_deadline_unlocked(%p,%p,)\n", call, queue);

	simple_lock(&call->lock);
	old_queue = MPQUEUE(entry->queue);
	if (old_queue != NULL) {
		timer_call_lock_spin(old_queue);
		if (call->async_dequeue) {
			/* collision (1c): null queue pointer and reset flag */
			call->async_dequeue = FALSE;
			entry->queue = NULL;
#if TIMER_ASSERT
			timer_call_enqueue_deadline_unlocked_async1++;
#endif
		} else if (old_queue != queue) {
			(void)remque(qe(entry));
			entry->queue = NULL;
#if TIMER_ASSERT
			timer_call_enqueue_deadline_unlocked_async2++;
#endif
		}
		if (old_queue != queue) {
			timer_call_unlock(old_queue);
			timer_call_lock_spin(queue);
		}
	} else {
		timer_call_lock_spin(queue);
	}

	timer_call_entry_enqueue_deadline(call, queue, deadline);
	timer_call_unlock(queue);
	simple_unlock(&call->lock);

	return (old_queue);
}
Example 30
static void 
iopa_free(uintptr_t addr, vm_size_t bytes)
{
    io_pagealloc_t * pa;
    uint32_t         count;
    uintptr_t        chunk;

    if (!bytes) bytes = 1;

    chunk = (addr & page_mask);
    assert(0 == (chunk & (kIOPageAllocChunkBytes - 1)));

    pa = (typeof(pa)) (addr | (page_size - kIOPageAllocChunkBytes));
    assert(kIOPageAllocSignature == pa->signature);

    count = (bytes + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes;
    chunk /= kIOPageAllocChunkBytes;

    IOSimpleLockLock(gIOPageAllocLock);
    if (!pa->avail)
    {
	assert(!pa->link.next);
	enqueue_tail(&gIOPageAllocList, &pa->link);
    }
    pa->avail |= ((-1ULL << (64 - count)) >> chunk);
    if (pa->avail != -2ULL) pa = 0;
    else
    {
        remque(&pa->link);
        pa->link.next = 0;
        pa->signature = 0;
	gIOPageAllocCount--;
    }
    gIOPageAllocBytes -= bytes;
    IOSimpleLockUnlock(gIOPageAllocLock);
    if (pa) iopa_freepage(pa);

#if IOALLOCDEBUG
    debug_iomalloc_size -= bytes;
#endif
    IOStatisticsAlloc(kIOStatisticsFreeAligned, bytes);
}