Example #1
static void
sender_disconnect(void)
{

	rw_wlock(&adist_remote_lock);
	/*
	 * Check for a race between dropping rlock and acquiring wlock -
	 * another thread can close connection in-between.
	 */
	if (adhost->adh_remote == NULL) {
		rw_unlock(&adist_remote_lock);
		return;
	}
	pjdlog_debug(2, "Closing connection to %s.", adhost->adh_remoteaddr);
	proto_close(adhost->adh_remote);
	mtx_lock(&adist_remote_mtx);
	adhost->adh_remote = NULL;
	adhost->adh_reset = true;
	adhost->adh_trail_name[0] = '\0';
	adhost->adh_trail_offset = 0;
	mtx_unlock(&adist_remote_mtx);
	rw_unlock(&adist_remote_lock);

	pjdlog_warning("Disconnected from %s.", adhost->adh_remoteaddr);

	/* Move all in-flight requests back onto free list. */
	mtx_lock(&adist_free_list_lock);
	mtx_lock(&adist_send_list_lock);
	TAILQ_CONCAT(&adist_free_list, &adist_send_list, adr_next);
	mtx_unlock(&adist_send_list_lock);
	mtx_lock(&adist_recv_list_lock);
	TAILQ_CONCAT(&adist_free_list, &adist_recv_list, adr_next);
	mtx_unlock(&adist_recv_list_lock);
	mtx_unlock(&adist_free_list_lock);
}
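All of these examples splice one tail queue onto another with TAILQ_CONCAT from <sys/queue.h>. As a quick reference, here is a stand-alone user-space sketch of the core semantics (the item/itemq names and the values are illustrative): every element of the second queue is appended to the tail of the first in constant time, and the second queue is left empty. Note that TAILQ_CONCAT is a BSD extension, so older copies of sys/queue.h (for example the one shipped with glibc) may not define it.

#include <sys/queue.h>
#include <stdio.h>

struct item {
	int value;
	TAILQ_ENTRY(item) link;
};
TAILQ_HEAD(itemq, item);

int
main(void)
{
	struct itemq a = TAILQ_HEAD_INITIALIZER(a);
	struct itemq b = TAILQ_HEAD_INITIALIZER(b);
	struct item items[4];
	struct item *it;
	int i;

	/* Put 0 and 1 on 'a', 2 and 3 on 'b'. */
	for (i = 0; i < 4; i++) {
		items[i].value = i;
		if (i < 2)
			TAILQ_INSERT_TAIL(&a, &items[i], link);
		else
			TAILQ_INSERT_TAIL(&b, &items[i], link);
	}

	/* Append all of 'b' to the tail of 'a'; 'b' is left empty. */
	TAILQ_CONCAT(&a, &b, link);

	printf("b empty: %d\n", TAILQ_EMPTY(&b));	/* prints "b empty: 1" */
	TAILQ_FOREACH(it, &a, link)			/* prints "0 1 2 3" */
		printf("%d ", it->value);
	printf("\n");
	return (0);
}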
Example #2
/*
 * Free all the memory collected while the cdev mutex was
 * locked. Since devmtx is after the system map mutex, free() cannot
 * be called immediately and is postponed until cdev mutex can be
 * dropped.
 */
static void
dev_unlock_and_free(void)
{
	struct cdev_priv_list cdp_free;
	struct free_cdevsw csw_free;
	struct cdev_priv *cdp;
	struct cdevsw *csw;

	mtx_assert(&devmtx, MA_OWNED);

	/*
	 * Make the local copy of the list heads while the dev_mtx is
	 * held. Free it later.
	 */
	TAILQ_INIT(&cdp_free);
	TAILQ_CONCAT(&cdp_free, &cdevp_free_list, cdp_list);
	csw_free = cdevsw_gt_post_list;
	SLIST_INIT(&cdevsw_gt_post_list);

	mtx_unlock(&devmtx);

	while ((cdp = TAILQ_FIRST(&cdp_free)) != NULL) {
		TAILQ_REMOVE(&cdp_free, cdp, cdp_list);
		devfs_free(&cdp->cdp_c);
	}
	while ((csw = SLIST_FIRST(&csw_free)) != NULL) {
		SLIST_REMOVE_HEAD(&csw_free, d_postfree_list);
		free(csw, M_DEVT);
	}
}
Example #3
int Completed_Queue::concat_queue( Operation_Queue *op_queue )
{
	if( op_queue->empty( ) )
		return 0;
#ifdef THREAD_SAFE
	mutex_.acquire( );
#endif
	TAILQ_CONCAT( operations_, op_queue->operations_, field_ );
#ifdef THREAD_SAFE
	condition_.signal( );
	mutex_.release( );
#endif
	return 0;
}
Example #4
/**
 * Move 'cnt' entries from 'srcq' to 'dstq'.
 * If 'cnt' == -1 all entries will be moved.
 * Returns the number of entries moved.
 */
int rd_kafka_q_move_cnt (rd_kafka_q_t *dstq, rd_kafka_q_t *srcq,
			    int cnt, int do_locks) {
	rd_kafka_op_t *rko;
        int mcnt = 0;

        if (do_locks) {
		mtx_lock(&srcq->rkq_lock);
		mtx_lock(&dstq->rkq_lock);
	}

	if (!dstq->rkq_fwdq && !srcq->rkq_fwdq) {
		if (cnt > 0 && dstq->rkq_qlen == 0)
			rd_kafka_q_io_event(dstq);

		/* Optimization, if 'cnt' is equal/larger than all
		 * items of 'srcq' we can move the entire queue. */
		if (cnt == -1 ||
                    cnt >= (int)srcq->rkq_qlen) {
                        rd_dassert(TAILQ_EMPTY(&srcq->rkq_q) ||
                                   srcq->rkq_qlen > 0);
			TAILQ_CONCAT(&dstq->rkq_q, &srcq->rkq_q, rko_link);
			mcnt = srcq->rkq_qlen;
                        dstq->rkq_qlen += srcq->rkq_qlen;
                        dstq->rkq_qsize += srcq->rkq_qsize;
			rd_kafka_q_reset(srcq);
		} else {
			while (mcnt < cnt &&
			       (rko = TAILQ_FIRST(&srcq->rkq_q))) {
				TAILQ_REMOVE(&srcq->rkq_q, rko, rko_link);
				TAILQ_INSERT_TAIL(&dstq->rkq_q, rko, rko_link);
                                srcq->rkq_qlen--;
                                dstq->rkq_qlen++;
                                srcq->rkq_qsize -= rko->rko_len;
                                dstq->rkq_qsize += rko->rko_len;
				mcnt++;
			}
		}
	} else
		mcnt = rd_kafka_q_move_cnt(dstq->rkq_fwdq ? dstq->rkq_fwdq:dstq,
					   srcq->rkq_fwdq ? srcq->rkq_fwdq:srcq,
					   cnt, do_locks);

	if (do_locks) {
		mtx_unlock(&dstq->rkq_lock);
		mtx_unlock(&srcq->rkq_lock);
	}

	return mcnt;
}
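The fast path in this example is the point of interest: when 'cnt' covers the whole source queue, a single TAILQ_CONCAT relinks the head and tail pointers in O(1), while the fallback loop pays one TAILQ_REMOVE/TAILQ_INSERT_TAIL per element and updates the counters as it goes. Below is a minimal sketch of the same idea without the librdkafka-specific bookkeeping; the msg/msgq names and the external 'srclen' counter are assumptions made for the illustration, and the caller is expected to hold whatever locks protect both queues.

#include <sys/queue.h>

struct msg {
	TAILQ_ENTRY(msg) link;
};
TAILQ_HEAD(msgq, msg);

/*
 * Move up to 'n' entries (all of them if n == -1) from 'src' to the tail
 * of 'dst'.  '*srclen' tracks the current length of 'src'.  Returns the
 * number of entries moved.
 */
static int
msgq_move(struct msgq *dst, struct msgq *src, int *srclen, int n)
{
	struct msg *m;
	int moved = 0;

	if (n == -1 || n >= *srclen) {
		/* Fast path: splice the whole queue in O(1). */
		moved = *srclen;
		TAILQ_CONCAT(dst, src, link);
		*srclen = 0;
		return (moved);
	}
	while (moved < n && (m = TAILQ_FIRST(src)) != NULL) {
		TAILQ_REMOVE(src, m, link);
		TAILQ_INSERT_TAIL(dst, m, link);
		(*srclen)--;
		moved++;
	}
	return (moved);
}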
Example #5
/*
 * Stop the queue and flush the entries.
 *
 * This calls the callback with the value of 'flush' before freeing
 * each.
 *
 * If flush=1, then the callback should complete the work and then tidy up
 * If flush=0, then the callback shouldn't complete the work and just tidy up
 */
void
athp_taskq_flush(struct ath10k *ar, int flush)
{
	struct athp_taskq_head *h;
	TAILQ_HEAD(, athp_taskq_entry) te;
	struct athp_taskq_entry *e;

	h = ar->sc_taskq_head;
	if (h == NULL)
		return;

	ath10k_dbg(ar, ATH10K_DBG_TASKQ, "%s: called\n", __func__);

	/* Stop the taskqueue */
	athp_taskq_stop(ar);

	/* Flush whatever entries are on it */
	TAILQ_INIT(&te);
	ATHP_TASKQ_LOCK(h);
	TAILQ_CONCAT(&te, &h->list, node);
	ATHP_TASKQ_UNLOCK(h);

	while ((e = TAILQ_FIRST(&te)) != NULL) {
		TAILQ_REMOVE(&te, e, node);
		e->on_queue = 0;
		ath10k_dbg(ar, ATH10K_DBG_TASKQ,
		    "%s: calling cb %s %p (ptr %p), status=%d\n",
		    __func__,
		    e->cb_str,
		    e->cb,
		    e,
		    flush);
		e->cb(ar, e, flush);
		athp_taskq_entry_free(ar, e);
	}
}
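Examples #2 and #5 rely on the same pattern: splice the shared list onto a local head while the lock is held, then walk and free the entries after the lock has been dropped, so the potentially slow callbacks and free() calls never run under the lock. A user-space sketch of that pattern with a pthread mutex follows; the work/workq types, the 'pending' list, and drain_pending() are illustrative names, not part of any of the drivers above.

#include <sys/queue.h>
#include <pthread.h>
#include <stdlib.h>

struct work {
	void (*fn)(void *);
	void *arg;
	TAILQ_ENTRY(work) link;
};
TAILQ_HEAD(workq, work);

static struct workq pending = TAILQ_HEAD_INITIALIZER(pending);
static pthread_mutex_t pending_mtx = PTHREAD_MUTEX_INITIALIZER;

/*
 * Run and free every queued work item.  The shared list is emptied with a
 * single O(1) splice while the mutex is held; the callbacks and free()
 * then run on the private copy without the lock.
 */
static void
drain_pending(void)
{
	struct workq local = TAILQ_HEAD_INITIALIZER(local);
	struct work *w;

	pthread_mutex_lock(&pending_mtx);
	TAILQ_CONCAT(&local, &pending, link);	/* 'pending' is now empty */
	pthread_mutex_unlock(&pending_mtx);

	while ((w = TAILQ_FIRST(&local)) != NULL) {
		TAILQ_REMOVE(&local, w, link);
		w->fn(w->arg);
		free(w);
	}
}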
Example #6
File: copy_gc.c  Project: dec9ue/copy_gc
void perform_gc(struct s_arena* arena){
	struct s_gc s_gc;
	init_gc(&s_gc,arena);

	/* phase 1 mark or evac root */
	evacuate(arena->root,&s_gc);
	
	/* phase 2 process scavenge queue */
	do{
		void* scav_obj;
		scav_obj  = find_small_object(&s_gc);
		if( scav_obj != NULL){
			scavenge_small(scav_obj,&s_gc);
			continue;
		}
		scav_obj  = find_big_object(&s_gc);
		if( scav_obj != NULL){
			scavenge_big(scav_obj,&s_gc);
			continue;
		}
		/* no scavenging object */
		break;
	}while( 1 );

	debug("========================================================\n",NULL);
	debug("%s : small %016x(%d) big %016x(%d)\n",__FUNCTION__,s_gc.small_evaced,s_gc.small_evaced,s_gc.big_evaced,s_gc.big_evaced);
	debug("========================================================\n",NULL);
	
	/* phase 3 adjust objects */
	/* big dead objects : free */
	struct bdescr* block;
	block =  TAILQ_FIRST(&(arena->big_blocks));
	while(block){
		debug("-",NULL);
		struct bdescr* new_block = TAILQ_NEXT(block,link);
		debug("%s : %08x dead big\n",__FUNCTION__,(unsigned int)block);
		/* finalize */
		/* TODO if any finalizer, call here (before small blocks released)*/
		free_big_block(arena,(struct big_bdescr*)block);
		block = new_block;
	}

	/* big live objects : clear used bit & move back to arena->big_blocks */
	TAILQ_INIT(&(arena->big_blocks));
	TAILQ_CONCAT(&(arena->big_blocks),&(s_gc.big_live_queue),link);

	block =  TAILQ_FIRST(&(arena->big_blocks));
	while(block){
		debug("+",NULL);
		struct bdescr* new_block = TAILQ_NEXT(block,link);
		debug("%s : %08x live big\n",__FUNCTION__,(unsigned int)block);
		((struct big_bdescr*)block)-> used = 0;
		block = new_block;
	}

	/* free old spaces */
	block = TAILQ_FIRST(&(arena->blocks));
	while(block){
		debug("-",NULL);
		struct bdescr* next_block = TAILQ_NEXT(block,link);
		free_single_block(arena,(struct single_bdescr*)block);
		debug("%s : %08x dead small\n",__FUNCTION__,(unsigned int)block);
		block = next_block;
	}

	/* move live small area */
	TAILQ_INIT(&(arena->blocks));
	TAILQ_CONCAT(&(arena->blocks),&(s_gc.to_space_queue),link);

#ifdef CGC_DEBUG
	block = TAILQ_FIRST(&(arena->blocks));
	while(block){
		debug("+",NULL);
		struct bdescr* next_block = TAILQ_NEXT(block,link);
		debug("%s : %08x live small\n",__FUNCTION__,(unsigned int)block);
		block = next_block;
	}
#endif
	debug("========================================================\n",NULL);
	debug("%s : end\n",__FUNCTION__);
	debug("========================================================\n",NULL);
}
Example #7
/*
 * pause traffic of a vap from a specified queue.
 * if vap is null all the traffic will be paused.
 */ 
static void ath_tx_vap_pause_txq(struct ath_softc *sc, struct ath_txq *txq, struct ath_vap *avp)
{
   struct ath_buf *bf, *lastbf;
   ath_bufhead bf_head, bf_stage;
   struct ath_node *an,*an_uapsd_head;
   ath_bufhead    vap_mcast_stage_q[ATH_VAPSIZE];         /* temporary per vap staging queue for cabq traffic */
   struct ath_vap        *mcast_vap_q[ATH_VAPSIZE];
   u_int32_t i;
   struct ieee80211_frame *wh;

   if (txq == sc->sc_cabq) {
       for (i=0;i<ATH_VAPSIZE;++i) {
          TAILQ_INIT(&vap_mcast_stage_q[i]);
          mcast_vap_q[i] = NULL;
       }
   }

   an_uapsd_head=NULL;

   TAILQ_INIT(&bf_stage);
    /*
     * NB: this assumes output has been stopped and
     *     we do not need to block ath_tx_tasklet
     */
    for (;;) {
        ATH_TXQ_LOCK(txq);
        if (sc->sc_enhanceddmasupport) {
            bf = TAILQ_FIRST(&txq->axq_fifo[txq->axq_tailindex]);
            if (bf == NULL) {
                if (txq->axq_headindex != txq->axq_tailindex)
                    printk("ath_tx_draintxq: ERR head %d tail %d\n",
                           txq->axq_headindex, txq->axq_tailindex);
                txq->axq_headindex = 0;
                txq->axq_tailindex = 0;
                ATH_TXQ_UNLOCK(txq);
                break;
            }
        } else {
            bf = TAILQ_FIRST(&txq->axq_q);
            if (bf == NULL) {
                txq->axq_link = NULL;
                txq->axq_linkbuf = NULL;
                ATH_TXQ_UNLOCK(txq);
                break;
            }

            if (bf->bf_status & ATH_BUFSTATUS_STALE) {
                ATH_TXQ_REMOVE_STALE_HEAD(txq, bf, bf_list);
                ATH_TXQ_UNLOCK(txq);
#ifdef ATH_SUPPORT_UAPSD
                if (bf->bf_qosnulleosp) {

                    ath_tx_uapsdqnulbf_complete(sc, bf, false);

                } else
#endif
                {
                    ATH_TXBUF_LOCK(sc);
                    sc->sc_txbuf_free++;
#if ATH_TX_BUF_FLOW_CNTL
					if(bf) {
                    txq->axq_num_buf_used--;
					}
#endif
                    TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
                    
#if TRACE_TX_LEAK                
                    TAILQ_REMOVE(&sc->sc_tx_trace_head,bf,bf_tx_trace_list);
#endif //TRACE_TX_LEAK

                    ATH_TXBUF_UNLOCK(sc);
#if ATH_SUPPORT_FLOWMAC_MODULE
                    if (sc->sc_osnetif_flowcntrl) {
                        ath_netif_wake_queue(sc);
                    }
#endif
                }
                continue;
            }
        }

        lastbf = bf->bf_lastbf;
 
        TAILQ_INIT(&bf_head);

        /* remove ath_buf's of the same mpdu from txq */
        if (sc->sc_enhanceddmasupport) {
             if (txq == sc->sc_cabq || txq == sc->sc_uapsdq) {
                 ATH_EDMA_MCASTQ_MOVE_HEAD_UNTIL(txq, &bf_head, lastbf, bf_list);
             } else {
                 ATH_EDMA_TXQ_MOVE_HEAD_UNTIL(txq, &bf_head, lastbf, bf_list);
             }
        } else {
            ATH_TXQ_MOVE_HEAD_UNTIL(txq, &bf_head, lastbf, bf_list);
        }

        txq->axq_totalqueued --;                                      

        if (bf->bf_isaggr) {
            txq->axq_aggr_depth--;
        }
#if ATH_SUPPORT_CFEND
        if (txq == sc->sc_cfendq) {
            /* process CF End packet */
            if (bf->bf_state.bfs_iscfend) {
                ath_tx_cfend_complete (sc, bf, &bf_head);
                ATH_TXQ_UNLOCK(txq);
                continue; /* process rest of the buffers */
            }
        }
#endif


        ATH_TXQ_UNLOCK(txq);

#ifdef AR_DEBUG
        if (!sc->sc_enhanceddmasupport && CHK_SC_DEBUG(sc, ATH_DEBUG_RESET))
            /* Legacy only as the enhanced DMA txprocdesc() 
             * will move the tx status ring tail pointer.
             */
            ath_printtxbuf(bf, ath_hal_txprocdesc(sc->sc_ah, bf->bf_desc) == HAL_OK);
#endif /* AR_DEBUG */

        an = bf->bf_node;
        /*
         * if the node belongs to the vap being paused (or) if the request
         * is to pause all vaps (avp is NULL)
         * then put it back in to the nodes queue.
         */
        if (!avp || (avp && an->an_avp == avp) ) {
#ifdef ATH_SUPPORT_UAPSD
            if (txq == sc->sc_uapsdq) {
                /* 
                 * if the node is not on the UAPSD node list then put it on the list.
                 * always put it on the head of the list.
                 */
                if (!an->an_temp_next && (an != an_uapsd_head)) {
                    if(an_uapsd_head){
                        an->an_temp_next = an_uapsd_head;
                    }
                    an_uapsd_head = an;
                }
                if (TAILQ_FIRST(&bf_head) == NULL ) {
                    DPRINTF(sc, ATH_DEBUG_ANY,"#####%s : %d  bf_head is empty \n",__func__, __LINE__);
                } else {
                    ath_tx_stage_queue_uapsd(sc,an, &bf_head);
                }
                continue;
            }

#endif
            if (txq == sc->sc_cabq) {
                ath_bufhead    *mcast_stage_q = NULL;
                /* 
                 * get the mcast staging queue for this vap and 
                 * add the frame to the mcast staging queue.
                 */

                for (i=0;i<ATH_VAPSIZE;++i) {
                   if (mcast_vap_q[i] == avp) {
                       mcast_stage_q =  &vap_mcast_stage_q[i]; 
                   } else if (mcast_vap_q[i] == NULL) {
                       mcast_stage_q =  &vap_mcast_stage_q[i]; 
                       mcast_vap_q[i] = avp;
                   }
                   if (mcast_stage_q ) {
                       break;
                   }
                }

                if (mcast_stage_q == NULL) {
                   DPRINTF(sc, ATH_DEBUG_ANY, "%s: mcat_stage_q is NULL \n", __func__);
                   continue; 
                }

                TAILQ_CONCAT(mcast_stage_q, &bf_head, bf_list);                   
                continue;
            }

            if (bf->bf_isampdu) {
                if (!bf->bf_isaggr) {
                    __11nstats(sc,tx_unaggr_comperror);
                }
                ath_tx_mark_aggr_rifs_done(sc, txq, bf, &bf_head,
                                          &((struct ath_desc *)(lastbf->bf_desc))->ds_txstat, 0);
            } else {
#ifdef ATH_SWRETRY
                if (sc->sc_enhanceddmasupport) {
                    /*
                     * Decrement of swr_num_eligible_frms for AMPDU is done
                     * above in ath_tx-complete_aggr_rifs.
                     */
                    if (!bf->bf_isampdu && bf->bf_isdata) {
                        struct ath_node *an = bf->bf_node;
                        if (an) {
                            struct ath_swretry_info *pInfo = &an->an_swretry_info[txq->axq_qnum];
                            ATH_NODE_SWRETRY_TXBUF_LOCK(an);
                            ASSERT(pInfo->swr_num_eligible_frms);
                            pInfo->swr_num_eligible_frms --;
                            ATH_NODE_SWRETRY_TXBUF_UNLOCK(an);
                        }
                    }
                }
#endif
                if (bf->bf_isbar) {
                    DPRINTF(sc, ATH_DEBUG_RESET, "*****%s: BAR frame \n", __func__);
#ifdef ATH_SUPPORT_TxBF
                    ath_tx_complete_buf(sc, bf, &bf_head, 0, 0, 0);
#else
                    ath_tx_complete_buf(sc, bf, &bf_head, 0);
#endif
                } else {
                    /*
                     *  Non Aggregates, put them at the head of the tid queue (if node is still avail,)
                     */

                    atomic_inc(&an->an_active_tx_cnt);
                    /* Make sure that Node is still alive and not temporary node */
                    if ((an->an_flags & (ATH_NODE_TEMP | ATH_NODE_CLEAN)) == 0) {
                        struct ath_atx_tid *tid = ATH_AN_2_TID(an, bf->bf_tidno);
                        TAILQ_INSERTQ_HEAD(&tid->buf_q, &bf_head, bf_list);
                        atomic_dec(&an->an_active_tx_cnt);
                    }
                    else {
                        if ((an->an_flags & ATH_NODE_TEMP) != 0) {
                            DPRINTF(sc, ATH_DEBUG_ANY, "%s: an=0x%p is Temp-node.\n", __func__, an);
                        }
                        if ((an->an_flags & ATH_NODE_CLEAN) != 0) {
                            DPRINTF(sc, ATH_DEBUG_ANY, "%s: an=0x%p is already CLEAN.\n", __func__, an);
                        }

                        atomic_dec(&an->an_active_tx_cnt);
                        
                        // Free these buffers.
#ifdef ATH_SUPPORT_TxBF
                        ath_tx_complete_buf(sc, bf, &bf_head, 0, 0, 0);
#else
                        ath_tx_complete_buf(sc, bf, &bf_head, 0);
#endif
                    }
                }
            }
        } else {
            /*
             * if the frame does not need to be paused
             * then put it on to a staging queue.
             */
            TAILQ_CONCAT(&bf_stage, &bf_head, bf_list);                   
        }
    }

#ifdef ATH_SUPPORT_UAPSD
    while(an_uapsd_head) {
        an=an_uapsd_head;
        an_uapsd_head = an->an_temp_next;
        an->an_temp_next=NULL;
        ath_tx_prepend_uapsd_stage_queue(sc,an);
    }
#endif
    /* prepend the staging queue back to vap mcast queue */
    if (txq == sc->sc_cabq) {
        ath_bufhead    *mcast_stage_q = NULL;

        for (i=0;i<ATH_VAPSIZE;++i) {
           mcast_stage_q =  &vap_mcast_stage_q[i]; 
           /*
            * prepend only if the mcast staging queue is not empty
            */ 
           if (TAILQ_FIRST(mcast_stage_q))   { 
              /*
               * need to prepend the frames from staging queue to the vap mcast queue.
               * do it in 2 steps.
               * move the frames from the vap mcast queue to the
               * end of the staging queue and move all the frames from staging queue 
               * to the vaps mcast queue.
               */ 
              TAILQ_CONCAT(mcast_stage_q, &mcast_vap_q[i]->av_mcastq.axq_q, bf_list);
              mcast_vap_q[i]->av_mcastq.axq_depth=0; 
              mcast_vap_q[i]->av_mcastq.axq_totalqueued = 0;                        
              mcast_vap_q[i]->av_mcastq.axq_linkbuf = 0;                        
              mcast_vap_q[i]->av_mcastq.axq_link = NULL;                        

              bf = TAILQ_FIRST(mcast_stage_q);
              while (bf) {
                  /*
                   * Remove a single ath_buf from the staging  queue and add it to
                   * the mcast queue.
                   */
                  lastbf = bf->bf_lastbf;

                   wh = (struct ieee80211_frame *)wbuf_header(bf->bf_mpdu);
                  
                   DPRINTF(sc, ATH_DEBUG_ANY, "%s: queue mcast frame back seq # %d \n", __func__,
                        le16toh(*(u_int16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT);
                  TAILQ_REMOVE_HEAD_UNTIL(mcast_stage_q, &bf_head, lastbf, bf_list);
                  if (ath_tx_mcastqaddbuf(sc, mcast_vap_q[i], &bf_head) != EOK) {
                      /* failed to queue the buf, complete it with an error */
#ifdef ATH_SUPPORT_TxBF
                      ath_tx_complete_buf(sc,bf,&bf_head,0,0, 0);
#else
                      ath_tx_complete_buf(sc,bf,&bf_head,0);
#endif
                  }

                  bf = TAILQ_FIRST(mcast_stage_q);
              }
           }
        }
    }
Example #8
File: cstuff.c  Project: fluxid/flantob
static inline void cleanup_d(queue_d *queue) {
	if (TAILQ_EMPTY(queue)) return;
	TAILQ_CONCAT(&free_entries_d, queue, hook);
}
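In the BSD implementations of <sys/queue.h>, TAILQ_CONCAT is typically guarded by its own TAILQ_EMPTY check, making it a no-op on an empty source queue; if that holds for the headers in use, the early return above is a shortcut rather than a correctness requirement. Roughly (paraphrased for illustration, not a verbatim copy of any particular header), the macro performs the following pointer surgery:

/* Illustrative only; consult your own <sys/queue.h> for the real macro. */
#define EXAMPLE_TAILQ_CONCAT(head1, head2, field) do {			\
	if (!TAILQ_EMPTY(head2)) {					\
		*(head1)->tqh_last = (head2)->tqh_first;		\
		(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
		(head1)->tqh_last = (head2)->tqh_last;			\
		TAILQ_INIT(head2);					\
	}								\
} while (0)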