int32_t flush_pkt_reqid(struct my_context *mcontext, struct pkt_controls *pkt_ctls)
{
    struct pkt_descriptor *pktd = NULL;
    struct pkt_descriptor *last_pktd = NULL;
    struct pkt_descriptor *first_pktd = NULL;
    uint64_t ref_req_id = 0;
    uint32_t error_val = -ECANCELED;
    uint32_t error_return;

    if (NULL == pkt_ctls->pkt_reqid_list)
        return 0;

    SPIN_LOCK(pkt_ctls->pkt_list_lock);
    pktd = pkt_ctls->pkt_reqid_list;
    pkt_ctls->pkt_reqid_list = NULL;  //detach the list
    SPIN_UNLOCK(pkt_ctls->pkt_list_lock);

    if (unlikely(NULL == pktd))
        return 0;

    first_pktd = pktd;
    while (pktd != NULL) {
        last_pktd = pktd;  //will eventually be the last in the list
        ref_req_id = pktd->req_id;
        pktd->req_id = 0;
        //$$$ PRINT(">>ECANCELED packet RequestID= %p \n",(void*)ref_req_id);
        //if (ref_req_id) (mcontext->status)( (void*)ref_req_id, error_val, mcontext->callback_context);
        if (ref_req_id)
            (mcontext->status)((void *)&ref_req_id, &error_val,
                               mcontext->callback_context, &error_return, 1);
        pktd = pktd->next;
    }

    //move the list to the free list now
    SPIN_LOCK(pkt_ctls->pkt_list_lock);
    last_pktd->next = pkt_ctls->pkt_free_list;
    pkt_ctls->pkt_free_list = first_pktd;
    SPIN_UNLOCK(pkt_ctls->pkt_list_lock);
    return 0;
}
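/*
 * Both flush_pkt_reqid() above and poll_pkt_message() later in this section
 * report completions through the same five-argument mcontext->status hook.
 * Its exact typedef is not part of this excerpt; the sketch below is only an
 * assumption reconstructed from those two call sites (the name status_cb_t
 * and the parameter names are hypothetical).
 */
typedef int (*status_cb_t)(void *request_ids,       /* array of uint64_t request ids */
                           uint32_t *error_codes,   /* per-request error, e.g. -ECANCELED */
                           void *callback_context,  /* opaque caller context */
                           uint32_t *return_codes,  /* per-request status written back */
                           int count);              /* number of entries in the arrays */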
/**
 * This function completes a request. It calls the request's callback.
 */
void request_done(dwc_otg_pcd_ep_t *_ep, dwc_otg_pcd_request_t *_req, int _status)
{
    unsigned stopped = _ep->stopped;

    DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _ep);
    list_del_init(&_req->queue);

    if (_req->req.status == -EINPROGRESS) {
        _req->req.status = _status;
    } else {
        _status = _req->req.status;
    }

    /* don't modify queue heads during completion callback */
    _ep->stopped = 1;

    SPIN_UNLOCK(&_ep->pcd->lock);
    _req->req.complete(&_ep->ep, &_req->req);
    SPIN_LOCK(&_ep->pcd->lock);

    if (_ep->pcd->request_pending > 0) {
        --_ep->pcd->request_pending;
    }

    _ep->stopped = stopped;
}
void check4notRight(struct pkt_controls *pkt_ctls, BG_FlightRecorderRegistry_t *logregistry)
{
    uint64_t expected = pkt_ctls->num_pkts;
    uint64_t free_num = 0;
    uint64_t reqid_num = 0;
    //uint64_t backwards_reqid_num = 0;
    struct pkt_descriptor *pktd = NULL;

    //LOCK
    SPIN_LOCK(pkt_ctls->pkt_list_lock);

    pktd = pkt_ctls->pkt_free_list;
    while (pktd) {
        free_num++;
        pktd = pktd->next;
        if (free_num > expected) {
            break;
        }
    }

    pktd = pkt_ctls->pkt_reqid_list;
    while (pktd) {
        reqid_num++;
        pktd = pktd->next;
        if (reqid_num > expected) {
            DB_PKT_CHECK(logregistry, pkt_ctls, expected, free_num, reqid_num);
            break;
        }
    }

    PKT_CHECK(logregistry, pkt_ctls, expected, free_num, reqid_num);

    //UNLOCK
    SPIN_UNLOCK(pkt_ctls->pkt_list_lock);
}
static void timer_add(struct timer *T, void *arg, size_t sz, int time)
{
    struct timer_node *node = (struct timer_node *)skynet_malloc(sizeof(*node) + sz);
    memcpy(node + 1, arg, sz);

    SPIN_LOCK(T);

    node->expire = time + T->time;
    add_node(T, node);

    SPIN_UNLOCK(T);
}
/* Dispatch the timer events that expire at the current tick. This function
 * keeps checking, in a thread-safe way, for newly added events that are
 * already due at the current time; any it finds are dispatched as well,
 * otherwise they are left for the next dispatch round. */
static inline void timer_execute(struct timer *T)
{
    int idx = T->time & TIME_NEAR_MASK;

    while (T->near[idx].head.next) {
        struct timer_node *current = link_clear(&T->near[idx]);
        SPIN_UNLOCK(T);
        // dispatch_list don't need lock T
        dispatch_list(current);
        SPIN_LOCK(T);
    }
}
/* Allocate memory, build a timer node and add it to the timer manager;
 * see add_node for the insertion algorithm. T is the timer manager, arg is
 * the timer event, sz is the size of the event structure, and time is the
 * delay from now until the node fires. This function is thread-safe. */
static void timer_add(struct timer *T, void *arg, size_t sz, int time)
{
    struct timer_node *node = (struct timer_node *)skynet_malloc(sizeof(*node) + sz);
    memcpy(node + 1, arg, sz);

    SPIN_LOCK(T);

    /* Reading the manager's current time and touching the trigger lists are
     * not thread-safe on their own, so both happen inside the locked region. */
    node->expire = time + T->time;
    add_node(T, node);

    SPIN_UNLOCK(T);
}
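/*
 * Illustrative caller for timer_add() above: the payload is memcpy'd into the
 * node that timer_add() allocates, so a stack-local event is fine. The
 * timer_event layout and the timeout_example() wrapper are assumptions for
 * this sketch, not part of the timer code shown here.
 */
struct timer_event {
    uint32_t handle;   /* destination service id (assumed field) */
    int session;       /* request session id (assumed field) */
};

static int timeout_example(struct timer *TI, uint32_t handle, int session, int ticks)
{
    struct timer_event ev;
    ev.handle = handle;
    ev.session = session;
    /* the payload is copied inside timer_add(); ev may safely go out of scope */
    timer_add(TI, &ev, sizeof(ev), ticks);
    return 0;
}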
void release_pkt_to_poll(struct pkt_descriptor *pktd, uint64_t desc_count)
{
    ENTER;
    pktd->desc_cnt = desc_count;
    pktd->timestamp = GetTimeBase2();
    //PRINT("reg_id = %p \n", (void *)pktd->req_id);

    SPIN_LOCK(pktd->pkt_control->pkt_list_lock);
    pktd->prev = NULL;
    pktd->next = pktd->pkt_control->pkt_reqid_list;
    if (pktd->pkt_control->pkt_reqid_list != NULL)
        pktd->pkt_control->pkt_reqid_list->prev = pktd;
    pktd->pkt_control->pkt_reqid_list = pktd;
    SPIN_UNLOCK(pktd->pkt_control->pkt_list_lock);
    EXIT;
}
static inline int grtm_request_txlock_isr(struct grtm_priv *pDev)
{
    SPIN_ISR_IRQFLAGS(irqflags);
    int got_lock = 0;

    SPIN_LOCK(&pDev->devlock, irqflags);
    if (pDev->handling_transmission == 0) {
        pDev->handling_transmission = 1;
        got_lock = 1;
    }
    SPIN_UNLOCK(&pDev->devlock, irqflags);

    return got_lock;
}
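/*
 * The try-lock above only sets a flag under pDev->devlock; the matching
 * release path is not part of this excerpt. A minimal sketch of what it
 * would look like, assuming the same lock protects handling_transmission
 * (the name grtm_release_txlock_isr is an assumption):
 */
static inline void grtm_release_txlock_isr(struct grtm_priv *pDev)
{
    SPIN_ISR_IRQFLAGS(irqflags);

    SPIN_LOCK(&pDev->devlock, irqflags);
    pDev->handling_transmission = 0;   /* allow the next caller to win the try-lock */
    SPIN_UNLOCK(&pDev->devlock, irqflags);
}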
static void timer_update(struct timer *T)
{
    SPIN_LOCK(T);

    // try to dispatch timeout 0 (rare condition)
    timer_execute(T);

    // shift time first, and then dispatch timer message
    timer_shift(T);
    timer_execute(T);

    SPIN_UNLOCK(T);
}
post_id_t sys_open(void)
{
    post_id_t p;

    SPIN_LOCK(alloc_post);
    p = alloc_post();
    Posts[p].owner = get_current_tid();
    Posts[p].handler = NULL;
    Posts[p].received = create_list();
    SPIN_UNLOCK(alloc_post);

    return p;
}
/**
 * This function completes a request. It calls the request's callback.
 */
void request_done(dwc_otg_pcd_ep_t *_ep, dwc_otg_pcd_request_t *_req, int _status)
{
    unsigned stopped = _ep->stopped;

    DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _ep);

    if (_req->mapped) {
        dma_unmap_single(_ep->pcd->gadget.dev.parent,
                         _req->req.dma, _req->req.length,
                         _ep->dwc_ep.is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
        _req->req.dma = DMA_ADDR_INVALID;
        _req->mapped = 0;
    } else
        dma_sync_single_for_cpu(_ep->pcd->gadget.dev.parent,
                                _req->req.dma, _req->req.length,
                                _ep->dwc_ep.is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

    list_del_init(&_req->queue);

    if (_req->req.status == -EINPROGRESS) {
        _req->req.status = _status;
    } else {
        _status = _req->req.status;
    }

    /* don't modify queue heads during completion callback */
    _ep->stopped = 1;

    SPIN_UNLOCK(&_ep->pcd->lock);
    _req->req.complete(&_ep->ep, &_req->req);
    SPIN_LOCK(&_ep->pcd->lock);

    if (_ep->pcd->request_pending > 0) {
        --_ep->pcd->request_pending;
    }

    _ep->stopped = stopped;

#ifdef CONFIG_405EZ
    /*
     * Added-sr: 2007-07-26
     *
     * Finally, when the current request is done, mark this endpoint
     * as not active, so that new requests can be processed.
     */
    _ep->dwc_ep.active = 0;
#endif
}
/* Advance the current time and dispatch every timer event that has expired.
 * This function is thread-safe. */
static void timer_update(struct timer *T)
{
    SPIN_LOCK(T);

    /* First dispatch the events that are already due before the clock is
     * advanced; these were added during the previous dispatch run and missed
     * by its checks. Skipping this step would lose them forever. */
    // try to dispatch timeout 0 (rare condition)
    timer_execute(T);

    /* Only then advance the current time, reshuffle the events in the
     * hierarchical wheel lists, and dispatch whatever expires at the new time. */
    // shift time first, and then dispatch timer message
    timer_shift(T);
    timer_execute(T);

    SPIN_UNLOCK(T);
}
void bootloader_mp_ap_init(void)
{
    t_uint32 apicid;
    t_sint32 id;

    /*
     * 1)
     */
    SPIN_LOCK(spin);

    /*
     * 2)
     */
    apicid = apic_id();
    if ((id = bootloader_add_cpu(apicid)) >= 0) {
        bootloader_cons_msg('#', " AP-%d APIC ID: %d\n", init->ncpus, apicid);
        apic_enable();
        bootloader_pmode_ap_init();
        bootloader_paging_ap_init();
        bootloader_interrupt_ap_init();
    }

    /*
     * 3)
     */
    SPIN_UNLOCK(spin);

    /*
     * 4)
     */
    // HLT(); XXX why not working ?
    while (1)
        ;
}
struct pkt_descriptor *alloc_pkt_message(struct pkt_controls *pkt_ctls)
{
    struct pkt_descriptor *pktd = NULL;

    ENTER;
    SPIN_LOCK(pkt_ctls->pkt_list_lock);
    pktd = pkt_ctls->pkt_free_list;
    if (pktd != NULL)
        pkt_ctls->pkt_free_list = pktd->next;
    SPIN_UNLOCK(pkt_ctls->pkt_list_lock);

    // force poll? attempt to take this descriptor if packet got processed and no poll active?
    if (pktd == NULL) {
        PRINT("pktd NULL return in %s %d \n", __FILE__, __LINE__);
        return NULL;
    }

    pktd->req_id = 0;
    pktd->error_val = 0;
    pktd->flightinfo4delay = NULL;
    pktd->timestamp = 0;
    pktd->desc_cnt = -1;  /* largest possible value */
    pktd->ccontext = NULL;

    EXIT;
    return pktd;
}
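/*
 * Taken together, alloc_pkt_message(), release_pkt_to_poll(), poll_pkt_message()
 * and flush_pkt_reqid() form a descriptor lifecycle: take a descriptor from the
 * free list, record the injection-fifo descriptor count, park the descriptor on
 * the request-id list, and let the poll routine return it to the free list once
 * the hardware count catches up. The sketch below only illustrates that
 * ordering; send_one_packet() and inject_packet() are hypothetical helpers, not
 * code from this driver.
 */
static int send_one_packet(struct pkt_controls *pkt_ctls, uint64_t req_id, void *payload)
{
    struct pkt_descriptor *pktd = alloc_pkt_message(pkt_ctls);
    uint64_t desc_count;

    if (pktd == NULL)
        return -EBUSY;          /* free list exhausted; caller retries after a poll */

    pktd->req_id = req_id;      /* completion is reported against this id */

    /* assumed helper: queue the packet and return the fifo descriptor count */
    desc_count = inject_packet(pkt_ctls, payload);

    /* Park the descriptor on pkt_reqid_list; poll_pkt_message() frees it and
     * invokes mcontext->status once the hardware count reaches desc_count. */
    release_pkt_to_poll(pktd, desc_count);
    return 0;
}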
/**
 * This function completes a request. It calls the request's callback.
 */
void request_done(dwc_otg_pcd_ep_t *_ep, dwc_otg_pcd_request_t *_req, int _status)
{
    unsigned stopped = _ep->stopped;
    dwc_otg_core_if_t *core_if = GET_CORE_IF(_ep->pcd);

    DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _ep);

    if (_ep->dwc_ep.num && _ep->dwc_ep.is_in)
        list_del_init(&_req->pcd_queue);
    list_del_init(&_req->queue);

    if (_req->req.status == -EINPROGRESS) {
        _req->req.status = _status;
    } else {
        _status = _req->req.status;
    }

    /* don't modify queue heads during completion callback */
    _ep->stopped = 1;

    SPIN_UNLOCK(&_ep->pcd->lock);
    _req->req.complete(&_ep->ep, &_req->req);
    SPIN_LOCK(&_ep->pcd->lock);

    if (_ep->pcd->request_pending > 0) {
        --_ep->pcd->request_pending;
    }

    _ep->stopped = stopped;

    if (_ep->dwc_ep.is_in && _ep->dwc_ep.num) {
        DWC_DEBUGPL(DBG_PCDV, "ep%d,len=%d\n", _ep->dwc_ep.num, _req->req.actual);
        _ep->pcd->ep_in_sync = 0;
    }

    if (core_if->dma_enable)
        dwc_otg_pcd_dma_unmap(&_ep->dwc_ep);
}
shardcache_node_t *shardcache_node_select(shardcache_t *cache, char *label)
{
    shardcache_node_t *node = NULL;
    int i;

    for (i = 0; i < cache->num_shards; i++) {
        if (strcmp(cache->shards[i]->label, label) == 0) {
            node = cache->shards[i];
            break;
        }
    }

    SPIN_LOCK(cache->migration_lock);
    if (cache->migration && !node) {
        for (i = 0; i < cache->num_migration_shards; i++) {
            if (strcmp(cache->migration_shards[i]->label, label) == 0) {
                node = cache->migration_shards[i];
                break;
            }
        }
    }
    SPIN_UNLOCK(cache->migration_lock);

    return node;
}
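/*
 * Hypothetical caller for shardcache_node_select(): the only failure mode is a
 * NULL return when the label is unknown to both the current shard map and the
 * in-progress migration map. node_exists() is an illustration (it assumes
 * <stdio.h>), not part of the shardcache API.
 */
static int node_exists(shardcache_t *cache, char *label)
{
    shardcache_node_t *node = shardcache_node_select(cache, label);

    if (node == NULL) {
        fprintf(stderr, "no shard labelled '%s'\n", label);
        return 0;
    }
    return 1;
}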
/*!
 * \brief This function completes a request. It calls the request's callback.
 */
static void request_done_ep0(ifxpcd_request_t *_ifxreq, int _status)
{
    IFX_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _ifxreq);

    if (!_ifxreq) {
        IFX_ERROR("%s() %d invalid _ifxreq\n", __func__, __LINE__);
        return;
    }

    _ifxreq->sysreq.status = _status;
    list_del_init(&_ifxreq->trq);

#ifdef __DO_PCD_UNLOCK__
    SPIN_UNLOCK(&ifxusb_pcd.lock);
#endif
    if (_ifxreq->sysreq.complete)
        _ifxreq->sysreq.complete(&ifxusb_pcd.ifxep[0].sysep, &_ifxreq->sysreq);
#ifdef __DO_PCD_UNLOCK__
    SPIN_LOCK(&ifxusb_pcd.lock);
#endif
}
irqreturn_t handler(int interruptNumber, void *devId)
{
    struct xordev *dev = byInteruptNumber(interruptNumber);
    int state = DEVICE_WORKING;

    if (dev != devId) {
        printk(KERN_INFO "xordev: Interrupt from different device");
        return IRQ_NONE;
    }

    SPIN_LOCK(dev);
    if (DEVICE_WORKING == *(dev->deviceState)) {
        *(dev->deviceState) = DEVICE_DONE;
        state = DEVICE_DONE;
    }
    SPIN_UNLOCK(dev);

    if (DEVICE_DONE == state) {
        iowrite32((u32) 0, dev->bar0 + 0x00);  /* Interrupts disabled */
        printk(KERN_INFO "xordev: Finished xoring. Interrupt.");
        wake_up_interruptible(dev->waitSource1);
        wake_up_interruptible(dev->waitSource2);
        wake_up_interruptible(dev->waitDestination);
    } else {
        printk(KERN_INFO "xordev: Interrupt, but not finished xoring.");
    }

    return IRQ_HANDLED;
}
void request_done(ifxpcd_ep_t *_ifxep, ifxpcd_request_t *_ifxreq, int _status)
{
    IFX_DEBUGPL(DBG_PCDV, "%s(%p,%p)\n", __func__, _ifxep, _ifxreq);

    if (!_ifxep) {
        IFX_ERROR("%s() %d invalid _ifxep\n", __func__, __LINE__);
        return;
    }
    if (!_ifxreq) {
        IFX_ERROR("%s() %d invalid _ifxreq\n", __func__, __LINE__);
        return;
    }
    if (_ifxep->num == 0) {
        request_done_ep0(_ifxreq, _status);
        return;
    }

    _ifxreq->sysreq.status = _status;

    if (_ifxep->type == IFXUSB_EP_TYPE_INTR) {
        list_del_init(&_ifxreq->trq);
        if (_ifxreq->sysreq.complete) {
            #ifdef __DO_PCD_UNLOCK__
            SPIN_UNLOCK(&ifxusb_pcd.lock);
            #endif
            _ifxreq->sysreq.complete(&_ifxep->sysep, &_ifxreq->sysreq);
            #ifdef __DO_PCD_UNLOCK__
            SPIN_LOCK(&ifxusb_pcd.lock);
            #endif
        }
    } else if (_ifxep->is_in) { // Tx
        #if defined(__GADGET_TASKLET_TX__)
        list_del_init(&_ifxreq->trq);
        list_add_tail(&_ifxreq->trq, &_ifxep->queue_cmpt);
        if (!_ifxreq->sysreq.no_interrupt && !_ifxep->cmpt_tasklet_in_process) {
            #ifdef __GADGET_TASKLET_HIGH__
            tasklet_hi_schedule(&_ifxep->cmpt_tasklet);
            #else
            tasklet_schedule(&_ifxep->cmpt_tasklet);
            #endif
        }
        #else
        list_del_init(&_ifxreq->trq);
        if (!_ifxreq->sysreq.no_interrupt) {
            while (!list_empty(&_ifxep->queue_cmpt)) {
                ifxpcd_request_t *req;
                req = list_entry(_ifxep->queue_cmpt.next, ifxpcd_request_t, trq);
                list_del_init(&req->trq);
                if (req->sysreq.complete) {
                    #ifdef __DO_PCD_UNLOCK__
                    SPIN_UNLOCK(&ifxusb_pcd.lock);
                    #endif
                    req->sysreq.complete(&_ifxep->sysep, &req->sysreq);
                    #ifdef __DO_PCD_UNLOCK__
                    SPIN_LOCK(&ifxusb_pcd.lock);
                    #endif
                } else {
                    #ifdef __req_num_dbg__
                    IFX_ERROR("%s() no complete EP%d Req%d\n", __func__, _ifxep->num, req->reqid);
                    #else
                    IFX_ERROR("%s() no complete EP%d Req %p\n", __func__, _ifxep->num, req);
                    #endif
                }
            }
            if (_ifxreq->sysreq.complete) {
                #ifdef __DO_PCD_UNLOCK__
                SPIN_UNLOCK(&ifxusb_pcd.lock);
                #endif
                _ifxreq->sysreq.complete(&_ifxep->sysep, &_ifxreq->sysreq);
                #ifdef __DO_PCD_UNLOCK__
                SPIN_LOCK(&ifxusb_pcd.lock);
                #endif
            } else {
                #ifdef __req_num_dbg__
                IFX_ERROR("%s() no complete EP%d Req%d\n", __func__, _ifxep->num, _ifxreq->reqid);
                #else
                IFX_ERROR("%s() no complete EP%d Req %p\n", __func__, _ifxep->num, _ifxreq);
                #endif
            }
        } else
            list_add_tail(&_ifxreq->trq, &_ifxep->queue_cmpt);
        #endif
    } else { // Rx
        #if defined(__GADGET_TASKLET_RX__)
        if (list_empty(&_ifxep->queue)) { // Rx Empty, Reuse
            _ifxreq->sysreq.actual = 0;
            _ifxreq->sysreq.status = 0;
        } else if (!_ifxreq->sysreq.no_interrupt && !_ifxep->cmpt_tasklet_in_process) {
            _ifxep->cmpt_tasklet_in_process = 1;
            list_move_tail(&_ifxreq->trq, &_ifxep->queue_cmpt);
            #ifdef __GADGET_TASKLET_HIGH__
            tasklet_hi_schedule(&_ifxep->cmpt_tasklet);
            #else
            tasklet_schedule(&_ifxep->cmpt_tasklet);
            #endif
        } else
            list_move_tail(&_ifxreq->trq, &_ifxep->queue_cmpt);
        #else
        if (!_ifxreq->sysreq.no_interrupt) {
            ifxpcd_request_t *req;
            while (!list_empty(&_ifxep->queue_cmpt)) {
                req = list_entry(_ifxep->queue_cmpt.next, ifxpcd_request_t, trq);
                list_del_init(&req->trq);
                if (req->sysreq.complete) {
                    #ifdef __DO_PCD_UNLOCK__
                    SPIN_UNLOCK(&ifxusb_pcd.lock);
                    #endif
                    req->sysreq.complete(&_ifxep->sysep, &req->sysreq);
                    #ifdef __DO_PCD_UNLOCK__
                    SPIN_LOCK(&ifxusb_pcd.lock);
                    #endif
                } else {
                    #ifdef __req_num_dbg__
                    IFX_ERROR("%s() no complete EP%d Req%d\n", __func__, _ifxep->num, req->reqid);
                    #else
                    IFX_ERROR("%s() no complete EP%d Req %p\n", __func__, _ifxep->num, req);
                    #endif
                }
            }
            if (list_empty(&_ifxep->queue)) { // Rx Empty, Reuse
                _ifxreq->sysreq.actual = 0;
                _ifxreq->sysreq.status = 0;
                list_add_tail(&_ifxreq->trq, &_ifxep->queue);
            } else {
                if (_ifxreq->sysreq.complete) {
                    #ifdef __DO_PCD_UNLOCK__
                    SPIN_UNLOCK(&ifxusb_pcd.lock);
                    #endif
                    _ifxreq->sysreq.complete(&_ifxep->sysep, &_ifxreq->sysreq);
                    #ifdef __DO_PCD_UNLOCK__
                    SPIN_LOCK(&ifxusb_pcd.lock);
                    #endif
                } else {
                    #ifdef __req_num_dbg__
                    IFX_ERROR("%s() no complete EP%d Req%d\n", __func__, _ifxep->num, req->reqid);
                    #else
                    IFX_ERROR("%s() no complete EP%d Req %p\n", __func__, _ifxep->num, req);
                    #endif
                }
            }
        }
/**
 * This function handles the OTG Interrupts. It reads the OTG
 * Interrupt Register (GOTGINT) to determine what interrupt has
 * occurred.
 *
 * @param core_if Programming view of DWC_otg controller.
 */
int32_t dwc_otg_handle_otg_intr(dwc_otg_core_if_t *core_if)
{
    dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
    gotgint_data_t gotgint;
    gotgctl_data_t gotgctl;
    gintmsk_data_t gintmsk;

    gotgint.d32 = dwc_read_reg32(&global_regs->gotgint);
    gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl);
    DWC_DEBUGPL(DBG_CIL, "++OTG Interrupt gotgint=%0x [%s]\n",
                gotgint.d32, op_state_str(core_if));
    //DWC_DEBUGPL(DBG_CIL, "gotgctl=%08x\n", gotgctl.d32);

    if (gotgint.b.sesenddet) {
        DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: "
                    "Session End Detected++ (%s)\n", op_state_str(core_if));
        gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl);

        if (core_if->op_state == B_HOST) {
            dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)core_if->pcd_cb->p;
            if (unlikely(!pcd)) {
                DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!", __func__);
                BUG();
            }
            SPIN_LOCK(&pcd->lock);
            pcd_start(core_if);
            SPIN_UNLOCK(&pcd->lock);
            core_if->op_state = B_PERIPHERAL;
        } else {
            dwc_otg_pcd_t *pcd;

            /* If not B_HOST and Device HNP still set. HNP
             * did not succeed! */
            if (gotgctl.b.devhnpen) {
                DWC_DEBUGPL(DBG_ANY, "Session End Detected\n");
                DWC_ERROR("Device Not Connected/Responding!\n");
            }

            /* If Session End Detected the B-Cable has
             * been disconnected. */
            /* Reset PCD and Gadget driver to a
             * clean state. */
            pcd = (dwc_otg_pcd_t *)core_if->pcd_cb->p;
            if (unlikely(!pcd)) {
                DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!", __func__);
                BUG();
            }
            SPIN_LOCK(&pcd->lock);
            pcd_stop(core_if);
            SPIN_UNLOCK(&pcd->lock);
        }

        gotgctl.d32 = 0;
        gotgctl.b.devhnpen = 1;
        dwc_modify_reg32(&global_regs->gotgctl, gotgctl.d32, 0);
    }

    if (gotgint.b.sesreqsucstschng) {
        DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: "
                    "Session Request Success Status Change++\n");
        gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl);
        if (gotgctl.b.sesreqscs) {
            if ((core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) &&
                (core_if->core_params->i2c_enable)) {
                core_if->srp_success = 1;
            } else {
                dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)core_if->pcd_cb->p;
                if (unlikely(!pcd)) {
                    DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!", __func__);
                    BUG();
                }
                SPIN_LOCK(&pcd->lock);
                pcd_resume(core_if);
                SPIN_UNLOCK(&pcd->lock);

                /* Clear Session Request */
                gotgctl.d32 = 0;
                gotgctl.b.sesreq = 1;
                dwc_modify_reg32(&global_regs->gotgctl, gotgctl.d32, 0);
            }
        }
    }

    if (gotgint.b.hstnegsucstschng) {
        /* Print statements during the HNP interrupt handling
         * can cause it to fail. */
        gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl);
        if (gotgctl.b.hstnegscs) {
            if (dwc_otg_is_host_mode(core_if)) {
                dwc_otg_pcd_t *pcd;

                core_if->op_state = B_HOST;
                /*
                 * Need to disable SOF interrupt immediately.
                 * When switching from device to host, the PCD
                 * interrupt handler won't handle the
                 * interrupt if host mode is already set. The
                 * HCD interrupt handler won't get called if
                 * the HCD state is HALT. This means that the
                 * interrupt does not get handled and Linux
                 * complains loudly.
                 */
                gintmsk.d32 = 0;
                gintmsk.b.sofintr = 1;
                dwc_modify_reg32(&global_regs->gintmsk, gintmsk.d32, 0);

                pcd = (dwc_otg_pcd_t *)core_if->pcd_cb->p;
                if (unlikely(!pcd)) {
                    DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!", __func__);
                    BUG();
                }
                SPIN_LOCK(&pcd->lock);
                pcd_stop(core_if);
                SPIN_UNLOCK(&pcd->lock);

                /*
                 * Initialize the Core for Host mode.
                 */
                hcd_start(core_if);
                core_if->op_state = B_HOST;
            }
        } else {
            gotgctl.d32 = 0;
            gotgctl.b.hnpreq = 1;
            gotgctl.b.devhnpen = 1;
            dwc_modify_reg32(&global_regs->gotgctl, gotgctl.d32, 0);
            DWC_DEBUGPL(DBG_ANY, "HNP Failed\n");
            DWC_ERROR("Device Not Connected/Responding\n");
        }
    }

    if (gotgint.b.hstnegdet) {
        /* The disconnect interrupt is set at the same time as
         * Host Negotiation Detected. During the mode
         * switch all interrupts are cleared so the disconnect
         * interrupt handler will not get executed.
         */
        DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: "
                    "Host Negotiation Detected++ (%s)\n",
                    (dwc_otg_is_host_mode(core_if) ? "Host" : "Device"));
        if (dwc_otg_is_device_mode(core_if)) {
            dwc_otg_pcd_t *pcd;

            DWC_DEBUGPL(DBG_ANY, "a_suspend->a_peripheral (%d)\n", core_if->op_state);
            hcd_disconnect(core_if);
            pcd = (dwc_otg_pcd_t *)core_if->pcd_cb->p;
            if (unlikely(!pcd)) {
                DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!", __func__);
                BUG();
            }
            SPIN_LOCK(&pcd->lock);
            pcd_start(core_if);
            SPIN_UNLOCK(&pcd->lock);
            core_if->op_state = A_PERIPHERAL;
        } else {
            dwc_otg_pcd_t *pcd;

            /*
             * Need to disable SOF interrupt immediately. When
             * switching from device to host, the PCD interrupt
             * handler won't handle the interrupt if host mode is
             * already set. The HCD interrupt handler won't get
             * called if the HCD state is HALT. This means that
             * the interrupt does not get handled and Linux
             * complains loudly.
             */
            gintmsk.d32 = 0;
            gintmsk.b.sofintr = 1;
            dwc_modify_reg32(&global_regs->gintmsk, gintmsk.d32, 0);

            pcd = (dwc_otg_pcd_t *)core_if->pcd_cb->p;
            if (unlikely(!pcd)) {
                DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!", __func__);
                BUG();
            }
            SPIN_LOCK(&pcd->lock);
            pcd_stop(core_if);
            SPIN_UNLOCK(&pcd->lock);
            hcd_start(core_if);
            core_if->op_state = A_HOST;
        }
    }

    if (gotgint.b.adevtoutchng) {
        DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: "
                    "A-Device Timeout Change++\n");
    }
    if (gotgint.b.debdone) {
        DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: "
                    "Debounce Done++\n");
    }

    /* Clear GOTGINT */
    dwc_write_reg32(&core_if->core_global_regs->gotgint, gotgint.d32);

    return 1;
}
void w_conn_id_status_change(struct work_struct *p)
{
    dwc_otg_core_if_t *core_if = container_of(p, dwc_otg_core_if_t, w_conn_id);
    uint32_t count = 0;
    gotgctl_data_t gotgctl = { .d32 = 0 };

    gotgctl.d32 = dwc_read_reg32(&core_if->core_global_regs->gotgctl);
    DWC_DEBUGPL(DBG_CIL, "gotgctl=%0x\n", gotgctl.d32);
    DWC_DEBUGPL(DBG_CIL, "gotgctl.b.conidsts=%d\n", gotgctl.b.conidsts);

    /* B-Device connector (Device Mode) */
    if (gotgctl.b.conidsts) {
        dwc_otg_pcd_t *pcd;

        /* Wait for switch to device mode. */
        while (!dwc_otg_is_device_mode(core_if)) {
            DWC_PRINT("Waiting for Peripheral Mode, Mode=%s\n",
                      (dwc_otg_is_host_mode(core_if) ? "Host" : "Peripheral"));
            MDELAY(100);
            if (++count > 10000)
                *(uint32_t *)NULL = 0;
        }
        core_if->op_state = B_PERIPHERAL;
        dwc_otg_core_init(core_if);
        dwc_otg_enable_global_interrupts(core_if);

        pcd = (dwc_otg_pcd_t *)core_if->pcd_cb->p;
        if (unlikely(!pcd)) {
            DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!", __func__);
            BUG();
        }
        SPIN_LOCK(&pcd->lock);
        pcd_start(core_if);
        SPIN_UNLOCK(&pcd->lock);
    } else {
        /* A-Device connector (Host Mode) */
        while (!dwc_otg_is_host_mode(core_if)) {
            DWC_PRINT("Waiting for Host Mode, Mode=%s\n",
                      (dwc_otg_is_host_mode(core_if) ? "Host" : "Peripheral"));
            MDELAY(100);
            if (++count > 10000)
                *(uint32_t *)NULL = 0;
        }
        core_if->op_state = A_HOST;
        /*
         * Initialize the Core for Host mode.
         */
        dwc_otg_core_init(core_if);
        dwc_otg_enable_global_interrupts(core_if);
        hcd_start(core_if);
    }
}

/**
 * This function handles the Connector ID Status Change Interrupt. It
 * reads the OTG Interrupt Register (GOTCTL) to determine whether this
 * is a Device to Host Mode transition or a Host Mode to Device
 * Transition.
 *
 * This only occurs when the cable is connected/removed from the PHY
 * connector.
 *
 * @param core_if Programming view of DWC_otg controller.
 */
int32_t dwc_otg_handle_conn_id_status_change_intr(dwc_otg_core_if_t *core_if)
{
    /*
     * Need to disable SOF interrupt immediately. If switching from device
     * to host, the PCD interrupt handler won't handle the interrupt if
     * host mode is already set. The HCD interrupt handler won't get
     * called if the HCD state is HALT. This means that the interrupt does
     * not get handled and Linux complains loudly.
     */
    gintmsk_data_t gintmsk = { .d32 = 0 };
    gintsts_data_t gintsts = { .d32 = 0 };

    gintmsk.b.sofintr = 1;
    dwc_modify_reg32(&core_if->core_global_regs->gintmsk, gintmsk.d32, 0);

    DWC_DEBUGPL(DBG_CIL, " ++Connector ID Status Change Interrupt++ (%s)\n",
                (dwc_otg_is_host_mode(core_if) ? "Host" : "Device"));

    /*
     * Need to schedule a work, as there are possible DELAY function calls.
     */
    queue_work(core_if->wq_otg, &core_if->w_conn_id);

    /* Set flag and clear interrupt */
    gintsts.b.conidstschng = 1;
    dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32);

    return 1;
}

/**
 * This interrupt indicates that a device is initiating the Session
 * Request Protocol to request the host to turn on bus power so a new
 * session can begin. The handler responds by turning on bus power. If
 * the DWC_otg controller is in low power mode, the handler brings the
 * controller out of low power mode before turning on bus power.
 *
 * @param core_if Programming view of DWC_otg controller.
 */
int32_t dwc_otg_handle_session_req_intr(dwc_otg_core_if_t *core_if)
{
    hprt0_data_t hprt0;
    gintsts_data_t gintsts;

#ifndef DWC_HOST_ONLY
    DWC_DEBUGPL(DBG_ANY, "++Session Request Interrupt++\n");

    if (dwc_otg_is_device_mode(core_if)) {
        DWC_PRINT("SRP: Device mode\n");
    } else {
        DWC_PRINT("SRP: Host mode\n");

        /* Turn on the port power bit. */
        hprt0.d32 = dwc_otg_read_hprt0(core_if);
        hprt0.b.prtpwr = 1;
        dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);

        /* Start the Connection timer. So a message can be displayed
         * if connect does not occur within 10 seconds. */
        hcd_session_start(core_if);
    }
#endif

    /* Clear interrupt */
    gintsts.d32 = 0;
    gintsts.b.sessreqintr = 1;
    dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32);

    return 1;
}

void w_wakeup_detected(struct work_struct *p)
{
    struct delayed_work *dw = container_of(p, struct delayed_work, work);
    dwc_otg_core_if_t *core_if = container_of(dw, dwc_otg_core_if_t, w_wkp);
    /*
     * Clear the Resume after 70ms. (Need 20 ms minimum. Use 70 ms
     * so that OPT tests pass with all PHYs).
     */
    hprt0_data_t hprt0 = { .d32 = 0 };

    hprt0.d32 = dwc_otg_read_hprt0(core_if);
    DWC_DEBUGPL(DBG_ANY, "Resume: HPRT0=%0x\n", hprt0.d32);
    // MDELAY(70);
    hprt0.b.prtres = 0;  /* Resume */
    dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
    DWC_DEBUGPL(DBG_ANY, "Clear Resume: HPRT0=%0x\n",
                dwc_read_reg32(core_if->host_if->hprt0));
}

/**
 * This interrupt indicates that the DWC_otg controller has detected a
 * resume or remote wakeup sequence. If the DWC_otg controller is in
 * low power mode, the handler must bring the controller out of low
 * power mode. The controller automatically begins resume
 * signaling. The handler schedules a time to stop resume signaling.
 */
int32_t dwc_otg_handle_wakeup_detected_intr(dwc_otg_core_if_t *core_if)
{
    gintsts_data_t gintsts;

    DWC_DEBUGPL(DBG_ANY, "++Resume and Remote Wakeup Detected Interrupt++\n");

    if (dwc_otg_is_device_mode(core_if)) {
        dctl_data_t dctl = { .d32 = 0 };

        DWC_DEBUGPL(DBG_PCD, "DSTS=0x%0x\n",
                    dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts));

#ifdef PARTIAL_POWER_DOWN
        if (core_if->hwcfg4.b.power_optimiz) {
            pcgcctl_data_t power = { .d32 = 0 };

            power.d32 = dwc_read_reg32(core_if->pcgcctl);
            DWC_DEBUGPL(DBG_CIL, "PCGCCTL=%0x\n", power.d32);

            power.b.stoppclk = 0;
            dwc_write_reg32(core_if->pcgcctl, power.d32);

            power.b.pwrclmp = 0;
            dwc_write_reg32(core_if->pcgcctl, power.d32);

            power.b.rstpdwnmodule = 0;
            dwc_write_reg32(core_if->pcgcctl, power.d32);
        }
#endif
        /* Clear the Remote Wakeup Signalling */
        dctl.b.rmtwkupsig = 1;
        dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32, 0);

        if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) {
            core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p);
        }
    } else {
        pcgcctl_data_t pcgcctl = { .d32 = 0 };

        /* Restart the Phy Clock */
        pcgcctl.b.stoppclk = 1;
        dwc_modify_reg32(core_if->pcgcctl, pcgcctl.d32, 0);
        queue_delayed_work(core_if->wq_otg, &core_if->w_wkp, ((70 * HZ / 1000) + 1));
    }

    /* Clear interrupt */
    gintsts.d32 = 0;
    gintsts.b.wkupintr = 1;
    dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32);

    return 1;
}

/**
 * This interrupt indicates that a device has been disconnected from
 * the root port.
 */
int32_t dwc_otg_handle_disconnect_intr(dwc_otg_core_if_t *core_if)
{
    gintsts_data_t gintsts;

    DWC_DEBUGPL(DBG_ANY, "++Disconnect Detected Interrupt++ (%s) %s\n",
                (dwc_otg_is_host_mode(core_if) ? "Host" : "Device"),
                op_state_str(core_if));

    /** @todo Consolidate this if statement. */
#ifndef DWC_HOST_ONLY
    if (core_if->op_state == B_HOST) {
        dwc_otg_pcd_t *pcd;

        /* If in device mode Disconnect and stop the HCD, then
         * start the PCD. */
        hcd_disconnect(core_if);
        pcd = (dwc_otg_pcd_t *)core_if->pcd_cb->p;
        if (unlikely(!pcd)) {
            DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!", __func__);
            BUG();
        }
        SPIN_LOCK(&pcd->lock);
        pcd_start(core_if);
        SPIN_UNLOCK(&pcd->lock);
        core_if->op_state = B_PERIPHERAL;
    } else if (dwc_otg_is_device_mode(core_if)) {
        gotgctl_data_t gotgctl = { .d32 = 0 };

        gotgctl.d32 = dwc_read_reg32(&core_if->core_global_regs->gotgctl);
        if (gotgctl.b.hstsethnpen == 1) {
            /* Do nothing, if HNP in process the OTG
             * interrupt "Host Negotiation Detected"
             * interrupt will do the mode switch. */
        } else if (gotgctl.b.devhnpen == 0) {
            dwc_otg_pcd_t *pcd;

            /* If in device mode Disconnect and stop the HCD, then
             * start the PCD. */
            hcd_disconnect(core_if);
            pcd = (dwc_otg_pcd_t *)core_if->pcd_cb->p;
            if (unlikely(!pcd)) {
                DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!", __func__);
                BUG();
            }
            SPIN_LOCK(&pcd->lock);
            pcd_start(core_if);
            SPIN_UNLOCK(&pcd->lock);
            core_if->op_state = B_PERIPHERAL;
        } else {
            DWC_DEBUGPL(DBG_ANY, "!a_peripheral && !devhnpen\n");
        }
    } else {
        if (core_if->op_state == A_HOST) {
            /* A-Cable still connected but device disconnected. */
            hcd_disconnect(core_if);
        }
    }
#endif

    gintsts.d32 = 0;
    gintsts.b.disconnect = 1;
    dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32);

    return 1;
}

/**
 * This interrupt indicates that SUSPEND state has been detected on
 * the USB.
 *
 * For HNP the USB Suspend interrupt signals the change from
 * "a_peripheral" to "a_host".
 *
 * When power management is enabled the core will be put in low power
 * mode.
 */
int32_t dwc_otg_handle_usb_suspend_intr(dwc_otg_core_if_t *core_if)
{
    dsts_data_t dsts;
    gintsts_data_t gintsts;

    DWC_DEBUGPL(DBG_ANY, "USB SUSPEND\n");

    if (dwc_otg_is_device_mode(core_if)) {
        dwc_otg_pcd_t *pcd;

        /* Check the Device status register to determine if the Suspend
         * state is active. */
        dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts);
        DWC_DEBUGPL(DBG_PCD, "DSTS=0x%0x\n", dsts.d32);
        DWC_DEBUGPL(DBG_PCD, "DSTS.Suspend Status=%d "
                    "HWCFG4.power Optimize=%d\n",
                    dsts.b.suspsts, core_if->hwcfg4.b.power_optimiz);

#ifdef PARTIAL_POWER_DOWN
        /** @todo Add a module parameter for power management. */
        if (dsts.b.suspsts && core_if->hwcfg4.b.power_optimiz) {
            pcgcctl_data_t power = { .d32 = 0 };

            DWC_DEBUGPL(DBG_CIL, "suspend\n");

            power.b.pwrclmp = 1;
            dwc_write_reg32(core_if->pcgcctl, power.d32);

            power.b.rstpdwnmodule = 1;
            dwc_modify_reg32(core_if->pcgcctl, 0, power.d32);

            power.b.stoppclk = 1;
            dwc_modify_reg32(core_if->pcgcctl, 0, power.d32);
        } else {
            DWC_DEBUGPL(DBG_ANY, "disconnect?\n");
        }
#endif
        /* PCD callback for suspend. */
        pcd = (dwc_otg_pcd_t *)core_if->pcd_cb->p;
        if (unlikely(!pcd)) {
            DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!", __func__);
            BUG();
        }
        SPIN_LOCK(&pcd->lock);
        pcd_suspend(core_if);
        SPIN_UNLOCK(&pcd->lock);
    } else {
uint64_t poll_pkt_message(struct my_context *mcontext, struct pkt_controls *pkt_ctls)
{
    uint64_t unfreed_req_id = 0;
    int mudm_status_error = 0;
    int refid_list_length = 0;
    uint64_t refid_list[NUM_REFID];
    uint32_t error_list[NUM_REFID];
    uint32_t ret_val[NUM_REFID];
    uint64_t ref_desc_count = -1;
    uint64_t timestamp;
    struct pkt_descriptor *pktd = NULL;
    struct pkt_descriptor *last_in_pktd2repost_list = NULL;
    struct pkt_descriptor *last_in_pktd2free_list = NULL;
    struct pkt_descriptor *pktd2free_list = NULL;
    struct pkt_descriptor *pktd2repost_list = NULL;

    //if (NULL == pkt_ctls->pkt_reqid_list) return 0;  //checked before calling into here

    SPIN_LOCK(pkt_ctls->pkt_list_lock);
    pktd = pkt_ctls->pkt_reqid_list;
    pkt_ctls->pkt_reqid_list = NULL;  //detach the list
    SPIN_UNLOCK(pkt_ctls->pkt_list_lock);

    if (unlikely(NULL == pktd))
        return 0;

    // comment the next line out to cause job hang
    ref_desc_count = MUSPI_getHwDescCount(pkt_ctls->IdToInjFifo);
    timestamp = GetTimeBase2();

    while (pktd != NULL) {
        if (ref_desc_count >= pktd->desc_cnt) {
            MUDM_MU_PKT_PCOMP(&mcontext->mudm_hi_wrap_flight_recorder, pktd,
                              pktd->ccontext, pktd->desc_cnt, pktd->req_id);
            if (pktd->req_id) {
                refid_list[refid_list_length] = pktd->req_id;
                pktd->req_id = 0;
                error_list[refid_list_length] = pktd->error_val;
                pktd->error_val = 0;
                refid_list_length++;
            }
            if (pktd2free_list) {
                last_in_pktd2free_list->next = pktd;
                pktd->prev = last_in_pktd2free_list;
                last_in_pktd2free_list = pktd;
            } else {
                pktd2free_list = pktd;
                last_in_pktd2free_list = pktd;
                pktd2free_list->prev = NULL;
            }
            pktd = pktd->next;
            last_in_pktd2free_list->next = NULL;

            //if the list saturates, return resources
            //and then send back refid list
            if (unlikely(NUM_REFID == refid_list_length)) {
                PRINT("FLUSHING: refid_list_length=%d \n", refid_list_length);
                SPIN_LOCK(pkt_ctls->pkt_list_lock);
                last_in_pktd2free_list->next = pkt_ctls->pkt_free_list;
                pkt_ctls->pkt_free_list = pktd2free_list;
                SPIN_UNLOCK(pkt_ctls->pkt_list_lock);
                pktd2free_list = NULL;
                mudm_status_error = (mcontext->status)((void *)refid_list, error_list,
                                                       mcontext->callback_context,
                                                       ret_val, refid_list_length);
                mudm_status_error = 0;
                refid_list_length = 0;
            }  //end of flushing the list
        } else {  // NOT (ref_desc_count >= pktd->desc_cnt)
            if (pktd->req_id) {
                unfreed_req_id++;
            }

            //96000000000ull is about 1 minute on a 1.6GHz machine
#define TOO_MANY_CYCLES 192000000000ull
            if (likely(pktd->timestamp)) {
                if (unlikely(((timestamp - pktd->timestamp) > (TOO_MANY_CYCLES)) &&
                             (timestamp > pktd->timestamp))) {
                    if (mcontext->StuckState == 0) {
                        uint64_t cycles_per_sec = microsec2cycles(mcontext->personality, 1000000);
                        uint64_t seconds_diff = (timestamp - pktd->timestamp) / cycles_per_sec;
                        uint64_t num_uninjected_descriptors =
                            log_injfifo_info(pktd->ccontext->injfifo_ctls,
                                             &mcontext->mudm_hi_wrap_flight_recorder,
                                             MUDMRAS_STUCK_INJ_RESETCOMPUTES);
                        uint64_t entry_num =
                            MUDM_STUCK_PKT(&mcontext->mudm_hi_wrap_flight_recorder,
                                           pktd->timestamp, timestamp,
                                           pktd->ccontext, pktd->req_id);
                        MPRINT("num_uninjected_descriptors=%llu \n", (LLUS)num_uninjected_descriptors);
                        MPRINT("STUCK pktd=%p timestamp=%llx pktd->timestamp=%llx pktd->desc_cnt=%llu ref_desc_count=%llu req_id=%llx\n",
                               pktd, (LLUS)timestamp, (LLUS)pktd->timestamp,
                               (LLUS)pktd->desc_cnt, (LLUS)ref_desc_count, (LLUS)pktd->req_id);
                        MPRINT("STUCKTIME=%llu (seconds) \n", (LLUS)seconds_diff);
                        dump_flightlog_leadup(&mcontext->mudm_hi_wrap_flight_recorder, entry_num, 10);
                        dump_ccontext_info(pktd->ccontext);
                        mcontext->StuckState = 1;  //extra dumping at free time

                        //! \todo TODO fix RAS info for stuck packet
                        MUDM_RASBEGIN(5);
                        MUDM_RASPUSH((uint64_t)pktd)
                        MUDM_RASPUSH(timestamp)
                        MUDM_RASPUSH(pktd->timestamp)
                        MUDM_RASPUSH(pktd->desc_cnt)
                        MUDM_RASPUSH(ref_desc_count)
                        MUDM_RASFINAL(MUDMRAS_STUCK_PKT);
                    }  //end of dumping info for stuck packet
                    pktd->timestamp = 0;
                }
            }

            if (pktd2repost_list) {
                last_in_pktd2repost_list->next = pktd;
                pktd->prev = last_in_pktd2repost_list;
                last_in_pktd2repost_list = pktd;
            } else {
                pktd2repost_list = pktd;
                last_in_pktd2repost_list = pktd;
                pktd->prev = NULL;
            }
            pktd = pktd->next;
            last_in_pktd2repost_list->next = NULL;
        }  //end of else
    }  //end of while

    if (pktd2free_list) {
        PRINT("FLUSHING: refid_list_length=%d \n", refid_list_length);
        SPIN_LOCK(pkt_ctls->pkt_list_lock);
        last_in_pktd2free_list->next = pkt_ctls->pkt_free_list;
        pkt_ctls->pkt_free_list = pktd2free_list;
        SPIN_UNLOCK(pkt_ctls->pkt_list_lock);
    }

    if (pktd2repost_list) {
        SPIN_LOCK(pkt_ctls->pkt_list_lock);
        last_in_pktd2repost_list->next = pkt_ctls->pkt_reqid_list;
        pkt_ctls->pkt_reqid_list = pktd2repost_list;
        SPIN_UNLOCK(pkt_ctls->pkt_list_lock);
    }

    if (refid_list_length) {
        mudm_status_error = (mcontext->status)((void *)refid_list, error_list,
                                               mcontext->callback_context,
                                               ret_val, refid_list_length);
    }

    if (unlikely(mudm_status_error != 0)) {
        int i = 0;
        for (i = 0; i < refid_list_length; i++) {
            if (ret_val[i])
                MPRINT("mudm_status_error i=%d refid_list[i]=%p ret_val[i]=%llx \n",
                       i, (void *)refid_list[i], (LLUS)ret_val[i]);
        }
    }

    return unfreed_req_id;
}
static int st_fetch(void *key, size_t klen, void **value, size_t *vlen, void *priv)
{
    storage_mysql_t *st = (storage_mysql_t *)priv;
    char *keystr = malloc((klen * 2) + 1);
    char *p = (char *)key;
    char *o = (char *)keystr;
    int i;

    for (i = 0; i < klen; i++) {
        snprintf(o, 3, "%02x", p[i]);
        o += 2;
    }
    *o = 0;

    db_connection_t *dbc = st_get_dbconnection(st);
    if (!dbc) {
        free(keystr);
        return -1;
    }

    MYSQL_BIND bnd = {
        .buffer_type = MYSQL_TYPE_STRING,
        .buffer = keystr,
        .buffer_length = strlen(keystr)
    };

    if (mysql_stmt_bind_param(dbc->select_stmt, &bnd) != 0) {
        // TODO - error messages
        SPIN_UNLOCK(&dbc->lock);
        free(keystr);
        return -1;
    }

    if (mysql_stmt_execute(dbc->select_stmt) != 0) {
        // TODO - error messages
        fprintf(stderr, "Can't execute fetch statement : %s\n",
                mysql_stmt_error(dbc->select_stmt));
        SPIN_UNLOCK(&dbc->lock);
        free(keystr);
        return -1;
    }

    size_t size = 256;
    void *data = malloc(size);
    my_bool error = 0;
    MYSQL_BIND obnd = {
        .buffer_type = MYSQL_TYPE_LONG_BLOB,
        .buffer = data,
        .buffer_length = size,
        .length = &size,
        .error = &error
    };

    mysql_stmt_bind_result(dbc->select_stmt, &obnd);

    int rc = mysql_stmt_fetch(dbc->select_stmt);
    if (error == 1) {
        data = realloc(data, size);
        obnd.buffer = data;
        obnd.buffer_length = size;
        error = 0;
        mysql_stmt_bind_result(dbc->select_stmt, &obnd);
        mysql_stmt_fetch(dbc->select_stmt);
    }

    if (rc != 0 || obnd.is_null) {
        free(data);
        data = NULL;
    } else {
        if (value) {
            *value = data;
        } else {
            free(data);
        }
        if (vlen) {
            *vlen = size;
        }
    }

    mysql_stmt_free_result(dbc->select_stmt);
    mysql_stmt_reset(dbc->select_stmt);

    SPIN_UNLOCK(&dbc->lock);
    free(keystr);

    return 0;
}
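/*
 * Every exit path in st_fetch() above releases dbc->lock without taking it in
 * this function, which only works if st_get_dbconnection() returns the
 * connection already locked. That helper is not part of this excerpt; the
 * sketch below is an assumed illustration of that contract (the connection
 * pool field is hypothetical).
 */
static db_connection_t *st_get_dbconnection_sketch(storage_mysql_t *st)
{
    db_connection_t *dbc = st->connections[0];   /* hypothetical pool slot */

    if (!dbc)
        return NULL;

    SPIN_LOCK(&dbc->lock);   /* released by the caller, e.g. st_fetch() */
    return dbc;
}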
static void process_idle_times (ThreadPool *tp, gint64 t)
{
    gint64 ticks;
    gint64 avg;
    gboolean compute_avg;
    gint new_threads;
    gint64 per1;

    if (tp->ignore_times || t <= 0)
        return;

    compute_avg = FALSE;
    ticks = mono_100ns_ticks ();
    t = ticks - t;

    SPIN_LOCK (tp->sp_lock);
    if (tp->ignore_times) {
        SPIN_UNLOCK (tp->sp_lock);
        return;
    }
    tp->time_sum += t;
    tp->n_sum++;
    if (tp->last_check == 0)
        tp->last_check = ticks;
    else if (tp->last_check > 0 && (ticks - tp->last_check) > 5000000) {
        tp->ignore_times = 1;
        compute_avg = TRUE;
    }
    SPIN_UNLOCK (tp->sp_lock);

    if (!compute_avg)
        return;

    //printf ("Items: %d Time elapsed: %.3fs\n", tp->n_sum, (ticks - tp->last_check) / 10000.0);
    tp->last_check = ticks;
    new_threads = 0;
    avg = tp->time_sum / tp->n_sum;
    if (tp->averages [1] == 0) {
        tp->averages [1] = avg;
    } else {
        per1 = ((100 * (ABS (avg - tp->averages [1]))) / tp->averages [1]);
        if (per1 > 5) {
            if (avg > tp->averages [1]) {
                if (tp->averages [1] < tp->averages [0]) {
                    new_threads = -1;
                } else {
                    new_threads = 1;
                }
            } else if (avg < tp->averages [1] && tp->averages [1] < tp->averages [0]) {
                new_threads = 1;
            }
        } else {
            int min, n;
            min = tp->min_threads;
            n = tp->nthreads;
            if ((n - min) < min && tp->busy_threads == n)
                new_threads = 1;
        }
        /*
        if (new_threads != 0) {
            printf ("n: %d per1: %lld avg=%lld avg1=%lld avg0=%lld\n",
                    new_threads, per1, avg, tp->averages [1], tp->averages [0]);
        }
        */
    }

    tp->time_sum = 0;
    tp->n_sum = 0;
    tp->averages [0] = tp->averages [1];
    tp->averages [1] = avg;
    tp->ignore_times = 0;

    if (new_threads == -1) {
        if (tp->destroy_thread == 0 && InterlockedCompareExchange (&tp->destroy_thread, 1, 0) == 0)
            pulse_on_new_job (tp);
    }
}