bool CThreadPool::getStopForCurrentInstanceRequestedAndActivated()
{
    if (getEmergencyStopForCurrentInstance())
        return(true);
    _lock(15);
    if (_threadQueue.size()!=0)
    { // Make sure the thread is not running in free mode. Otherwise we return false
        if (!VThread::areThreadIDsSame(_threadQueue[_threadQueue.size()-1],VThread::getCurrentThreadId()))
        {
            _unlock(15);
            return(false);
        }
    }
    bool retVal=false;
    int inst=App::ct->getCurrentInstanceIndex();
    for (int i=0;i<int(_instancesAndTheirStopRequestTimes.size())/2;i++)
    {
        if (_instancesAndTheirStopRequestTimes[2*i+0]==inst)
        {
            retVal=VDateTime::getTimeDiffInMs(_instancesAndTheirStopRequestTimes[2*i+1])>STOP_REQUEST_DELAY_IN_MS;
            break;
        }
    }
    _unlock(15);
    return(retVal);
}
void CThreadPool::switchBackToPreviousThreadIfNeeded()
{
    _lock(5);
    int fql=int(_threadQueue.size());
    if (fql>1)
    { // Switch back only if not main thread
        int totalTimeInMs=VDateTime::getTimeDiffInMs(_threadStartTime[fql-1]);
        for (int i=0;i<int(_allThreadData.size());i++)
        {
            if (VThread::areThreadIDsSame(_allThreadData[i]->threadID,_threadQueue[fql-1]))
            {
                if (_allThreadData[i]->threadDesiredTiming<=totalTimeInMs)
                {
                    if (!_allThreadData[i]->threadShouldNotSwitch)
                    {
                        _unlock(5);
                        switchBackToPreviousThread(); // Has its own locking / unlocking
                        return;
                    }
                }
                break;
            }
        }
    }
    _unlock(5);
}
void c2_db_cronosII_message_set_mark (C2Db *db, gboolean mark)
{
    C2Mailbox *mailbox;
    FILE *fd;
    gint pos;

    mailbox = db->mailbox;

    _lock (mailbox);
    if (!goto_mid (mailbox, db->mid))
    {
        /* Release the mailbox lock before bailing out, as the other error path below does */
        _unlock (mailbox);
        return;
    }

    fd = mailbox->protocol.cronosII.fd;

    /* Move to the 2nd \r */
    pos = ftell (fd);
    if (c2_fd_move_to (fd, '\r', 2, TRUE, TRUE) < 0)
    {
        fseek (fd, pos, SEEK_SET);
        _rewind (mailbox);
        _unlock (mailbox);
        return;
    }

    fputc (mark ? '1' : '0', fd);
    fseek (fd, pos, SEEK_SET);

    _unlock (mailbox);
}
bool CThreadPool::getEmergencyStopForCurrentInstance()
{
    _lock(12);
    if (_threadQueue.size()!=0)
    { // Make sure the thread is not running in free mode. Otherwise we return false
        if (!VThread::areThreadIDsSame(_threadQueue[_threadQueue.size()-1],VThread::getCurrentThreadId()))
        {
            _unlock(12);
            return(false);
        }
    }
    bool retVal=false;
    int inst=App::ct->getCurrentInstanceIndex();
    for (int i=0;i<int(_instancesThatRequestedEmergencyStop.size());i++)
    {
        if (_instancesThatRequestedEmergencyStop[i]==inst)
        {
            retVal=true;
            break;
        }
    }
    _unlock(12);
    return(retVal);
}
void *chkheap(void)
{
#ifdef DEBUG
    PACKET *pkt, *top;

    _lock();

    /* find the start of the heap */
    pkt = sys_base;
    top = (PACKET *)((char *)sys_base + memsize - sizeof(PACKET));

    while (pkt < top)
    {
        if (pkt->guard != GUARDWORD)
        {
            _unlock();
            return (void *) &pkt->guard;
        }

        if (pkt->packet_size > 0)
            pkt = (PACKET *)((char *)pkt + pkt->packet_size + OVERHEAD);
        else
            pkt = (PACKET *)((char *)pkt + -pkt->packet_size + OVERHEAD);
    }

    _unlock();
#endif
    return 0;
}
void *malloc(size_t size)
{
    register PACKET *current;
    register size_t  newsize;
    register size_t  oldsize;

    if (size <= 0) return NULL;
    if (check_alloc_size(size) == 0) return 0;

    if (need_mem_init) minit();

    _lock();

    /*-----------------------------------------------------------------------*/
    /* SIZE IS CALCULATED BY FIRST ALIGNING (SIZE + BLOCK OVERHEAD) TO THE    */
    /* REQUIRED MINIMUM ALIGNMENT AND THEN SUBTRACTING THE BLOCK OVERHEAD.    */
    /*-----------------------------------------------------------------------*/
    newsize = _M_RNDUP((size + _M_BLOCK_OVERHEAD), _M_MIN_ALN) - _M_BLOCK_OVERHEAD;

    current = sys_free;

    /*-----------------------------------------------------------------------*/
    /* SCAN THROUGH FREE LIST FOR PACKET LARGE ENOUGH TO CONTAIN PACKET       */
    /*-----------------------------------------------------------------------*/
    while (current && current->packet_size < newsize)
        current = current->size_ptr;

    if (!current)
    {
        _unlock();
        return NULL;
    }

    oldsize = current->packet_size;   /* REMEMBER OLD SIZE            */
    mremove(current);                 /* REMOVE PACKET FROM FREE LIST */

    /*-----------------------------------------------------------------------*/
    /* IF PACKET IS LARGER THAN NEEDED, FREE EXTRA SPACE AT END               */
    /* BY INSERTING REMAINING SPACE INTO FREE LIST.                           */
    /*-----------------------------------------------------------------------*/
    if (oldsize - newsize >= (_M_MIN_BLOCK + _M_BLOCK_OVERHEAD))
    {
        register PACKET *next = (PACKET *) ((char *) current + _M_BLOCK_OVERHEAD + newsize);

        next->packet_size = oldsize - newsize - _M_BLOCK_OVERHEAD;
        minsert(next);
        current->packet_size = newsize;
    }

    current->packet_size |= _M_BLOCK_USED;

    _unlock();
    return (char *)current + _M_BLOCK_OVERHEAD;
}
void LinuxThread::_assume()
{
    _lock(m_runMutex);
    if(!m_bRun){
        m_bRun = true;
        _unlock(m_runMutex);
        _notifyAll(m_cond);
    }else{
        _unlock(m_runMutex);
    }
}
void waitevent(struct semaphore * sem)
{
    _lock();
    if(sem->value==0)
    {
        kePendTask(&sem->pendingtasks, tasks[currentTaskId]);
        _unlock();
        keDoSchedNormal();
        _lock();
    }
    sem->value=0;
    _unlock();
}
void LinuxThread::_suspend()
{
    _lock(m_runMutex);
    if(m_bRun){
        m_bRun = false;
        _unlock(m_runMutex);
        _notifyAll(m_cond);
        _lock(m_runMutex);
    }
    while(!m_bRun)
        _waitForCondition(m_cond,m_runMutex);
    _unlock(m_runMutex);
}
void minit(void)
{
    _lock();

    memsize = MEMORY_SIZE;

    /*-----------------------------------------------------------------------*/
    /* To initialize the memory system, set up the free list to point to     */
    /* the entire heap, and initialize heap to a single empty packet.        */
    /* We're assuming _sys_memory is aligned.  Assembly is used since the    */
    /* --near_data=globals option would cause a C assignment to produce      */
    /* incorrect code because _sys_memory appears to be initialized data.    */
    /*-----------------------------------------------------------------------*/
    sys_free = (PACKET*)_sys_memory;

    if (memsize & 1) --memsize;

    sys_free->packet_size = -(memsize - OVERHEAD);   /* NEGATIVE==FREE */
    sys_free->next_free   = LIMIT;

#ifdef DEBUG
    sys_free->guard = GUARDWORD;
#endif

    sys_base   = sys_free;
    first_call = 0;                                  /* CLEAR THE FLAG */

    _unlock();
}
bool LinuxThread::_shouldTerminate()
{
    bool terminate;
    _lock(m_terminateMutex);
    terminate = m_bTerminate;
    _unlock(m_terminateMutex);
    return terminate;
}
void CThreadPool::setRequestStopForCurrentInstance(bool stop)
{
    _lock(13);
    int inst=App::ct->getCurrentInstanceIndex();
    bool done=false;
    for (int i=0;i<int(_instancesAndTheirStopRequestTimes.size())/2;i++)
    {
        if (_instancesAndTheirStopRequestTimes[2*i+0]==inst)
        {
            if (!stop)
                _instancesAndTheirStopRequestTimes.erase(_instancesAndTheirStopRequestTimes.begin()+2*i,_instancesAndTheirStopRequestTimes.begin()+2*i+2);
            done=true;
            break;
        }
    }
    if (!done)
    {
        if (stop)
        {
            _instancesAndTheirStopRequestTimes.push_back(inst);
            _instancesAndTheirStopRequestTimes.push_back(VDateTime::getTimeInMs());
        }
    }
    _unlock(13);
}
static int adb_release(struct inode *ip, struct file *fp)
{
    struct adb_context *ctxt = &_context;

    _unlock(&ctxt->open_excl);
    return 0;
}
Status LinuxThread::Wait()
{
    CHECK_ERROR(m_threadId);

    _lock(m_terminateMutex);
    if(m_bTerminate){
        _unlock(m_terminateMutex);
        return ER;
    }
    _unlock(m_terminateMutex);

    _lock(m_runMutex);
    while(m_bRun)
        _waitForCondition(m_cond,m_runMutex);
    _unlock(m_runMutex);
    return OK;
}
void minit(void)
{
    _lock();

    memsize = MEMORY_SIZE;

    /*-----------------------------------------------------------------------*/
    /* To initialize the memory system, set up the free list to point to     */
    /* the entire heap, and initialize heap to a single empty packet.        */
    /*-----------------------------------------------------------------------*/
    /* We may need to adjust the start of the heap to ensure that the        */
    /* address of the field "next_free" is strictly aligned.                 */
    /*-----------------------------------------------------------------------*/
    if (((memsz_t)_sys_memory ^ OVERHEAD) & 1)
    {
        sys_free = (PACKET *) (_sys_memory + 1);
        --memsize;
    }
    else
    {
        sys_free = (PACKET *) _sys_memory;
    }

    if (memsize & 1) --memsize;

    sys_free->packet_size = -(memsize - OVERHEAD);   /* NEGATIVE==FREE */
    sys_free->next_free   = LIMIT;

#ifdef DEBUG
    sys_free->guard = GUARDWORD;
#endif

    sys_base   = sys_free;
    first_call = 0;                                  /* CLEAR THE FLAG */

    _unlock();
}
struct taskblock* keNewTask(char *name, TASK_ENTRY entry, uint32 param, uint32 priority, uint32 stacksize)
{
    int i;
    struct taskblock* newtask;

    if(priority>15)
        return 0;

    i=keNewTaskId();
    if(i==0xFFFFFFFF)
        return 0;

    if(stacksize==0)
        stacksize=0x4000;

    tasks[i]=keMalloc(stacksize+sizeof(struct taskblock));
    newtask=tasks[i];

    newtask->next=NULL;
    newtask->std[0] = tasks[currentTaskId]->std[0];
    newtask->std[1] = tasks[currentTaskId]->std[1];
    newtask->taskid=i;
    newtask->priority=priority;
    newtask->queue=NULL;
    strncpy(newtask->taskname, name, 32);
    newtask->taskname[31]=0;

    newtask->esp=(uint32*)&newtask->stacks[stacksize-16];
    newtask->esp[0]=(uint32)entry;
    newtask->esp[1]=(uint32)keTaskEnd;
    newtask->esp[2]=param;

    _lock();
    keActiveTask(newtask);
    _unlock();

    return newtask;
}
int fini(void)
{
    _lock();
    _free_libstate();
    _unlock();
    return SLURM_SUCCESS;
}
void c2_db_cronosII_message_set_state (C2Db *db, C2MessageState state)
{
    C2Mailbox *mailbox;
    gchar c = ' ';

    switch (state)
    {
        case C2_MESSAGE_UNREADED:
            c = 'N';
            break;
        case C2_MESSAGE_READED:
            c = ' ';
            break;
        case C2_MESSAGE_REPLIED:
            c = 'R';
            break;
        case C2_MESSAGE_FORWARDED:
            c = 'F';
            break;
        default:
            g_assert_not_reached ();
    }

    mailbox = db->mailbox;

    _lock (mailbox);
    if (!goto_mid (mailbox, db->mid))
    {
        /* Release the mailbox lock before bailing out */
        _unlock (mailbox);
        return;
    }

    fputc (c, mailbox->protocol.cronosII.fd);
    fseek (mailbox->protocol.cronosII.fd, -1, SEEK_CUR);

    _unlock (mailbox);
}
bool CThreadPool::setThreadSwitchTiming(int timeInMs)
{
    _lock(1);
    bool retVal=false;
    int fqs=int(_threadQueue.size());
    if (fqs>1)
    {
        VTHREAD_ID_TYPE fID=_threadQueue[fqs-1];
        for (int i=0;i<int(_allThreadData.size());i++)
        {
            if (VThread::areThreadIDsSame(_allThreadData[i]->threadID,fID))
            {
                if (timeInMs<0)
                    timeInMs=0;
                if (timeInMs>200)
                    timeInMs=200;
                _allThreadData[i]->threadDesiredTiming=timeInMs;
                retVal=true;
                break;
            }
        }
    }
    _unlock(1);
    return(retVal);
}
EXPORT_SYM const char *
hdfs_namenode_connect(struct hdfs_namenode *n, const char *host, const char *port)
{
    const char *error = NULL;

    _lock(&n->nn_lock);
    ASSERT(n->nn_sock == -1);

    if (n->nn_kerb == HDFS_TRY_KERB || n->nn_kerb == HDFS_REQUIRE_KERB) {
        int r = sasl_client_new("hdfs", host, NULL/*localip*/,
            NULL/*remoteip*/, NULL/*CBs*/, 0/*sec flags*/, &n->nn_sasl_ctx);
        if (r != SASL_OK) {
            error = sasl_errstring(r, NULL, NULL);
            goto out;
        }
    }

    error = _connect(&n->nn_sock, host, port);
    if (error)
        goto out;

out:
    _unlock(&n->nn_lock);
    return error;
}
void flashpage_write_raw(void *target_addr, const void *data, size_t len)
{
    /* The actual minimal block size for writing is 16B, thus we
     * assert we write on multiples and no less of that length. */
    assert(!(len % FLASHPAGE_RAW_BLOCKSIZE));
    /* ensure 4 byte aligned writes */
    assert(!(((unsigned)target_addr % FLASHPAGE_RAW_ALIGNMENT) ||
            ((unsigned)data % FLASHPAGE_RAW_ALIGNMENT)));
    /* ensure the length doesn't exceed the actual flash size */
    assert(((unsigned)target_addr + len) <
           (CPU_FLASH_BASE + (FLASHPAGE_SIZE * FLASHPAGE_NUMOF)));

    uint32_t *dst = (uint32_t *)target_addr;
    const uint32_t *data_addr = data;

    /* write 4 bytes in one go */
    len /= 4;

    _unlock();
    NVMCTRL->CTRLA.reg = (NVMCTRL_CTRLA_CMDEX_KEY | NVMCTRL_CTRLA_CMD_PBC);
    for (unsigned i = 0; i < len; i++) {
        *dst++ = *data_addr++;
    }
    NVMCTRL->CTRLA.reg = (NVMCTRL_CTRLA_CMDEX_KEY | NVMCTRL_CTRLA_CMD_WP);
    _lock();
}
bool CThreadPool::setThreadResumeLocation(int location,int order)
{
    _lock(99);
    bool retVal=false;
    int fqs=int(_threadQueue.size());
    if (fqs>1)
    {
        VTHREAD_ID_TYPE fID=_threadQueue[fqs-1];
        for (int i=0;i<int(_allThreadData.size());i++)
        {
            if (VThread::areThreadIDsSame(_allThreadData[i]->threadID,fID))
            {
                if (order<0)
                    order=-1;
                if (order>0)
                    order=1;
                _allThreadData[i]->threadResumeLocationAndOrder=location*3+(order+1);
                retVal=true;
                break;
            }
        }
    }
    _unlock(99);
    return(retVal);
}
EXPORT_SYM struct hdfs_error
hdfs_namenode_connect(struct hdfs_namenode *n, const char *host, const char *port)
{
    struct hdfs_error error = HDFS_SUCCESS;

    _lock(&n->nn_lock);
    ASSERT(n->nn_sock == -1);

    if (n->nn_kerb == HDFS_TRY_KERB || n->nn_kerb == HDFS_REQUIRE_KERB) {
        int r = sasl_client_new("hdfs", host, NULL/*localip*/,
            NULL/*remoteip*/, NULL/*CBs*/, 0/*sec flags*/, &n->nn_sasl_ctx);
        if (r != SASL_OK) {
            error = error_from_sasl(r);
            goto out;
        }
    }

    error = _connect(&n->nn_sock, host, port);
    if (hdfs_is_error(error))
        goto out;

out:
    _unlock(&n->nn_lock);
    return error;
}
static rt_size_t rt_uart_write(rt_device_t dev, rt_off_t pos, const void* buffer, rt_size_t size)
{
    volatile uint8_t *p;
    struct uart_device * uart_dev;
    rt_size_t nbytes = size;   /* remember the requested length; the loop below consumes 'size' */

    _lock((struct uart_device *)dev);

    uart_dev = (struct uart_device*)dev;
    p = (uint8_t*)buffer;

    while(size--)
    {
        /*
         * to be polite with serial console add a line feed
         * to the carriage return character
         */
        if (*p == '\n' && (dev->open_flag & RT_DEVICE_FLAG_STREAM))
        {
            UART_WriteByte(uart_dev->hw_instance, '\r');
        }
        UART_WriteByte(uart_dev->hw_instance, *p++);
    }

    _unlock((struct uart_device *)dev);

    /* return the number of bytes written, not the decremented loop counter */
    return nbytes;
}
bool LinuxThread::IsRunning()
{
    bool run;
    _lock(m_runMutex);
    run = m_bRun;
    _unlock(m_runMutex);
    return run;
}
//---------------------------------------------------------------------------//
void GpuBufferGL4::destroy()
{
    if (!isValid())
    {
        return;
    }

    ASSERT(isLockedPersistent() || !isLocked(), "Trying to destroy a locked buffer");

    // Persistent buffers stay locked and have to be unlocked here
    if (isLockedPersistent())
    {
        for (uint32 i = 0; i < m_vGLhandles.size(); ++i)
        {
            _unlock(i);
        }
    }

    for (uint32 i = 0; i < m_vGLhandles.size(); ++i)
    {
        glDeleteBuffers(1, &m_vGLhandles[i]);
    }

    m_vGLhandles.clear();
    m_clStateInfos.isLocked = false;
    m_clStateInfos.isLockedPersistent = false;
}
static ssize_t diag2arm9_write(struct file *fp, const char __user *buf,
                               size_t count, loff_t *pos)
{
    struct diag_context *ctxt = &_context;
    int r = count;
    int writed = 0;

    DBG("diag2arm9_write(%d)\n", count);

    if (_lock(&ctxt->write_arm9_excl))
        return -EBUSY;

    while (count > 0) {
        writed = count > ARM9_MAX ? ARM9_MAX : count;
        if (copy_from_user(ctxt->toARM9_buf, buf, writed)) {
            r = -EFAULT;
            break;
        }
        smd_write(ctxt->ch, ctxt->toARM9_buf, writed);
        smd_xfer_count_func(writed, data_set_tx);
        buf += writed;
        count -= writed;
    }

    _unlock(&ctxt->write_arm9_excl);
    return r;
}
static ssize_t adb_write(struct file *fp, const char __user *buf,
                         size_t count, loff_t *pos)
{
    struct adb_context *ctxt = &_context;
    struct usb_request *req = 0;
    int r = count, xfer;
    int ret;

    DBG("adb_write(%d)\n", count);

    if (_lock(&ctxt->write_excl))
        return -EBUSY;

    while (count > 0) {
        if (ctxt->error) {
            r = -EIO;
            break;
        }

        /* get an idle tx request to use */
        req = 0;
        ret = wait_event_interruptible(ctxt->write_wq,
            ((req = req_get(ctxt, &ctxt->tx_idle)) || ctxt->error));
        if (ret < 0) {
            r = ret;
            break;
        }

        if (req != 0) {
            xfer = count > TXN_MAX ? TXN_MAX : count;
            if (copy_from_user(req->buf, buf, xfer)) {
                r = -EFAULT;
                break;
            }

            req->length = xfer;
            ret = usb_ept_queue_xfer(ctxt->in, req);
            if (ret < 0) {
                DBG("adb_write: xfer error %d\n", ret);
                ctxt->error = 1;
                r = -EIO;
                break;
            }

            buf += xfer;
            count -= xfer;

            /* zero this so we don't try to free it on error exit */
            req = 0;
        }
    }

    if (req)
        req_put(ctxt, &ctxt->tx_idle, req);

    _unlock(&ctxt->write_excl);
    return r;
}
/* Add the new node information to our libstate cache, making a copy if
 * information is new. Otherwise, swap the data and return to the user old
 * data, which is fine in this case since it is only deleted by slurmctld */
static void _cache_node_info(sw_gen_node_info_t *new_node_info)
{
    sw_gen_node_info_t *old_node_info;
    uint16_t ifa_cnt;
    sw_gen_ifa_t **ifa_array;
    struct sw_gen_node_info *next;
    bool new_alloc;      /* True if this is new node to be added to cache */

    _lock();
    old_node_info = _find_node(new_node_info->node_name);
    new_alloc = (old_node_info == NULL);
    if (new_alloc) {
        (void) switch_p_alloc_node_info((switch_node_info_t **)
                                        &old_node_info);
        old_node_info->node_name = xstrdup(new_node_info->node_name);
    }

    /* Swap contents */
    ifa_cnt   = old_node_info->ifa_cnt;
    ifa_array = old_node_info->ifa_array;
    next      = old_node_info->next;
    old_node_info->ifa_cnt   = new_node_info->ifa_cnt;
    old_node_info->ifa_array = new_node_info->ifa_array;
    old_node_info->next      = new_node_info->next;
    new_node_info->ifa_cnt   = ifa_cnt;
    new_node_info->ifa_array = ifa_array;
    new_node_info->next      = next;

    if (new_alloc)
        _hash_add_nodeinfo(old_node_info);
    _unlock();
}
void CThreadPool::_terminateThread()
{
    _lock(6);
    VTHREAD_ID_TYPE nextThreadToSwitchTo=0;
    for (int i=0;i<int(_allThreadData.size());i++)
    {
        int fql=int(_threadQueue.size());
        if (VThread::areThreadIDsSame(_allThreadData[i]->threadID,_threadQueue[fql-1]))
        {
            if (_showThreadSwitches)
            {
                std::string tmp("==q Terminating thread: ");
                tmp+=boost::lexical_cast<std::string>((unsigned long)_allThreadData[i]->threadID);
                tmp+="\n";
                CDebugLogFile::addDebugText(false,tmp.c_str());
                // printf("Terminating thread: %lu\n",(unsigned long)_allThreadData[i]->threadID);
            }
            _allThreadData[i]->threadID=VTHREAD_ID_DEAD; // To indicate we need clean-up
            nextThreadToSwitchTo=_threadQueue[fql-2]; // This will be the next thread we wanna switch to
            break;
        }
    }
    _unlock(6);
    switchToThread(nextThreadToSwitchTo); // We switch to the calling thread (previous thread)
}