/**
 * Adds a message as a task to the task queue.
 * This blocks if the task queue is full, until there is space.
 * @param p - the peer that the message was received from
 * @param msg - the message
 * @returns 1 on success, 0 on failure (eg. shutdown in progress)
 */
int put_task(peer *p,AAAMessage *msg)
{
	lock_get(tasks->lock);
	/* queue full: one slot is kept unused, so (end+1)%max == start means full */
	while ((tasks->end+1)%tasks->max == tasks->start){
		lock_release(tasks->lock);
		if (*shutdownx) {
			/* re-release the semaphore so other blocked producers
			 * also wake up and observe the shutdown */
			sem_release(tasks->full);
			return 0;
		}
		/* wait until a consumer frees a slot (or shutdown poisons the sem) */
		sem_get(tasks->full);
		if (*shutdownx) {
			sem_release(tasks->full);
			return 0;
		}
		lock_get(tasks->lock);
	}
	tasks->queue[tasks->end].p = p;
	tasks->queue[tasks->end].msg = msg;
	tasks->end = (tasks->end+1) % tasks->max;
	/* signal consumers that a task is available */
	if (sem_release(tasks->empty)<0)
		LOG(L_WARN,"WARN:put_task(): Error releasing tasks->empty semaphore > %s!\n",strerror(errno));
	lock_release(tasks->lock);
	return 1;
}
/** * Remove and return the first task from the queue (FIFO). * This blocks until there is something in the queue. * @returns the first task from the queue or an empty task on error (eg. shutdown in progress) */ task_t take_task() { task_t t={0,0}; // LOG(L_CRIT,"-1-\n"); lock_get(tasks->lock); // LOG(L_CRIT,"-2-\n"); while(tasks->start == tasks->end){ // LOG(L_CRIT,"-3-\n"); lock_release(tasks->lock); // LOG(L_CRIT,"-4-\n"); if (*shutdownx) { sem_release(tasks->empty); return t; } // LOG(L_ERR,"-"); sem_get(tasks->empty); // LOG(L_CRIT,"-5-\n"); if (*shutdownx) { sem_release(tasks->empty); return t; } lock_get(tasks->lock); // LOG(L_CRIT,"-6-\n"); } // LOG(L_CRIT,"-7-\n"); t = tasks->queue[tasks->start]; tasks->queue[tasks->start].msg = 0; tasks->start = (tasks->start+1) % tasks->max; if (sem_release(tasks->full)<0) LOG(L_WARN,"WARN:take_task(): Error releasing tasks->full semaphore > %s!\n",strerror(errno)); lock_release(tasks->lock); return t; }
/**
 * Remove and return the first task from the queue (FIFO).
 * This blocks until there is something in the queue.
 * @returns the first task from the queue or an empty task on error (eg. shutdown in progress)
 */
task_t take_task()
{
	task_t t = {0, 0};

	lock_get(tasks->lock);
	/* queue empty: wait on the "empty" semaphore until a producer adds a task */
	while (tasks->start == tasks->end) {
		lock_release(tasks->lock);
		if (*shutdownx) {
			/* cascade the wakeup so other blocked consumers also exit */
			sem_release(tasks->empty);
			return t;
		}
		sem_get(tasks->empty);
		if (*shutdownx) {
			sem_release(tasks->empty);
			return t;
		}
		lock_get(tasks->lock);
	}
	t = tasks->queue[tasks->start];
	tasks->queue[tasks->start].msg = 0;
	tasks->start = (tasks->start + 1) % tasks->max;
	/* signal producers that a slot was freed */
	if (sem_release(tasks->full) < 0)
		LM_WARN("Error releasing tasks->full semaphore > %s!\n", strerror(errno));
	lock_release(tasks->lock);
	//int num_tasks = tasks->end - tasks->start;
	//LM_ERR("Taken task from task queue.  Queue length [%i]", num_tasks);
	return t;
}
/**
 * Release (up) a queue semaphore.
 * NOTE(review): on non-I386 builds the underlying mutex is only released
 * when the atomic counter was negative before the increment, i.e. when at
 * least one waiter is presumably blocked on it — confirm against the
 * matching qsem acquire implementation.
 */
void qsem_release (qsem_t *s)
{
#ifdef I386	/* no atomic_add() in i386 */
	sem_release(s->mutex);
#else
	if (atomic_add (&s->count, 1) < 0)
		sem_release (s->mutex);
#endif
}
/** * Destroys the worker structures. */ void worker_destroy() { int i,sval=0; if (callbacks){ while(callbacks->head) cb_remove(callbacks->head); shm_free(callbacks); } // to deny runing the poison queue again config->workers = 0; if (tasks) { // LOG(L_CRIT,"-1-\n"); lock_get(tasks->lock); for(i=0;i<tasks->max;i++){ if (tasks->queue[i].msg) AAAFreeMessage(&(tasks->queue[i].msg)); tasks->queue[i].msg = 0; tasks->queue[i].p = 0; } lock_release(tasks->lock); LOG(L_INFO,"Unlocking workers waiting on empty queue...\n"); for(i=0;i<config->workers;i++) sem_release(tasks->empty); LOG(L_INFO,"Unlocking workers waiting on full queue...\n"); i=0; while(sem_getvalue(tasks->full,&sval)==0) if (sval<=0) { sem_release(tasks->full); i=1; } else break; sleep(i); lock_get(tasks->lock); // LOG(L_CRIT,"-2-\n"); shm_free(tasks->queue); lock_destroy(tasks->lock); lock_dealloc((void*)tasks->lock); // LOG(L_CRIT,"-3-\n"); //lock_release(tasks->empty); sem_free(tasks->full); sem_free(tasks->empty); shm_free(tasks); } }
/**
 * Adds a message as a task to the task queue.
 * This blocks if the task queue is full, until there is space.
 * Also measures how long the caller was blocked waiting for a free slot and
 * logs an error if that exceeded workerq_latency_threshold milliseconds.
 * @param p - the peer that the message was received from
 * @param msg - the message
 * @returns 1 on success, 0 on failure (eg. shutdown in progress)
 */
int put_task(peer *p, AAAMessage *msg)
{
	struct timeval start, stop;
	long elapsed_useconds=0, elapsed_seconds=0, elapsed_millis=0;

	lock_get(tasks->lock);
	gettimeofday(&start, NULL);
	/* queue full: one slot is kept unused, so (end+1)%max == start means full */
	while ((tasks->end + 1) % tasks->max == tasks->start) {
		lock_release(tasks->lock);
		if (*shutdownx) {
			/* re-release so other blocked producers also wake and see shutdown */
			sem_release(tasks->full);
			return 0;
		}
		/* wait until a consumer frees a slot (or shutdown poisons the sem) */
		sem_get(tasks->full);
		if (*shutdownx) {
			sem_release(tasks->full);
			return 0;
		}
		lock_get(tasks->lock);
	}
	gettimeofday(&stop, NULL);
	/* time spent blocked waiting for a free slot, in milliseconds */
	elapsed_useconds = stop.tv_usec - start.tv_usec;
	elapsed_seconds = stop.tv_sec - start.tv_sec;
	elapsed_useconds = elapsed_seconds*1000000 + elapsed_useconds;
	elapsed_millis = elapsed_useconds/1000;
	if (elapsed_millis > workerq_latency_threshold) {
		LM_ERR("took too long to put task into task queue > %d - [%ld]\n",
				workerq_latency_threshold, elapsed_millis);
	}
	tasks->queue[tasks->end].p = p;
	tasks->queue[tasks->end].msg = msg;
	tasks->end = (tasks->end + 1) % tasks->max;
	/* signal consumers that a task is available */
	if (sem_release(tasks->empty) < 0)
		LM_WARN("Error releasing tasks->empty semaphore > %s!\n", strerror(errno));
	lock_release(tasks->lock);
	//int num_tasks = tasks->end - tasks->start;
	//LM_ERR("Added task to task queue.  Queue length [%i]", num_tasks);
	return 1;
}
/* block_cache_sync() - flush all dirty blocks. */ s32 block_cache_sync() { block_descriptor_t *bd, * const end = bc.descriptors + bc.nblocks; u32 one = 1; s32 ret; for(bd = bc.descriptors; bd != end; ++bd) { sem_acquire(&bd->sem); if(bd->flags & BC_DIRTY) { void *data = bc.cache + ((bd - bc.descriptors) * BLOCK_SIZE); ret = bd->dev->write(bd->dev, bd->block, &one, data); if(ret != SUCCESS) return ret; bd->flags &= ~BC_DIRTY; } sem_release(&bd->sem); } return SUCCESS; }
/**
 * Exercises the main kernel primitives once, so the linker pulls in their
 * code paths (used to measure the kernel's code footprint).
 */
void kernel_footprint(void)
{
	init();

	// generate code for process
	struct Process *p = proc_new(proc1_main, 0, sizeof(proc1_stack), proc1_stack);
	proc_setPri(p, 5);
	proc_yield();

	// generate code for msg
	Msg msg;
	msg_initPort(&in_port, event_createSignal(p, SIG_USER1));
	msg_put(&in_port, &msg);
	msg_peek(&in_port);
	Msg *msg_re = msg_get(&in_port);
	msg_reply(msg_re);

	// generate code for signals
	sig_send(p, SIG_USER0);

	// generate code for semaphores (original comment said "msg" —
	// apparent copy/paste slip, this block exercises the Semaphore API)
	Semaphore sem;
	sem_init(&sem);
	sem_obtain(&sem);
	sem_release(&sem);

	sig_wait(SIG_USER0);
}
/**
 * Queue a buffer for transmission on the given network interface.
 * Frees the buffer chain and reports ERR_NO_MEMORY when the tx queue is
 * full; kicks the tx semaphore when the queue goes from empty to one entry.
 * @param b the buffer chain to transmit (ownership transfers on success)
 * @param i the interface to send on
 * @returns NO_ERROR on success, ERR_NO_MEMORY if the queue was full
 */
int if_output(cbuf *b, ifnet *i)
{
	bool kick_tx = false;
	bool queue_full = false;

	/* append the buffer to the interface's transmit queue */
	mutex_lock(&i->tx_queue_lock);
	if(fixed_queue_enqueue(&i->tx_queue, b) < 0)
		queue_full = true;
	if(i->tx_queue.count == 1)
		kick_tx = true;
	mutex_unlock(&i->tx_queue_lock);

	if(queue_full) {
		cbuf_free_chain(b);
		return ERR_NO_MEMORY;
	}

	if(kick_tx)
		sem_release(i->tx_queue_sem);

	return NO_ERROR;
}
/** * Poisons the worker queue. * Actually it just releases the task queue locks so that the workers get to evaluate * if a shutdown is in process and exit. */ void worker_poison_queue() { int i; if (config->workers && tasks) for (i = 0; i < config->workers; i++) if (sem_release(tasks->empty) < 0) LM_WARN("Error releasing tasks->empty semaphore > %s!\n", strerror(errno)); }
/**
 * Read the LM75 temperature sensor over the shared I2C bus.
 * @return the temperature converted to a float, in degrees
 */
float measures_intTemp(void)
{
	float temperature;

	/* serialize access to the shared I2C bus */
	sem_obtain(&i2c_sem);
	temperature = DEG_T_TO_FLOATDEG(lm75_read(&i2c_bus, LM75_ADDR));
	sem_release(&i2c_sem);

	return temperature;
}
/**
 * Consumer thread: forever removes one item at a time from the shared
 * ring buffer, using the classic counting-semaphore protocol.
 * @param arg pointer to this consumer's integer id
 */
void consumer(void* arg)
{
	int item_no;
	int id = *(int *)arg;

	printf("Consumer %d\n", id);

	for (;;) {
		sem_acquire(shared.full);   /* wait for an available item */
		sem_acquire(shared.mutex);  /* enter the critical section */

		item_no = shared.buf[shared.out];
		shared.out = (shared.out + 1) % SH_SIZE;
		printf("Consumer %d consumed item no %d\n", id, item_no);

		sem_release(shared.mutex);
		sem_release(shared.empty);  /* one more free slot */

		sleep(rand() % 3);
	}
}
/**
 * Save a virtscreen's current contents into its back buffer and switch
 * the virtscreen to draw into that back buffer.
 */
void save(struct virtscreen *vscr)
{
	sem_acquire(vscr->lock);
	/* only copy when the virtscreen is not already using its back buffer */
	if(vscr->data != vscr->back) {
		memcpy(vscr->back, vscr->data, vscr->num_bytes);
		vscr->data = vscr->back;
	}
	sem_release(vscr->lock);
}
/**
 * Producer thread: forever places new items into the shared ring buffer,
 * using the classic counting-semaphore protocol.
 * @param arg pointer to this producer's integer id
 */
void producer(void* arg)
{
	int id = *(int *)arg;

	printf("Producer %d\n", id);

	for (;;) {
		sem_acquire(shared.empty);  /* wait for a free slot */
		sem_acquire(shared.mutex);  /* enter the critical section */

		shared.buf[shared.in] = item++;
		shared.in = (shared.in + 1) % SH_SIZE;
		printf("Producer %d produced item no : %d\n", id, item);

		sem_release(shared.mutex);
		sem_release(shared.full);   /* one more item available */

		sleep(rand() % 3);
	}
}
/**
 * Write a NUL-terminated string to a virtscreen under its lock,
 * expanding every '\n' into a newline followed by a carriage return.
 */
void vputs(struct virtscreen *vscr, char *s)
{
	char c;

	sem_acquire(vscr->lock);
	while((c = *s++) != 0) {
		char_to_virtscreen(vscr, c);
		if(c == '\n')
			char_to_virtscreen(vscr, '\r');
	}
	sem_release(vscr->lock);
}
/**
 * Release a mutex held by the current thread.
 * Panics if the caller is not the recorded holder.
 */
void mutex_unlock(mutex *m)
{
	thread_id current = thread_get_current_thread_id();

	if(m->holder != current)
		panic("mutex_unlock failure: thread 0x%x is trying to release mutex %p (current holder 0x%x)\n",
			current, m, m->holder);

	/* clear ownership before waking the next waiter */
	m->holder = -1;
	sem_release(m->sem, 1);
}
/**
 * LCD task: periodically prints the uptime (seconds.tenths) at the top-left
 * of the display, sharing the LCD via a semaphore.
 */
void task_time()
{
	printf("TIME TASK (ID %d) started.\n",CURRENT_TASK_ID);
	yield();

	for(;;) {
		/* update the display under the shared LCD semaphore */
		sem_acquire(&lcd_sem);
		lcd_cursor(0,0);
		lcd_printf("Runtime: %d.%d ", millis()/1000, (millis()%1000)/100);
		sem_release(&lcd_sem);

		sleep_for(100);
	}
}
/**
 * Transmit a frame on the VIA Rhine NIC.
 * NOTE(review): the entire body is wrapped in "#if 0", so this function
 * currently compiles to a no-op; the code below (including the
 * PANIC_UNIMPLEMENTED() entry) is an unfinished skeleton kept for reference.
 */
void rhine_xmit(rhine *r, const char *ptr, ssize_t len)
{
#if 0
	PANIC_UNIMPLEMENTED();
#if 0
	int i;
#endif
//restart:
	sem_acquire(r->tx_sem, 1);
	mutex_lock(&r->lock);

#if 0
	dprintf("XMIT %d %x (%d)\n",r->txbn, ptr, len);
	dprintf("dumping packet:");
	for(i=0; i<len; i++) {
		if(i%8 == 0)
			dprintf("\n");
		dprintf("0x%02x ", ptr[i]);
	}
	dprintf("\n");
#endif

	int_disable_interrupts();
	acquire_spinlock(&r->reg_spinlock);

#if 0
	/* wait for clear-to-send */
	if(!(RTL_READ_32(r, RT_TXSTATUS0 + r->txbn*4) & RT_TX_HOST_OWNS)) {
		dprintf("rhine_xmit: no txbuf free\n");
		rhine_dumptxstate(r);
		release_spinlock(&r->reg_spinlock);
		int_restore_interrupts();
		mutex_unlock(&r->lock);
		sem_release(r->tx_sem, 1);
		goto restart;
	}
#endif
	memcpy((void*)(r->txbuf + r->txbn * 0x800), ptr, len);
	if(len < ETHERNET_MIN_SIZE)
		len = ETHERNET_MIN_SIZE;

	RTL_WRITE_32(r, RT_TXSTATUS0 + r->txbn*4, len | 0x80000);
	if(++r->txbn >= 4)
		r->txbn = 0;

	release_spinlock(&r->reg_spinlock);
	int_restore_interrupts();

	mutex_unlock(&r->lock);
#endif
}
/**
 * Read one acceleration axis from the MMA845x over the shared I2C bus.
 * @param axis the axis to sample
 * @return the acceleration converted to m/s^2, or -6.66 on read error
 */
float measures_acceleration(Mma845xAxis axis)
{
	int raw;

	/* serialize access to the shared I2C bus */
	sem_obtain(&i2c_sem);
	raw = mma845x_read(&i2c_bus, 0, axis);
	sem_release(&i2c_sem);

	if (raw == MMA_ERROR)
		return -6.66;

	/* conversion presumably maps the raw count to m/s^2 for a +/-4 g
	 * range over 512 counts — TODO confirm against the sensor config */
	return (raw * 9.81 * 4.0) / 512;
}
/**
 * Append a CDP callback event to the event list and signal the consumer.
 * @param event the event to enqueue
 */
void push_cdp_cb_event(cdp_cb_event_t* event)
{
	lock_get(cdp_event_list->lock);
	if (cdp_event_list->head) {
		/* non-empty list: link after the current tail */
		cdp_event_list->tail->next = event;
		cdp_event_list->tail = event;
	} else {
		/* empty list: the event becomes both head and tail */
		cdp_event_list->head = cdp_event_list->tail = event;
	}
	/* wake the consumer waiting on the list */
	sem_release(cdp_event_list->empty);
	lock_release(cdp_event_list->lock);
}
/**
 * Handle a key press: echo it to the active virtscreen and, when a remote
 * port is connected, forward the key byte there as well.
 */
void keypress(int key)
{
	sem_acquire(active->lock);
	char_to_virtscreen(active, key);
	sem_release(active->lock);

	if(remote_port > 0) {
		char byte = key;
		port_send(send_port, remote_port, &byte, 1, 0);
	}
}
void load(struct virtscreen *vscr) { sem_acquire(vscr->lock); if(vscr->data == vscr->back) { vscr->data = screen; memcpy(vscr->data,vscr->back,vscr->num_bytes); } active = vscr; movecursor(vscr->xpos,vscr->ypos); sem_release(vscr->lock); status(vscr - con); }
/**
 * Append a CDP callback event to the event list and signal the consumer.
 * Tracks the queue length and warns (without dropping the event) when it
 * grows beyond the configured threshold.
 * @param event the event to enqueue
 */
void push_cdp_cb_event(cdp_cb_event_t* event)
{
	lock_get(cdp_event_list->lock);
	if (cdp_event_list->head == 0) { //empty list
		cdp_event_list->head = cdp_event_list->tail = event;
	} else {
		cdp_event_list->tail->next = event;
		cdp_event_list->tail = event;
	}
	cdp_event_list->size++;
	if(cdp_event_list_size_threshold > 0
			&& cdp_event_list->size > cdp_event_list_size_threshold) {
		/* BUGFIX: message previously read "has exceed" and lacked the
		 * trailing newline every other warning in this module carries */
		LM_WARN("cdp_event_list is size [%d] and has exceeded cdp_event_list_size_threshold of [%d]\n",
				cdp_event_list->size, cdp_event_list_size_threshold);
	}
	/* wake the consumer waiting on the list */
	sem_release(cdp_event_list->empty);
	lock_release(cdp_event_list->lock);
}
/**
 * Drop one level of a recursive lock held by the current thread.
 * Panics if the caller is not the holder.
 * @return true if the lock was fully released (recursion count hit zero)
 */
bool recursive_lock_unlock(recursive_lock *lock)
{
	thread_id me = thread_get_current_thread_id();
	bool fully_released = false;

	if(me != lock->holder)
		panic("recursive_lock %p unlocked by non-holder thread!\n", lock);

	if(--lock->recursion == 0) {
		/* last level: clear ownership and wake the next waiter */
		lock->holder = -1;
		sem_release(lock->sem, 1);
		fully_released = true;
	}

	return fully_released;
}
/**
 * LCD task: sweeps across the second display line, alternately filling
 * each of the 16 cells with a solid block and clearing it again.
 */
void task_scroller()
{
	printf("SCROLLER TASK (ID %d) started.\n",CURRENT_TASK_ID);
	yield();

	uint8_t fill = 0;
	for(;;) {
		fill = !fill;
		for(uint8_t col = 0; col < 16; col++) {
			/* write one cell under the shared LCD semaphore */
			sem_acquire(&lcd_sem);
			lcd_cursor(col,1);
			lcd_data(fill ? 0xFF : ' ');
			sem_release(&lcd_sem);
			sleep_for(20);
		}
	}
}
// // Test malloc() // int malloc_thread(void*) { for (int i = 0; i < 50000; i++){ #if LOOK_ITS_A_RACE sem_acquire(malloc_sem); #endif free(malloc(10000)); #if LOOK_ITS_A_RACE sem_release(malloc_sem); #endif } printf("malloc thread finished\n"); os_terminate(0); return 0; }
/**
 * Append a reginfo event to the event list and signal the consumer.
 * Tracks the queue length and warns (without dropping the event) when it
 * grows beyond the configured threshold.
 * @param event the event to enqueue
 */
void push_reginfo_event(reginfo_event_t* event)
{
	lock_get(reginfo_event_list->lock);
	if (reginfo_event_list->head == 0) { //empty list
		reginfo_event_list->head = reginfo_event_list->tail = event;
	} else {
		reginfo_event_list->tail->next = event;
		reginfo_event_list->tail = event;
	}
	reginfo_event_list->size++;
	if(reginfo_queue_size_threshold > 0
			&& reginfo_event_list->size > reginfo_queue_size_threshold) {
		/* BUGFIX: message previously read "has exceed" and lacked the
		 * trailing newline other warnings in this module carry */
		LM_WARN("Reginfo queue is size [%d] and has exceeded reginfo_queue_size_threshold of [%d]\n",
				reginfo_event_list->size, reginfo_queue_size_threshold);
	}
	/* wake the consumer waiting on the list */
	sem_release(reginfo_event_list->empty);
	lock_release(reginfo_event_list->lock);
}
/**
 * Accelerometer sampling process (never returns).
 * Reads raw acceleration over the shared I2C bus at ACC_SAMPLE_RATE Hz
 * into acc_buf, handing each full buffer to the logger.
 */
static void NORETURN acc_process(void)
{
	ticks_t start = timer_clock();
	mtime_t delay = 0;

	while (1)
	{
		/* sample under the shared I2C bus semaphore */
		sem_obtain(&i2c_sem);
		bool r = mma845x_rawAcc(&i2c_bus, 0, acc_buf[acc_idx].acc);
		sem_release(&i2c_sem);
		if (!r)
			kprintf("ACC error!\n");

		/* buffer full: flush it to the log and start refilling */
		if (++acc_idx >= countof(acc_buf))
		{
			acc_idx = 0;
			logging_acc(acc_buf, sizeof(acc_buf));
		}

		/* Wait for the next sample adjusting for time spent above
		 * (delay accumulates the nominal schedule so rounding/processing
		 * time does not drift the sample rate) */
		delay += (1000 / ACC_SAMPLE_RATE);
		timer_delay(delay - ticks_to_ms(timer_clock() - start));
	}
}
/**
 * Receive one frame from the VIA Rhine NIC.
 * NOTE(review): this function panics immediately via PANIC_UNIMPLEMENTED();
 * everything after it is compiled out with "#if 0" and kept only as an
 * unfinished reference implementation (ring-buffer read with wrap-around
 * handling borrowed from an RTL8139-style driver).
 */
ssize_t rhine_rx(rhine *r, char *buf, ssize_t buf_len)
{
	PANIC_UNIMPLEMENTED();
#if 0
	rx_entry *entry;
	uint32 tail;
	uint16 len;
	int rc;
	bool release_sem = false;

//	dprintf("rhine_rx: entry\n");

	if(buf_len < 1500)
		return -1;

restart:
	sem_acquire(r->rx_sem, 1);
	mutex_lock(&r->lock);

	int_disable_interrupts();
	acquire_spinlock(&r->reg_spinlock);

	tail = TAILREG_TO_TAIL(RTL_READ_16(r, RT_RXBUFTAIL));
//	dprintf("tailreg = 0x%x, actual tail 0x%x\n", RTL_READ_16(r, RT_RXBUFTAIL), tail);
	if(tail == RTL_READ_16(r, RT_RXBUFHEAD)) {
		release_spinlock(&r->reg_spinlock);
		int_restore_interrupts();
		mutex_unlock(&r->lock);
		goto restart;
	}

	if(RTL_READ_8(r, RT_CHIPCMD) & RT_CMD_RX_BUF_EMPTY) {
		release_spinlock(&r->reg_spinlock);
		int_restore_interrupts();
		mutex_unlock(&r->lock);
		goto restart;
	}

	// grab another buffer
	entry = (rx_entry *)((uint8 *)r->rxbuf + tail);
//	dprintf("entry->status = 0x%x\n", entry->status);
//	dprintf("entry->len = 0x%x\n", entry->len);

	// see if it's an unfinished buffer
	if(entry->len == 0xfff0) {
		release_spinlock(&r->reg_spinlock);
		int_restore_interrupts();
		mutex_unlock(&r->lock);
		goto restart;
	}

	// figure the len that we need to copy
	len = entry->len - 4; // minus the crc

	// see if we got an error
	if((entry->status & RT_RX_STATUS_OK) == 0 || len > ETHERNET_MAX_SIZE) {
		// error, lets reset the card
		rhine_resetrx(r);
		release_spinlock(&r->reg_spinlock);
		int_restore_interrupts();
		mutex_unlock(&r->lock);
		goto restart;
	}

	// copy the buffer
	if(len > buf_len) {
		dprintf("rhine_rx: packet too large for buffer (len %d, buf_len %ld)\n", len, (long)buf_len);
		RTL_WRITE_16(r, RT_RXBUFTAIL, TAILREG_TO_TAIL(RTL_READ_16(r, RT_RXBUFHEAD)));
		rc = ERR_TOO_BIG;
		release_sem = true;
		goto out;
	}
	if(tail + len > 0xffff) {
		// frame wraps around the end of the ring buffer: copy in two parts
//		dprintf("packet wraps around\n");
		memcpy(buf, (const void *)&entry->data[0], 0x10000 - (tail + 4));
		memcpy((uint8 *)buf + 0x10000 - (tail + 4), (const void *)r->rxbuf, len - (0x10000 - (tail + 4)));
	} else {
		memcpy(buf, (const void *)&entry->data[0], len);
	}
	rc = len;

	// calculate the new tail
	tail = ((tail + entry->len + 4 + 3) & ~3) % 0x10000;
//	dprintf("new tail at 0x%x, tailreg will say 0x%x\n", tail, TAIL_TO_TAILREG(tail));
	RTL_WRITE_16(r, RT_RXBUFTAIL, TAIL_TO_TAILREG(tail));

	if(tail != RTL_READ_16(r, RT_RXBUFHEAD)) {
		// we're at last one more packet behind
		release_sem = true;
	}

out:
	release_spinlock(&r->reg_spinlock);
	int_restore_interrupts();
	if(release_sem)
		sem_release(r->rx_sem, 1);
	mutex_unlock(&r->lock);

#if 0
	{
		int i;
		dprintf("RX %x (%d)\n", buf, len);
		dprintf("dumping packet:");
		for(i=0; i<len; i++) {
			if(i%8 == 0)
				dprintf("\n");
			dprintf("0x%02x ", buf[i]);
		}
		dprintf("\n");
	}
#endif

	return rc;
#endif
}
/**
 * Receive one frame from the RTL8169.
 * Blocks on the rx semaphore until a descriptor is owned by the host,
 * copies the frame into buf, and hands the descriptor back to the card.
 * @param r the adapter state
 * @param buf destination buffer
 * @param buf_len size of buf; must be at least 1500
 * @returns the frame length on success, -1 if buf_len < 1500, or
 *          ERR_TOO_BIG if this particular frame does not fit in buf
 */
ssize_t rtl8169_rx(rtl8169 *r, char *buf, ssize_t buf_len)
{
	uint32 tail;
	size_t len;
	int rc;
	bool release_sem = false;

	SHOW_FLOW0(3, "rtl8169_rx: entry\n");

	if(buf_len < 1500)
		return -1;

restart:
	sem_acquire(r->rx_sem, 1);
	mutex_lock(&r->lock);

	int_disable_interrupts();
	acquire_spinlock(&r->reg_spinlock);

	/* look at the descriptor pointed to by rx_idx_free */
	if (r->rxdesc[r->rx_idx_free].flags & RTL_DESC_OWN) {
		/* for some reason it's owned by the card, wait for more packets */
		release_spinlock(&r->reg_spinlock);
		int_restore_interrupts();
		mutex_unlock(&r->lock);
		goto restart;
	}

	/* process this packet */
	len = r->rxdesc[r->rx_idx_free].frame_len & 0x3fff;
	SHOW_FLOW(3, "rtl8169_rx: desc idx %d: len %d\n", r->rx_idx_free, len);

	if (len > buf_len) {
		rc = ERR_TOO_BIG;
		release_sem = true;
		goto out;
	}

	memcpy(buf, RXBUF(r, r->rx_idx_free), len);
	rc = len;

#if debug_level_flow >= 3
	hexdump(RXBUF(r, r->rx_idx_free), len);
#endif

	/* stick it back in the free list */
	r->rxdesc[r->rx_idx_free].buffer_size = BUFSIZE_PER_FRAME;
	r->rxdesc[r->rx_idx_free].flags = (r->rxdesc[r->rx_idx_free].flags & RTL_DESC_EOR) | RTL_DESC_OWN;
	inc_rx_idx_free(r);

	/* see if there are more packets pending */
	if ((r->rxdesc[r->rx_idx_free].flags & RTL_DESC_OWN) == 0)
		release_sem = true; // if so, release the rx sem so the next reader gets a shot

out:
	/* unwind in reverse acquisition order: spinlock, interrupts, sem, mutex */
	release_spinlock(&r->reg_spinlock);
	int_restore_interrupts();
	if(release_sem)
		sem_release(r->rx_sem, 1);
	mutex_unlock(&r->lock);

	return rc;
}