static boolean cmGetPAInfo(u64 PageAddress, s64 *LatestTime, s32 *HighestCC) {
    s64 Time = 0;
    u16 Header, go_on = 1;

    critical_enter(&cm_critical);

    /* Fast path: the page's current time and cap counter are available directly. */
    if (cmGetCurrentTimeAndCap(PageAddress, LatestTime, HighestCC)) {
        critical_leave(&cm_critical);
        return TRUE;
    }

    /* Slow path: walk the page's stroke records.
     * ReadBuffer is assumed to be the file-scope read state shared by the
     * cm* helpers; it is not declared in this function. */
    if (!cmGetReadState(PageAddress, &ReadBuffer)) {
        critical_leave(&cm_critical);
        return FALSE;
    }

    do {
        cmReadStrokeInfo(&ReadBuffer);
        Header = ReadBuffer.Header;
        /* Remember the start time of the most recent normal stroke. */
        if ((Header & SH_TYPE_MASK) == SH_TYPE_NORMAL)
            Time = ReadBuffer.StartTime;
        go_on = cmReadToNextStroke(&ReadBuffer) > 0;
    } while (go_on);

    /* Copy the results out before releasing the read state. */
    *LatestTime = Time; /* start time of the latest normal stroke */
    *HighestCC = ReadBuffer.Cap;
    cmReleaseReadState(&ReadBuffer);

    critical_leave(&cm_critical);
    return TRUE;
}
int cb_rem_blocking(CircBuff_t *cb, float *in, const size_t len) {
    if (cb->invalid)
        return CB_ERROR;
    if (len == 0) /* len is unsigned, so this is the only empty-request case */
        return CB_OK;

    size_t items_inside = cb->buffer_size - cb->remaining_capacity;

    /* Block until at least len items are available. */
    while (items_inside < len) {
        /* If the buffer is not large enough for this request, ask the
         * producer to resize it during its next add. */
        if (len * cb->size_coeff > cb->buffer_size)
            cb->desired_buf_size = len * cb->size_coeff;

        const size_t before_items_inside = items_inside;

        cb->is_waiting = 1;
        if (mutex_wait(&cb->locker) == THREAD_TIMEOUT) {
            cb->is_waiting = 0;
            return CB_EMPTY;
        }
        cb->is_waiting = 0;

        items_inside = cb->buffer_size - cb->remaining_capacity;
        if (before_items_inside == items_inside)
            return CB_EMPTY; /* woken without new items (e.g. purge or free) */
    }

    if (cb->invalid) /* the buffer may have been freed while we slept */
        return CB_ERROR;

    critical_enter(&cb->mutex);

#if ASSERT_ENABLED
    assert(((cb->pos + cb->remaining_capacity) % cb->buffer_size) == cb->rempos);
#endif

    /* Re-check under the lock: a concurrent purge may have emptied us. */
    if (cb->buffer_size - cb->remaining_capacity < len) {
        critical_leave(&cb->mutex);
        return CB_EMPTY;
    }

    const size_t oldrempos = cb->rempos;
    cb->rempos = (oldrempos + len) % cb->buffer_size; /* new read position */

    if (cb->rempos <= oldrempos) {
        /* The read wraps around the end of the buffer: copy in two parts. */
        const size_t remaining = cb->buffer_size - oldrempos;
        memcpy(in, (void *) &cb->buffer[oldrempos], remaining * sizeof(float));
        memcpy(&in[remaining], (void *) cb->buffer, cb->rempos * sizeof(float));
    } else {
        /* Contiguous read. */
        memcpy(in, (void *) &cb->buffer[oldrempos], len * sizeof(float));
    }

    cb->remaining_capacity += len; /* len items were consumed */

    critical_leave(&cb->mutex);
    return CB_OK;
}
fastcall void irq_handler(pt_regs_t *regs) {
    assert(!critical_inside(CRITICAL_IRQ_LOCK));

    critical_enter(CRITICAL_IRQ_HANDLER);
    {
        int irq = regs->trapno - 0x20;

        ipl_enable();
        irq_dispatch(irq);
        /* The next two lines must stay in this order. On EOI the current
         * irq is unmasked and may occur again right here, on the irq
         * stack; that can repeat until the stack is exhausted. Disabling
         * ipl first prevents handling of the same or a lower irq level
         * until we have switched back to a lower critical level. */
        ipl_disable();
        irqctrl_eoi(irq);
    }
    critical_leave(CRITICAL_IRQ_HANDLER);

    critical_dispatch_pending();
}
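/*
 * A minimal sketch (an assumption, not part of this source) of a device
 * handler that irq_dispatch() above would eventually invoke. The
 * irq_return_t / IRQ_HANDLED names follow the common irq-subsystem
 * convention; the real project may spell them differently. Note that such
 * a handler runs between ipl_enable() and ipl_disable(), i.e. with
 * interrupts enabled, so higher-priority irqs can preempt it.
 */
static irq_return_t example_uart_handler(unsigned int irq_nr, void *dev) {
    /* acknowledge the device and service it here */
    (void) irq_nr;
    (void) dev;
    return IRQ_HANDLED;
}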
void cb_purge(CircBuff_t *cb) {
    if (cb->invalid)
        return;

    critical_enter(&cb->mutex);
    cb->remaining_capacity = cb->buffer_size; /* how many elements could be loaded */
    cb->pos = 0;    /* where the next element will be added */
    cb->rempos = 0; /* where the next element will be taken from */
    if (cb->is_waiting)
        mutex_signal(&cb->locker);
    critical_leave(&cb->mutex);
}
int cb_rem_nonblocking(CircBuff_t *cb, float *in, const size_t len) {
    if (cb->invalid)
        return CB_ERROR;
    if (len == 0)
        return CB_OK;

    /* Fail immediately when not enough items are buffered. */
    if (cb->buffer_size - cb->remaining_capacity < len)
        return CB_EMPTY;

    critical_enter(&cb->mutex);

#if ASSERT_ENABLED
    assert(((cb->pos + cb->remaining_capacity) % cb->buffer_size) == cb->rempos);
#endif

    /* Re-check under the lock: a concurrent purge may have emptied us. */
    if (cb->buffer_size - cb->remaining_capacity < len) {
        critical_leave(&cb->mutex);
        return CB_EMPTY;
    }

    const size_t oldrempos = cb->rempos;
    cb->rempos = (oldrempos + len) % cb->buffer_size; /* new read position */

    if (cb->rempos <= oldrempos) {
        /* The read wraps around the end of the buffer: copy in two parts. */
        const size_t remaining = cb->buffer_size - oldrempos;
        memcpy(in, (void *) &cb->buffer[oldrempos], remaining * sizeof(float));
        memcpy(&in[remaining], (void *) cb->buffer, cb->rempos * sizeof(float));
    } else {
        /* Contiguous read. */
        memcpy(in, (void *) &cb->buffer[oldrempos], len * sizeof(float));
    }

    cb->remaining_capacity += len; /* len items were consumed */

    critical_leave(&cb->mutex);
    return CB_OK;
}
void cb_free(CircBuff_t *cb) {
    if (cb->invalid)
        return;

    critical_enter(&cb->mutex);
    free((void *) cb->buffer);
    cb->invalid = 1; /* callers see CB_ERROR from now on */
    if (cb->is_waiting)
        mutex_signal(&cb->locker); /* wake a blocked consumer so it can bail out */
    critical_leave(&cb->mutex);

    mutex_free(&cb->locker);
    mutex_free(&cb->mutex);
}
void interrupt_handle(void) {
    assert(!critical_inside(CRITICAL_IRQ_LOCK));

    critical_enter(CRITICAL_IRQ_HANDLER);

    __raspi__dispatch_bank(regs->irq_pending_1, 0);
    __raspi__dispatch_bank(regs->irq_pending_2, (1 << 5));
    /*
     * Bits 31:21 are unused, and bits 20:8 mirror a number of 'normal'
     * interrupt status bits to speed up interrupt processing. They could
     * be used to improve this driver later; for now we apply a 0xFF mask
     * to keep only the unique interrupt requests.
     */
    __raspi__dispatch_bank(regs->irq_basic_pending & 0xFF, (2 << 5));

    critical_leave(CRITICAL_IRQ_HANDLER);

    critical_dispatch_pending();
}
void interrupt_handle(void) {
    unsigned int irq = REG_LOAD(GICC_IAR);

    if (irq == SPURIOUS_IRQ)
        return;

    /* TODO check if IRQ number is correct */
    assert(!critical_inside(CRITICAL_IRQ_LOCK));

    /* Mask and acknowledge the irq before dispatch so it cannot re-fire
     * while its handler runs with interrupts enabled. */
    irqctrl_disable(irq);
    irqctrl_eoi(irq);

    critical_enter(CRITICAL_IRQ_HANDLER);
    {
        ipl_enable();
        irq_dispatch(irq);
        ipl_disable();
    }
    irqctrl_enable(irq);
    critical_leave(CRITICAL_IRQ_HANDLER);

    critical_dispatch_pending();
}
int cb_add(CircBuff_t *cb, float *in, const size_t len) {
    if (cb->invalid)
        return CB_ERROR;
    if (len == 0) /* handle the empty-request edge case */
        return CB_OK;

    critical_enter(&cb->mutex);

#if ASSERT_ENABLED
    assert(((cb->pos + cb->remaining_capacity) % cb->buffer_size) == cb->rempos);
#endif

    /* If the buffer is not large enough for this batch, request a resize. */
    if (len * cb->size_coeff > cb->buffer_size)
        cb->desired_buf_size = len * cb->size_coeff;

    if (cb->buffer_size < cb->desired_buf_size) {
        const size_t items_inside = cb->buffer_size - cb->remaining_capacity;
        const size_t inflation = cb->desired_buf_size - cb->buffer_size;

        float *grown = (float *) realloc((void *) cb->buffer,
                                         sizeof(float) * cb->desired_buf_size);
        if (grown == NULL) { /* keep the old buffer on allocation failure */
            critical_leave(&cb->mutex);
            return CB_ERROR;
        }
        cb->buffer = grown;

        if (cb->rempos >= cb->pos) {
            /* The readable region wraps: shift its tail to the end of the
             * grown buffer so the new space opens between writer and reader. */
            memmove((void *) &cb->buffer[cb->rempos + inflation],
                    (void *) &cb->buffer[cb->rempos],
                    sizeof(float) * (cb->buffer_size - cb->rempos));
            if (items_inside != 0)
                cb->rempos += inflation;
        }
        cb->remaining_capacity += inflation;
        cb->buffer_size = cb->desired_buf_size; /* set the new size */

#if ASSERT_ENABLED
        assert(cb->buffer_size - cb->remaining_capacity == items_inside);
#endif
    }

    if (cb->buffering && cb->remaining_capacity < 2 * len) {
        /* Still recovering from an overflow: demand headroom for two
         * batches before accepting data again. */
        cb->buffering = 0;
        critical_leave(&cb->mutex);
        return CB_FULL;
    } else if (cb->remaining_capacity < len) {
        /* Overflow: reject the batch and grow more aggressively next time. */
        cb->buffering = 1;
        if (cb->size_coeff < cb->max_size_coeff)
            cb->size_coeff++;
        critical_leave(&cb->mutex);
        return CB_FULL;
    }

    const size_t oldpos = cb->pos;
    cb->pos = (oldpos + len) % cb->buffer_size; /* new write position */
    cb->remaining_capacity -= len;

    if (cb->pos <= oldpos) {
        /* The write wraps around the end of the buffer: copy in two parts. */
        const size_t remaining = cb->buffer_size - oldpos;
        memcpy((void *) &cb->buffer[oldpos], in, remaining * sizeof(float));
        memcpy((void *) cb->buffer, &in[remaining], cb->pos * sizeof(float));
    } else {
        /* Contiguous write. */
        memcpy((void *) &cb->buffer[oldpos], in, len * sizeof(float));
    }

    if (cb->is_waiting)
        mutex_signal(&cb->locker); /* wake a consumer blocked in cb_rem_blocking */

    critical_leave(&cb->mutex);
    return CB_OK;
}
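/*
 * A minimal usage sketch of the CircBuff API above: one producer pushing
 * fixed-size blocks of samples and one consumer draining them. cb_init()
 * and its signature are assumptions -- the allocator/initializer is not
 * shown in this section -- as are the surrounding threading details.
 */
#include <stdio.h>

#define BLOCK 256

static CircBuff_t cb; /* assumed set up by a cb_init()-style call */

static void producer_step(float *samples) {
    /* CB_FULL means the batch was rejected; the buffer has requested a
     * resize and will grow on a later add. */
    if (cb_add(&cb, samples, BLOCK) != CB_OK)
        fprintf(stderr, "dropped a block\n");
}

static void consumer_step(void) {
    float out[BLOCK];
    /* Blocks until BLOCK samples are available; a wait timeout or a
     * purge/free while sleeping yields CB_EMPTY instead. */
    if (cb_rem_blocking(&cb, out, BLOCK) == CB_OK) {
        /* process out[0..BLOCK-1] here */
    }
}

static void teardown(void) {
    cb_free(&cb); /* marks the buffer invalid and wakes a blocked consumer */
}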