/* Register 'user' as a reader of this interlock.
 * user may be KHRN_INTERLOCK_USER_NONE (no bits) or a single user bit.
 * If a writer currently holds the interlock it is flushed out first.
 * Returns true if the user was newly added, false if it was already
 * registered as a reader. */
bool khrn_interlock_read(KHRN_INTERLOCK_T *interlock, KHRN_INTERLOCK_USER_T user) /* user allowed to be KHRN_INTERLOCK_USER_NONE */
{
   vcos_assert(_count(user) <= 1);   /* at most one user bit set */
   if (!(interlock->users & user)) {
      if (interlock->users & KHRN_INTERLOCK_USER_WRITING) {
         /* a writer holds the interlock: flush it before adding a reader */
         vcos_assert(_count(interlock->users) == 2); /* exactly 1 writer (plus writing bit) */
         khrn_interlock_flush((KHRN_INTERLOCK_USER_T)(interlock->users & ~KHRN_INTERLOCK_USER_WRITING));
         vcos_assert(!interlock->users);   /* flush must have released everything */
      }
      interlock->users = (KHRN_INTERLOCK_USER_T)(interlock->users | user);
      return true;
   }
   return false;
}
// Show a startup message, then run a five-step calibration countdown.
// Each step ticks the counter twice (odd phase, then even phase) and
// samples the humidity and moisture sensors in between.
void calibrate_countdown(void)
{
    lcd.print("Starting rover");
    lcd.display();
    for (int phase = 9; phase >= 1; phase -= 2) {
        _count(phase);
        delay(1000);
        _count(phase - 1);
        humidity();
        moisture();
        delay(1000);
    }
    lcd.clear();
}
// eval MalVal *eval_ast(MalVal *ast, GHashTable *env) { if (!ast || mal_error) return NULL; if (ast->type == MAL_SYMBOL) { //g_print("EVAL symbol: %s\n", ast->val.string); // TODO: check if not found return g_hash_table_lookup(env, ast->val.string); } else if ((ast->type == MAL_LIST) || (ast->type == MAL_VECTOR)) { //g_print("EVAL sequential: %s\n", _pr_str(ast,1)); MalVal *el = _map2((MalVal *(*)(void*, void*))EVAL, ast, env); if (!el || mal_error) return NULL; el->type = ast->type; return el; } else if (ast->type == MAL_HASH_MAP) { //g_print("EVAL hash_map: %s\n", _pr_str(ast,1)); GHashTableIter iter; gpointer key, value; MalVal *seq = malval_new_list(MAL_LIST, g_array_sized_new(TRUE, TRUE, sizeof(MalVal*), _count(ast))); g_hash_table_iter_init (&iter, ast->val.hash_table); while (g_hash_table_iter_next (&iter, &key, &value)) { MalVal *kname = malval_new_string((char *)key); g_array_append_val(seq->val.array, kname); MalVal *new_val = EVAL((MalVal *)value, env); g_array_append_val(seq->val.array, new_val); } return _hash_map(seq); } else { //g_print("EVAL scalar: %s\n", _pr_str(ast,1)); return ast; } }
// Process void Tree::Process() { switch (_step) { case 0: if (_initialize()) _step++; else return; case 1: if (_count()) _step++; else throw std::runtime_error("read error"); case 2: if (_sum()) _step++; else return; case 3: if (_build()) _step++; else throw std::runtime_error("build error"); case 4: if (_subtrees()) _step++; else throw std::runtime_error("subtree error"); default: _fptree.free(); FinishProcess(); } }
/* Count one weighted item set in the item set tree.
 * set/freq are parallel arrays of cnt item ids and per-item weights;
 * the recursion multiplies weights together (see _count, which starts
 * from the initial weight 1.0 passed here). */
void ist_count (ISTREE *ist, int *set, float *freq, int cnt, int sort)
{                               /* --- count item set in tree */
  assert(ist && set && (cnt >= 0)); /* check the function arguments */
  if (sort) ist_sort(set, freq, cnt);  /* sort the item identifiers */
  /* start the recursion at the root level with accumulated weight 1.0 */
  _count(ist->levels[0], set, 1.,freq, cnt); /* recursively count item set */
  ist->setcnt++;                /* increment set counter */
}  /* ist_count() */
/* Return the user currently holding write access to the interlock,
 * or KHRN_INTERLOCK_USER_NONE if nobody is writing. */
KHRN_INTERLOCK_USER_T khrn_interlock_get_writer(KHRN_INTERLOCK_T *interlock)
{
   /* writing flag set implies exactly one writer bit plus the flag */
   vcos_assert(!(interlock->users & KHRN_INTERLOCK_USER_WRITING) || (_count(interlock->users) == 2));
   if (interlock->users & KHRN_INTERLOCK_USER_WRITING) {
      /* strip the flag, leaving just the writer's user bit */
      return (KHRN_INTERLOCK_USER_T)(interlock->users & ~KHRN_INTERLOCK_USER_WRITING);
   }
   return KHRN_INTERLOCK_USER_NONE;
}
// Set the timer duration; when reset is requested, also tick the counter
// and re-anchor the start time to now.
void Timer::setTime(float time, bool reset)
{
    this->time = time;
    if (!reset)
        return;
    _count();
    this->startTime = clock();
}
/* Count one transaction (sets, cnt items) in the item set tree.
 * nbfreq is updated by the recursion with the number of counters that
 * reach minsup.  Transactions shorter than the current tree level
 * cannot contain a candidate of that size, so they are skipped. */
void ist_count (ISTREE *ist, int *sets, int cnt, int & nbfreq, int minsup)
{                               /* --- count transaction in tree */
  assert(ist                    /* check the function arguments */
      && (cnt >= 0) && (sets || (cnt <= 0)));
  if (cnt >= ist->lvlcnt)       /* recursively count transaction */
    _count(ist->levels[0], sets, cnt, ist->lvlcnt, nbfreq, minsup);
  ist->tacnt++;                 /* increment the transaction counter */
}  /* ist_count() */
unsigned int static_selfindex::count(unsigned char* pattern, unsigned int plen) { //unsigned int plen = strlen((char*)pattern); if(sigma_mapper->map(pattern,plen+1)!=0) { //the pattern has letters which are not present in the text return 0; } return _count(pattern,plen); }
// Tick the counter once per elapsed period: keep calling _count() while
// the configured duration has elapsed, stopping early if _count() says so.
void Timer::update()
{
    for (;;) {
        if (clock() - startTime < time)
            break;            // duration not (or no longer) elapsed
        if (_count())
            break;            // counter asked us to stop
    }
}
/* Count a transaction-tree (prefix-compressed transactions) node against
 * the item set tree node 'node'.  Counter increments are weighted by the
 * number of transactions sharing the prefix (tat_cnt).  nbfreq is bumped
 * each time a counter first reaches minsup. */
static void _countx (ISNODE *node, TATREE *tat, int min, int & nbfreq, int minsup )
{                               /* --- count t.a. tree recursively */
  int i, k, n;                  /* vector index, loop variable, size */
  ISNODE **vec;                 /* child node vector */
  assert(node && tat);          /* check the function arguments */
  if (tat_max(tat) < min)       /* if the transactions are too short, */
    return;                     /* abort the recursion */
  k = tat_size(tat);            /* get the number of children */
  if (k <= 0) {                 /* if there are no children */
    /* negative size encodes a plain (uncompressed) transaction of -k items */
    if (k < 0) _count(node, tat_items(tat), -k, min, nbfreq, minsup);
    return;                     /* count the normal transaction */
  }                             /* and abort the function */
  while (--k >= 0)              /* count the transactions recursively */
  { _countx(node, tat_child(tat, k), min, nbfreq, minsup); }
  if (node->chcnt == 0) {       /* if this is a new node */
    n = node->offset;           /* get the index offset */
    for (k = tat_size(tat); --k >= 0; ) {
      i = tat_item(tat,k) -n;   /* traverse the items */
      if (i < 0) return;        /* if before first item, abort */
      if (i < node->size)       /* if inside the counter range */
      {
        if( node->cnts[i] < minsup ) {
          node->cnts[i] += tat_cnt(tat_child(tat, k));
          if( node->cnts[i] >= minsup ) // count the frequents
            nbfreq++ ;          /* counter just crossed the minsup threshold */
        }
        else node->cnts[i] += tat_cnt(tat_child(tat, k));
      }
    } }                         /* count the transaction */
  else if (node->chcnt > 0) {   /* if there are child nodes */
    vec = (ISNODE**)(node->cnts +node->size);
    n = vec[0]->id;             /* get the child node vector */
    min--;                      /* one item less to the deepest nodes */
    for (k = tat_size(tat); --k >= 0; ) {
      i = tat_item(tat,k) -n;   /* traverse the items */
      if (i < 0) return;        /* if before first item, abort */
      if ((i < node->chcnt) && vec[i]) {
        _countx(vec[i], tat_child(tat, k), min, nbfreq, minsup); }
    }                           /* if the child exists, */
  }                             /* count the transaction recursively */
}  /* _countx() */
/* Remove 'user' (exactly one user bit) from the interlock.
 * If the user was the writer, the writing flag is cleared as well.
 * Returns true if the user was present, false otherwise. */
bool khrn_interlock_release(KHRN_INTERLOCK_T *interlock, KHRN_INTERLOCK_USER_T user)
{
   vcos_assert(_count(user) == 1);   /* exactly one user bit */
   /* if a writer exists, it must be this user (plus the writing flag) */
   vcos_assert(!(interlock->users & KHRN_INTERLOCK_USER_WRITING) || (interlock->users == (user | KHRN_INTERLOCK_USER_WRITING)));
   if (!(interlock->users & user))
      return false;
   interlock->users = (KHRN_INTERLOCK_USER_T)(interlock->users & ~user & ~KHRN_INTERLOCK_USER_WRITING);
   return true;
}
// Evaluate a mal AST node.  Non-lists delegate to eval_ast; a list is
// "applied": all elements are evaluated, the first must resolve to a
// native function pointer, and it is invoked on the next two elements.
// Returns NULL on error (or if mal_error is already set).
MalVal *EVAL(MalVal *ast, GHashTable *env) {
    if (!ast || mal_error) return NULL;
    //g_print("EVAL: %s\n", _pr_str(ast,1));
    if (ast->type != MAL_LIST) {
        return eval_ast(ast, env);
    }
    if (!ast || mal_error) return NULL;   // NOTE(review): redundant re-check; ast unchanged since the first test
    // apply list
    //g_print("EVAL apply list: %s\n", _pr_str(ast,1));
    if (_count(ast) == 0) {
        return ast;   // empty list evaluates to itself
    }
    MalVal *a0 = _nth(ast, 0);
    assert_type(a0, MAL_SYMBOL, "Cannot invoke %s", _pr_str(a0,1));
    MalVal *el = eval_ast(ast, env);   // evaluate every element
    if (!el || mal_error) { return NULL; }
    // first element must have resolved to a two-argument C function
    MalVal *(*f)(void *, void*) = (MalVal *(*)(void*, void*))_first(el);
    //g_print("eval_invoke el: %s\n", _pr_str(el,1));
    return f(_nth(el, 1), _nth(el, 2));
}
// Return a string representation of the MalVal arguments. Returned string must // be freed by caller. char *_pr_str_args(MalVal *args, char *sep, int print_readably) { assert_type(args, MAL_LIST|MAL_VECTOR, "_pr_str called with non-sequential args"); int i; char *repr = g_strdup_printf("%s", ""), *repr2 = NULL; for (i=0; i<_count(args); i++) { MalVal *obj = g_array_index(args->val.array, MalVal*, i); if (i != 0) { repr2 = repr; repr = g_strdup_printf("%s%s", repr2, sep); free(repr2); } repr2 = repr; repr = g_strdup_printf("%s%s", repr2, _pr_str(obj, print_readably)); free(repr2); } return repr; }
// Render a sequential MalVal as "<start>elem1 elem2 ...<end>".
// Returned string must be freed by caller (with g_free).
// Fixes: the initial "<start>" string was leaked whenever the sequence was
// non-empty (the i==0 branch overwrote repr without freeing it), and
// g_strdup_printf allocations were released with free() instead of g_free().
char *_pr_str_list(MalVal *obj, int print_readably, char start, char end) {
    int i;
    char *repr = NULL, *repr_tmp1 = NULL, *repr_tmp2 = NULL;
    repr = g_strdup_printf("%c", start);
    for (i=0; i<_count(obj); i++) {
        repr_tmp1 = _pr_str(g_array_index(obj->val.array, MalVal*, i),
                            print_readably);
        if (i == 0) {
            g_free(repr);   // was leaked before this fix
            repr = g_strdup_printf("%c%s", start, repr_tmp1);
        } else {
            repr_tmp2 = repr;
            repr = g_strdup_printf("%s %s", repr_tmp2, repr_tmp1);
            g_free(repr_tmp2);
        }
        g_free(repr_tmp1);
    }
    // close the sequence with the end delimiter
    repr_tmp2 = repr;
    repr = g_strdup_printf("%s%c", repr_tmp2, end);
    g_free(repr_tmp2);
    return repr;
}
/* Acquire write access to the interlock for 'user' (which may be
 * KHRN_INTERLOCK_USER_NONE to merely flush other users without claiming).
 * Every other user is flushed out, highest bit first, before write
 * access is granted.  Returns true on success; false if 'user' already
 * holds write access. */
bool khrn_interlock_write(KHRN_INTERLOCK_T *interlock, KHRN_INTERLOCK_USER_T user) /* user allowed to be KHRN_INTERLOCK_USER_NONE */
{
   interlock->users &= ~KHRN_INTERLOCK_USER_INVALID;   /* drop the invalid marker */
   vcos_assert(_count(user) <= 1);   /* at most one user bit */
   if (!user || (~interlock->users & (user | KHRN_INTERLOCK_USER_WRITING))) {
      for (;;) {
         KHRN_INTERLOCK_USER_T other_users, other_user;
         /* Fix: consider ALL other users here (mask with
          * ~KHRN_INTERLOCK_USER_WRITING).  The previous code first masked
          * with KHRN_INTERLOCK_USER_WRITING, so plain readers never
          * triggered a flush and could be left coexisting with the new
          * writer -- violating the invariant khrn_interlock_read asserts
          * (writing flag implies exactly two bits set). */
         other_users = (KHRN_INTERLOCK_USER_T)(interlock->users & ~user & ~KHRN_INTERLOCK_USER_WRITING);
         if (!other_users) {
            break;   /* nobody else left; we can claim write access */
         }
         other_user = (KHRN_INTERLOCK_USER_T)(1 << _msb(other_users));
         khrn_interlock_flush(other_user);
         vcos_assert(!(interlock->users & other_user));
      }
      if (user) {
         interlock->users = (KHRN_INTERLOCK_USER_T)(interlock->users | user | KHRN_INTERLOCK_USER_WRITING);
      }
      return true;
   }
   return false;
}
/* Count one transaction against the item set tree, recursively.
 * sets/cnt is the (sorted) remainder of the transaction; min is the
 * number of further items still needed to reach a leaf counter.
 * nbfreq is bumped each time a counter reaches exactly minsup, so it
 * tracks how many item sets became frequent. */
static void _count (ISNODE *node, int *sets, int cnt, int min, int & nbfreq, int minsup)
{                               /* --- count transaction recursively */
  int i, n;                     /* vector index and size */
  ISNODE **vec;                 /* child node vector */
  assert(node                   /* check the function arguments */
      && (cnt >= 0) && (sets || (cnt <= 0)));
  if (node->chcnt == 0) {       /* if this is a new node */
    n = node->offset;           /* get the index offset */
    while ((cnt > 0) && (*sets < n)) {
      cnt--; sets++; }          /* skip items before first counter */
    while (--cnt >= 0) {        /* traverse the transaction's items */
      i = *sets++ -n;           /* compute counter vector index */
      if (i >= node->size) return;
      node->cnts[i]++;          /* if the counter exists, */
      if( node->cnts[i] == minsup) // count the frequents
        nbfreq++ ;              /* counter just reached the threshold */
    } }                         /* count the transaction */
  else if (node->chcnt > 0) {   /* if there are child nodes */
    vec = (ISNODE**)(node->cnts +node->size);
    n = vec[0]->id;             /* get the child node vector */
    min--;                      /* one item less to the deepest nodes */
    while ((cnt > min) && (*sets < n)) {
      cnt--; sets++; }          /* skip items before first child */
    while (--cnt >= min) {      /* traverse the transaction's items */
      i = *sets++ -n;           /* compute child vector index */
      if (i >= node->chcnt) return;
      if (vec[i]) _count(vec[i], sets, cnt, min, nbfreq, minsup);
    }                           /* if the child exists, */
  }                             /* count the transaction recursively */
}  /* _count() */
/* Count one weighted item set against the tree, recursively.
 * 'old' is the weight accumulated so far; each item multiplies in its
 * own weight from freq, and both the weighted count and its square are
 * accumulated (occ_square presumably supports variance computation --
 * TODO confirm against the consumer of occ_square). */
static void _count (ISNODE *node, int *set, float old,float *freq, int cnt)
{                               /* --- count item set recursively */
  int i;                        /* vector index */
  float frq;                    /* accumulated weight for this item */
  ISNODE **children;            /* child node vector */
  assert(node && set && (cnt >= 0)); /* check arguments */
  children = (ISNODE**)(node->cnts +2*node->size);
  while (--cnt >= 0) {          /* traverse item set */
    i = *set++ -node->offs;     /* compute counter vector index */
    frq = *freq++*old ;         /* multiply in this item's weight */
    if (i < 0) continue;        /* if less than first, ignore */
    if (i >= node->size) return;/* if greater than last, abort */
    /*node->cnts[i]++; */       /* count item set */
    node->cnts[i]+=frq;         /* weighted count */
    node->occ_square[i]+=frq*frq; /* sum of squared weights */
    if (node->chcnt <= 0)       /* if there are no children, */
      continue;                 /* continue with next item */
    i += node->offs -ID(children[0]); /* compute child vector index */
    if ((i < 0) || (i >= node->chcnt))
      continue;                 /* if index is out of range, continue */
    if (children[i]) _count(children[i], set, frq, freq, cnt);
  }                             /* count item set recursively */
}  /* _count() */
/* NOTE(review): this looks like auto-generated FastRPC/QAIC marshalling code
 * (remote_handle_invoke, _allocator, _COPY/_ALLOCATE/_TRY macros, SLIM_IFPTR32
 * 32/64-bit pointer-size selection) -- confirm before hand-editing; such stubs
 * are normally regenerated from the IDL.
 * Flow: size the in/rout argument counts via _count/_count_1, allocate the
 * remote_arg array, pack the primitive and sequence inputs, invoke the remote
 * method, then unpack the rout sequences.  Returns 0 on success or a nonzero
 * error code propagated through _nErr. */
static __inline int _stub_method_3(remote_handle _handle, uint32_t _mid, uint32_t _in0[1], uint32_t _in1[1], void* _in2[1], uint32_t _in2Len[1], void* _rout3[1], uint32_t _rout3Len[1], char* _in4[1], uint32_t _in4Len[1]) { remote_arg* _pra; int _numIn[1]; int _numROut[1]; char* _seq_nat2; int _ii; char* _seq_nat3; _allocator _al[1] = {{0}}; uint32_t _primIn[5]; remote_arg* _praIn; remote_arg* _praROut; remote_arg* _praROutPost; remote_arg** _ppraROutPost = &_praROutPost; remote_arg** _ppraIn = &_praIn; remote_arg** _ppraROut = &_praROut; char* _seq_primIn2; int _nErr = 0; char* _seq_primIn3; _numIn[0] = 3; _numROut[0] = 0; for(_ii = 0, _seq_nat2 = (char*)_in2[0];_ii < (int)_in2Len[0];++_ii, _seq_nat2 = (_seq_nat2 + SLIM_IFPTR32(8, 16))) { _count_1(_numIn, _numROut, SLIM_IFPTR32((char**)&(((uint32_t*)_seq_nat2)[0]), (char**)&(((uint64_t*)_seq_nat2)[0])), SLIM_IFPTR32((uint32_t*)&(((uint32_t*)_seq_nat2)[1]), (uint32_t*)&(((uint32_t*)_seq_nat2)[2]))); } for(_ii = 0, _seq_nat3 = (char*)_rout3[0];_ii < (int)_rout3Len[0];++_ii, _seq_nat3 = (_seq_nat3 + SLIM_IFPTR32(8, 16))) { _count(_numIn, _numROut, SLIM_IFPTR32((char**)&(((uint32_t*)_seq_nat3)[0]), (char**)&(((uint64_t*)_seq_nat3)[0])), SLIM_IFPTR32((uint32_t*)&(((uint32_t*)_seq_nat3)[1]), (uint32_t*)&(((uint32_t*)_seq_nat3)[2]))); } _allocator_init(_al, 0, 0); _ALLOCATE(_nErr, _al, ((((_numIn[0] + _numROut[0]) + 1) + 0) * sizeof(_pra[0])), 4, _pra); _pra[0].buf.pv = (void*)_primIn; _pra[0].buf.nLen = sizeof(_primIn); _praIn = (_pra + 1); _praROut = (_praIn + _numIn[0] + 0); _praROutPost = _praROut; _COPY(_primIn, 0, _in0, 0, 4); _COPY(_primIn, 4, _in1, 0, 4); _COPY(_primIn, 8, _in2Len, 0, 4); _ALLOCATE(_nErr, _al, (_in2Len[0] * 4), 4, _praIn[0].buf.pv); _praIn[0].buf.nLen = (4 * _in2Len[0]); for(_ii = 0, _seq_primIn2 = (char*)_praIn[0].buf.pv, _seq_nat2 = (char*)_in2[0];_ii < (int)_in2Len[0];++_ii, _seq_primIn2 = (_seq_primIn2 + 4), _seq_nat2 = (_seq_nat2 + SLIM_IFPTR32(8, 16))) { _TRY(_nErr, _stub_pack_1(_al, (_praIn
+ 1), _ppraIn, (_praROut + 0), _ppraROut, _seq_primIn2, 0, SLIM_IFPTR32((char**)&(((uint32_t*)_seq_nat2)[0]), (char**)&(((uint64_t*)_seq_nat2)[0])), SLIM_IFPTR32((uint32_t*)&(((uint32_t*)_seq_nat2)[1]), (uint32_t*)&(((uint32_t*)_seq_nat2)[2])))); } _COPY(_primIn, 12, _rout3Len, 0, 4); _ALLOCATE(_nErr, _al, (_rout3Len[0] * 4), 4, _praIn[1].buf.pv); _praIn[1].buf.nLen = (4 * _rout3Len[0]); for(_ii = 0, _seq_primIn3 = (char*)_praIn[1].buf.pv, _seq_nat3 = (char*)_rout3[0];_ii < (int)_rout3Len[0];++_ii, _seq_primIn3 = (_seq_primIn3 + 4), _seq_nat3 = (_seq_nat3 + SLIM_IFPTR32(8, 16))) { _TRY(_nErr, _stub_pack(_al, (_praIn + 2), _ppraIn, (_praROut + 0), _ppraROut, _seq_primIn3, 0, SLIM_IFPTR32((char**)&(((uint32_t*)_seq_nat3)[0]), (char**)&(((uint64_t*)_seq_nat3)[0])), SLIM_IFPTR32((uint32_t*)&(((uint32_t*)_seq_nat3)[1]), (uint32_t*)&(((uint32_t*)_seq_nat3)[2])))); } _COPY(_primIn, 16, _in4Len, 0, 4); _praIn[2].buf.pv = _in4[0]; _praIn[2].buf.nLen = (8 * _in4Len[0]); _TRY(_nErr, __QAIC_REMOTE(remote_handle_invoke)(_handle, REMOTE_SCALARS_MAKEX(0, _mid, (_numIn[0] + 1), (_numROut[0] + 0), 0, 0), _pra)); for(_ii = 0, _seq_nat2 = (char*)_in2[0];_ii < (int)_in2Len[0];++_ii, _seq_nat2 = (_seq_nat2 + SLIM_IFPTR32(8, 16))) { _TRY(_nErr, _stub_unpack_1((_praROutPost + 0), _ppraROutPost, 0, SLIM_IFPTR32((char**)&(((uint32_t*)_seq_nat2)[0]), (char**)&(((uint64_t*)_seq_nat2)[0])), SLIM_IFPTR32((uint32_t*)&(((uint32_t*)_seq_nat2)[1]), (uint32_t*)&(((uint32_t*)_seq_nat2)[2])))); } for(_ii = 0, _seq_nat3 = (char*)_rout3[0];_ii < (int)_rout3Len[0];++_ii, _seq_nat3 = (_seq_nat3 + SLIM_IFPTR32(8, 16))) { _TRY(_nErr, _stub_unpack((_praROutPost + 0), _ppraROutPost, 0, SLIM_IFPTR32((char**)&(((uint32_t*)_seq_nat3)[0]), (char**)&(((uint64_t*)_seq_nat3)[0])), SLIM_IFPTR32((uint32_t*)&(((uint32_t*)_seq_nat3)[1]), (uint32_t*)&(((uint32_t*)_seq_nat3)[2])))); } _CATCH(_nErr) {} _allocator_deinit(_al); return _nErr; }
bool static_selfindex::_exist(unsigned char* pattern, unsigned int plen) { unsigned int count = _count(pattern,plen); return count>0; }
/**
 * create a fixed pool of relocatable objects.
 *
 * return (opaque) pointer to the newly created pool, or NULL if
 * there was insufficient memory.
 *
 * @param size Size of each sub-object
 * @param num Number of sub-objects
 * @param align Alignment of sub-objects
 * @param flags Flags
 * @param name A name for this pool
 * @param overhead Allocate additional space in the non-moveable heap
 *
 * If flags include VC_POOL_FLAGS_SUBDIVISIBLE we get a single relocatable
 * memory block large enough for all 'n' objects; it can either be used
 * as a single block, or divided up into 'n' of them.
 * -------------------------------------------------------------------- */
VC_POOL_T * vc_pool_create( size_t size, uint32_t num, uint32_t align,
                            VC_POOL_FLAGS_T flags, const char *name,
                            uint32_t overhead_size )
{
   int i;
   int mem_flags = MEM_FLAG_NO_INIT;
   vcos_assert(size != 0);
   vcos_assert(num != 0);
   vcos_assert(name);
   /* round the per-object overhead up to OVERHEAD_ALIGN */
   overhead_size = (overhead_size+OVERHEAD_ALIGN-1) & ~(OVERHEAD_ALIGN-1);
   // allocate and zero main struct
   // (header, object descriptors and overhead area in one allocation)
   int alloc_len = sizeof(VC_POOL_T) + num * sizeof(VC_POOL_OBJECT_T) + num * overhead_size;
   VC_POOL_T *pool = (VC_POOL_T*)rtos_prioritymalloc( alloc_len,
                                                      RTOS_ALIGN_DEFAULT,
                                                      RTOS_PRIORITY_UNIMPORTANT,
                                                      "vc_pool" );
   if ( !pool ) return NULL; // failed to allocate pool
   memset( pool, 0, alloc_len );
   // array of pool objects
   pool->object = (VC_POOL_OBJECT_T *)((unsigned char *)pool + sizeof(VC_POOL_T));
   // initialise
   pool->magic = POOL_MAGIC;
   pool->latch = rtos_latch_unlocked();
   if ( flags & VC_POOL_FLAGS_DIRECT )
      mem_flags |= MEM_FLAG_DIRECT;
   if ( flags & VC_POOL_FLAGS_COHERENT )
      mem_flags |= MEM_FLAG_COHERENT;
   if ( flags & VC_POOL_FLAGS_HINT_PERMALOCK )
      mem_flags |= MEM_FLAG_HINT_PERMALOCK;
   if ( align == 0 ) align = 32; // minimum 256-bit aligned
   vcos_assert( _count(align) == 1 ); // must be power of 2
   pool->alignment = align;
   pool->overhead = (uint8_t*)(pool+1) + num*sizeof(VC_POOL_OBJECT_T);
   pool->overhead_size = overhead_size;
   pool->name = name;
   pool->max_objects = num;
   pool->pool_flags = flags;
   if ( flags & VC_POOL_FLAGS_SUBDIVISIBLE ) {
      // a single mem_handle, shared between objects
      uint32_t rounded_size = (size + align - 1) & ~(align - 1);
      pool->mem = mem_alloc( rounded_size, align, (MEM_FLAG_T)mem_flags, name );
      if ( pool->mem == MEM_INVALID_HANDLE ) {
         // out of memory... clean up nicely and return error
         rtos_priorityfree( pool );
         return NULL;
      }
      pool->nobjects = 0;
      pool->object_size = 0;
      pool->max_object_size = rounded_size;
   } else {
      // bunch of individual objects
      for (i=0; i<num; i++) {
         MEM_HANDLE_T mem = mem_alloc( size, align, (MEM_FLAG_T)mem_flags, name );
         pool->object[i].mem = mem;
         // all ->offset fields are 0 from the previous memset
         if ( mem == MEM_INVALID_HANDLE ) {
            // out of memory... clean up nicely and return error
            // (release the handles allocated so far, then the pool itself)
            while (i > 0)
               mem_release( pool->object[--i].mem );
            rtos_priorityfree( pool );
            return NULL; // failed to allocate pool
         }
         // pointer to 'overhead' memory for this entry
         pool->object[i].overhead = pool->overhead + i*pool->overhead_size;
      }
      pool->mem = MEM_INVALID_HANDLE;
      pool->nobjects = num;
      pool->object_size = size;
      pool->max_object_size = size;
   }
   create_event( pool );
   // link into global list
   rtos_latch_get(&pool_list_latch);
   pool->next = vc_pool_list;
   vc_pool_list = pool;
   rtos_latch_put(&pool_list_latch);
   // done
   return pool;
}
/* Pick a random element from list; when spoiler_hack is active the
 * choice is deterministic (always the first element). */
static int _random(int list[])
{
    int n;
    if (spoiler_hack)
        return list[0];
    n = _count(list);
    return list[randint0(n)];
}