/*
 * Allocate a stem cache holding up to `entries` stemmed terms.
 * `stemmer` is the stemming callback, invoked with `opaque` as its first
 * argument.  Returns the new cache, or NULL on allocation failure (in
 * which case nothing is leaked).
 */
struct stem_cache *stem_cache_new(void (*stemmer) (void *opaque, char *term),
  void *opaque, unsigned int entries) {
    struct stem_cache *cache = malloc(sizeof(*cache));

    if (!cache) {
        return NULL;
    }

    cache->arr = malloc(sizeof(*cache->arr) * entries);
    if (!cache->arr) {
        free(cache);
        return NULL;
    }

    /* hash table sized to the next power of two above `entries` */
    cache->lookup = chash_ptr_new(bit_log2(entries) + 1, 2.0,
        (unsigned int (*)(const void *)) str_hash,
        (int (*)(const void *, const void *)) str_cmp);
    if (!cache->lookup) {
        free(cache->arr);
        free(cache);
        return NULL;
    }

    cache->capacity = entries;
    cache->size = 0;
    cache->stem = stemmer;
    cache->opaque = opaque;
    cache->clock_pos = 0;
    cache->stemmed = cache->cached = 0;
    return cache;
}
/*
 * Build a 64-bit pseudo-random value by concatenating the output of
 * repeated rand() calls.  Each call contributes bit_log2(RAND_MAX) + 1
 * random bits; the loop shifts successive chunks into place until all
 * 64 bits are covered.  Uses the rand() stream as-is (seed via srand()).
 */
uint64_t random_uint64() {
    const int bits_per_call = bit_log2(RAND_MAX) + 1;
    uint64_t value = 0;
    int shift;

    for (shift = 0; shift < 64; shift += bits_per_call) {
        value |= (uint64_t)rand() << shift;
    }
    return value;
}
struct postings* postings_new(unsigned int tablesize, void (*stem)(void *opaque, char *term), void *opaque, struct stop *list) { struct postings *p = malloc(sizeof(*p)); unsigned int bits = bit_log2(tablesize); tablesize = bit_pow2(bits); /* tablesize is now guaranteed to be a power * of two */ if (p && (p->hash = malloc(sizeof(struct postings_node *) * tablesize)) && (p->string_mem = poolalloc_new(!!DEAR_DEBUG, MEMPOOL_SIZE, NULL)) && (p->node_mem = objalloc_new(sizeof(struct postings_node), 0, !!DEAR_DEBUG, MEMPOOL_SIZE, NULL))) { p->stop = list; p->stem = stem; p->stem_opaque = opaque; p->tblbits = bits; p->tblsize = tablesize; assert(p->tblbits && p->tblsize); p->size = p->dterms = p->terms = 0; p->err = 0; p->docno = 0; p->docs = 0; p->update = NULL; p->update_required = 0; BIT_ARRAY_NULL(p->hash, tablesize); } else if (p) { if (p->hash) { free(p->hash); if (p->string_mem) { poolalloc_delete(p->string_mem); if (p->node_mem) { objalloc_delete(p->node_mem); } } } p = NULL; } return p; }
/*
 * Insert a mapping for effective address `ea` in segment `vsid` into the
 * guest OS's hashed page table, building the PTE from `entry`.
 * If a PTE with a matching AVPN already exists in the target PTEG it is
 * invalidated (with tlbie) and replaced in the same slot.  Asserts if the
 * PTEG has no free or matching slot.  Returns H_Success.
 */
sval insert_ea_map(struct cpu_thread *thr, uval vsid, uval ea,
                   union ptel entry)
{
    int j = 0;
    uval lp = LOG_PGSIZE; /* FIXME: get large page size */
    /* full virtual address: segment base | offset within segment */
    uval vaddr = (vsid << LOG_SEGMENT_SIZE) | (ea & (SEGMENT_SIZE - 1));
    uval log_ht_size = bit_log2(thr->cpu->os->htab.num_ptes) + LOG_PTE_SIZE;
    /* index of the first PTE in the target PTEG */
    uval pteg = NUM_PTES_IN_PTEG * get_pteg(vaddr, vsid, lp, log_ht_size);

    union pte *ht = (union pte *)GET_HTAB(thr->cpu->os);
    union pte pte;

    /* assemble the new PTE: RPN word comes straight from the caller */
    pte.words.vsidWord = 0;
    pte.words.rpnWord = entry.word;

    pte.bits.avpn = (vsid << 5) | VADDR_TO_API(vaddr);
    pte.bits.v = 1;

    union pte *target = &ht[pteg];
    int empty = NUM_PTES_IN_PTEG;   /* sentinel: no slot found yet */
    uval remove = 0;

redo_search:
    /* Scan the PTEG for an AVPN match (replace) or the first invalid
     * slot (insert).  NOTE(review): on a redo, j and target resume from
     * where the previous pass left off rather than restarting at slot 0
     * -- presumably intentional since earlier slots were already
     * examined, but verify against the locking protocol. */
    for (; j < NUM_PTES_IN_PTEG; ++j, ++target) {
        if (target->bits.avpn == pte.bits.avpn) {
            empty = j;
            remove = 1;
            break;
        }
        if (j < empty && target->bits.v == 0) {
            empty = j;
        }
    }

    assert(empty != NUM_PTES_IN_PTEG, "Full htab\n");

    target = &ht[pteg + empty];

    pte_lock(target);

    /* Things may have changed since we got the lock */
    if (target->bits.v && target->bits.avpn != pte.bits.avpn)
        goto redo_search;

    if (remove) {
        /* replacing an existing translation: invalidate and flush TLB
         * before writing the new entry */
        pte_invalidate(&thr->cpu->os->htab, target);
        ptesync();
        do_tlbie(target, pteg + empty);
    }

    /* debug trace, kept for reference:
     * hprintf("insert: ea: 0x%lx vsid: 0x%lx -> idx: 0x%lx\n",
     *         ea, vsid, j + pteg); */

    /* NOTE(review): pte.bits.v was set to 1 above and never cleared, so
     * this condition is always true here -- confirm whether a path that
     * clears it was intended. */
    if (pte.bits.v) {
        pte.bits.lock = 1; /* Make sure new entry is still locked */
        pte_insert(&thr->cpu->os->htab, empty + pteg,
                   pte.words.vsidWord, pte.words.rpnWord, ea);
    }

    pte_unlock(target);

    return H_Success;
}
int main(int argc, char** argv) { cl_platform_id platform_id; cl_device_id device_id; cl_context context; cl_command_queue command_queue; cl_mem a_mem; cl_program program; cl_kernel kernel; cl_int result; cl_uint count; cl_int* a = malloc(ARRAY_LENGTH * sizeof(cl_int)); cl_int* a0 = malloc(ARRAY_LENGTH * sizeof(cl_int)); cl_uint i; cl_uint stage, pass; (void)argc; (void)argv; if (!a || !a0) return 1; srand(time(NULL)); for (i = 0; i < ARRAY_LENGTH; i++) a[i] = a0[i] = rand(); qsort(a0, ARRAY_LENGTH, sizeof(int), compare); result = clGetPlatformIDs(1, &platform_id, &count); if (result || count != 1) return 1; result = clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_DEFAULT, 1, &device_id, &count); if (result || count != 1) return 1; context = clCreateContext(NULL, 1, &device_id, NULL, NULL, &result); if (result) return 1; command_queue = clCreateCommandQueue(context, device_id, 0, &result); if (result) return 1; a_mem = clCreateBuffer(context, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR, ARRAY_LENGTH * sizeof(cl_int), a, &result); if (result) return 1; program = clCreateProgramWithSource(context, 1, &program_source, &program_size, &result); if (result) return 1; result = clBuildProgram(program, 1, &device_id, NULL, NULL, NULL); if (result) return 1; kernel = clCreateKernel(program, "kernel_bitonic", &result); if (result) return 1; result = clSetKernelArg(kernel, 0, sizeof(cl_mem), &a_mem); if (result) return 1; for (stage = 0; stage < bit_log2(ARRAY_LENGTH); stage++) { result = clSetKernelArg(kernel, 1, sizeof(cl_uint), &stage); if (result) return 1; for (pass = 0; pass <= stage; pass++) { result = clSetKernelArg(kernel, 2, sizeof(cl_uint), &pass); if (result) return 1; result = clEnqueueNDRangeKernel(command_queue, kernel, 1, NULL, &global_item_size, &local_item_size, 0, NULL, NULL); if (result) return 1; } } result = clFinish(command_queue); if (result) return 1; result = clEnqueueReadBuffer(command_queue, a_mem, CL_TRUE, 0, ARRAY_LENGTH * sizeof(cl_int), a, 0, NULL, 
NULL); if (result) return 1; for (i = 0; i < ARRAY_LENGTH; i++) { if (a[i] != a0[i]) return 1; } result = clReleaseKernel(kernel); if (result) return 1; result = clReleaseProgram(program); if (result) return 1; result = clReleaseMemObject(a_mem); if (result) return 1; result = clReleaseCommandQueue(command_queue); if (result) return 1; result = clReleaseContext(context); if (result) return 1; free(a0); free(a); return 0; }