/*
 * Create a bucket inside an existing account.
 *
 * The account VDI is looked up first; the per-account cluster lock is
 * then taken so that concurrent creations of the same bucket serialize.
 * The bucket's VDI name is "<account>/<bucket>".
 *
 * @account: account name (must already exist as a VDI)
 * @bucket:  bucket name to create
 *
 * Returns SD_RES_SUCCESS on success, SD_RES_VDI_EXIST if the bucket is
 * already present, or the error from the failing lookup/creation step.
 */
int kv_create_bucket(const char *account, const char *bucket)
{
	uint32_t account_vid, vid;
	char vdi_name[SD_MAX_VDI_LEN];
	int ret;

	ret = sd_lookup_vdi(account, &account_vid);
	if (ret != SD_RES_SUCCESS) {
		sd_err("Failed to find account %s", account);
		return ret;
	}

	/* Serialize bucket creation within this account. */
	sys->cdrv->lock(account_vid);
	snprintf(vdi_name, SD_MAX_VDI_LEN, "%s/%s", account, bucket);

	ret = sd_lookup_vdi(vdi_name, &vid);
	if (ret == SD_RES_SUCCESS) {
		/* BUGFIX: message grammar ("is exists" -> "already exists") */
		sd_err("bucket %s already exists", bucket);
		ret = SD_RES_VDI_EXIST;
		goto out;
	}
	/* Any lookup failure other than "no such VDI" is fatal. */
	if (ret != SD_RES_NO_VDI)
		goto out;

	ret = bucket_create(account, account_vid, bucket);
out:
	sys->cdrv->unlock(account_vid);
	return ret;
}
/**
 * Put a key into the hashmap.  The destination slot is the key's hash
 * modulo the number of bucket slots, giving up to 4 billion slots
 * maximum.  If the key is already present in the chain, its node is
 * replaced by a new node built from the given value (the old node and
 * its key are freed).
 *
 * NOTE(review): on replacement the old node's value is NOT freed here
 * -- presumably values are caller-owned or freed by bucket_create's
 * counterpart; confirm against the bucket ownership contract.
 *
 * @param map the hashmap to add to
 * @param key the key to put in there
 * @param value the value to store at the key
 * @return 1 on success (inserted or replaced), 0 if the table could not
 * be grown or the new bucket node could not be allocated
 */
int hashmap_put( hashmap *map, char *key, void *value )
{
    unsigned hashval,slot;
    struct bucket *d;
    /* grow the table once the load factor exceeds MAX_RATIO */
    if ( (float)map->num_keys/(float)map->num_buckets > MAX_RATIO )
    {
        if ( !hashmap_rehash(map) )
            return 0;
    }
    hashval = hash( key, strlen(key) );
    slot = hashval % map->num_buckets;
    d = bucket_create( key, value );
    if ( d != NULL )
    {
        if ( map->buckets[slot] == NULL )
        {
            /* empty slot: the new node becomes the chain head */
            map->num_keys++;
            map->buckets[slot] = d;
        }
        else
        {
            /* walk the collision chain looking for the key */
            struct bucket *prev = NULL;
            struct bucket *c = map->buckets[slot];
            do
            {
                if (strcmp(c->key,key)==0 )
                {
                    // key already present: replace
                    if ( prev != NULL )
                        prev->next = d;
                    else
                        map->buckets[slot] = d;
                    d->next = c->next;
                    free( c->key );
                    free( c );
                    break;
                }
                else if ( c->next == NULL )
                {
                    /* end of chain: append as a new key */
                    c->next = d;
                    map->num_keys++;
                    break;
                }
                prev = c;
                c = c->next;
            } while ( c != NULL );
        }
        return 1;
    }
    else
        return 0;
}
/**
 * Grow the bucket table to 1.5 times its current size and redistribute
 * all keys into it.
 *
 * BUGFIX vs. original: on a mid-copy allocation failure the original
 * returned 0 while leaking new_buckets and every node already copied,
 * AND left the old table half-disposed (chains disposed as they were
 * copied), corrupting the map.  This version copies everything first
 * and only disposes the old chains after every copy has succeeded, so
 * on failure the map is left fully intact.
 *
 * NOTE(review): assumes bucket_create() deep-copies the key, since the
 * old chains are disposed afterwards -- confirm against its
 * implementation.
 *
 * @param map the map to rehash
 * @return 1 on success, 0 on allocation failure (map unchanged)
 */
static int hashmap_rehash( hashmap *map )
{
    int i, new_size = map->num_buckets + map->num_buckets/2;
    struct bucket **new_buckets;
    /* guarantee actual growth even for tiny tables (1 + 1/2 == 1) */
    if ( new_size <= map->num_buckets )
        new_size = map->num_buckets + 1;
    new_buckets = calloc( new_size, sizeof(struct bucket*) );
    if ( new_buckets == NULL )
    {
        warning("hashmap: failed to resize hash table\n");
        return 0;
    }
    /* first pass: copy every key into the new table */
    for ( i = 0; i < map->num_buckets; i++ )
    {
        struct bucket *b = map->buckets[i];
        while ( b != NULL )
        {
            unsigned slot = hash((unsigned char*)b->key,
                strlen(b->key)) % new_size;
            struct bucket *d = bucket_create( b->key, b->value );
            if ( d == NULL )
            {
                /* roll back: dispose the partial copy, keep old map */
                int j;
                for ( j = 0; j < new_size; j++ )
                    if ( new_buckets[j] != NULL )
                        bucket_dispose( new_buckets[j] );
                free( new_buckets );
                return 0;
            }
            if ( new_buckets[slot] == NULL )
                new_buckets[slot] = d;
            else
            {
                /* append at the end of the collision chain */
                struct bucket *c = new_buckets[slot];
                while ( c->next != NULL )
                    c = c->next;
                c->next = d;
            }
            b = b->next;
        }
    }
    /* second pass: all copies succeeded, release the old chains */
    for ( i = 0; i < map->num_buckets; i++ )
        if ( map->buckets[i] != NULL )
            bucket_dispose( map->buckets[i] );
    free( map->buckets );
    map->num_buckets = new_size;
    map->buckets = new_buckets;
    return 1;
}
/*
 * Initialize the OpenSL ES audio-output backend: create and realize the
 * engine and output mix, set up the non-blocking command pipe and the
 * channel bucket, then start the API main-loop thread (detached).
 *
 * Returns 0 on success, -1 on failure (partially-created resources are
 * torn down).
 *
 * NOTE(review): the cleanup path tests outputMix/engineObject/pipe fds
 * for non-zero -- this assumes *instance was zero-initialized before
 * this call, and treats fd 0 as "not open"; confirm against the
 * allocator of the instance struct.
 */
static int _impl_setup(rh_aout_api_itf self)
{
	extern AAssetManager * __rh_hack_get_android_asset_manager();

	static const SLEngineOption options[] = {
		{ SL_ENGINEOPTION_THREADSAFE,    SL_BOOLEAN_TRUE  },
		{ SL_ENGINEOPTION_LOSSOFCONTROL, SL_BOOLEAN_FALSE },
	};

	struct sles_api_instance * instance = (struct sles_api_instance *)self;

	instance->asset_manager = __rh_hack_get_android_asset_manager();
	if (!instance->asset_manager)
		goto bad;

	if (SL_RESULT_SUCCESS != slCreateEngine(&instance->engineObject,
			sizeof(options) / sizeof(options[0]), options,
			0, NULL, NULL))
		goto bad;

	/* synchronous (blocking) realization of engine and output mix */
	if (SL_RESULT_SUCCESS != (*instance->engineObject)->Realize(
			instance->engineObject, SL_BOOLEAN_FALSE))
		goto bad;

	if (SL_RESULT_SUCCESS != (*instance->engineObject)->GetInterface(
			instance->engineObject, SL_IID_ENGINE,
			&instance->engineItf))
		goto bad;

	if (SL_RESULT_SUCCESS != (*instance->engineItf)->CreateOutputMix(
			instance->engineItf, &instance->outputMix,
			0, NULL, NULL))
		goto bad;

	if (SL_RESULT_SUCCESS != (*instance->outputMix)->Realize(
			instance->outputMix, SL_BOOLEAN_FALSE))
		goto bad;

	/* NOTE(review): relies on cmd_pipe.read/.write being two adjacent
	 * ints, as pipe() expects int[2] -- confirm the struct layout. */
	if (pipe(&instance->cmd_pipe.read) != 0)
		goto bad;

	/* BUGFIX: F_SETFL with a bare O_NONBLOCK clobbered every other
	 * file-status flag; read-modify-write via F_GETFL instead. */
	{
		int fl = fcntl(instance->cmd_pipe.read, F_GETFL);
		if (fl == -1 ||
		    fcntl(instance->cmd_pipe.read, F_SETFL, fl | O_NONBLOCK) != 0)
			goto bad;
	}

	if (bucket_create(&instance->aout_itf_bucket) != 0)
		goto bad;

	if (add_channels(self, 3) != 0)
		goto bad;

	{
		pthread_t thread;
		if (pthread_create(&thread, NULL, &api_main_loop,
				(void *)self) != 0)
			goto bad;
		instance->thread = thread;
	}
	pthread_detach(instance->thread);

	/* removed unused 'good:' label (compiler warning) */
	return 0;

bad:
	if (instance->outputMix)
		(*instance->outputMix)->Destroy(instance->outputMix);
	if (instance->engineObject)
		(*instance->engineObject)->Destroy(instance->engineObject);
	if (instance->aout_itf_bucket) {
		close_all_channels(self);
		bucket_free(instance->aout_itf_bucket);
	}
	if (instance->cmd_pipe.write)
		close(instance->cmd_pipe.write);
	if (instance->cmd_pipe.read)
		close(instance->cmd_pipe.read);
	return -1;
}
/* * Bucket-sort algorithm. */ extern void bucketsort(int *array, int n) { int max; /* Maximum number. */ int i, j; /* Loop indexes. */ int range; /* Bucket range. */ struct minibucket *minib; /* Working mini-bucket. */ struct message *msg; /* Working message. */ struct bucket **todo; /* Todo buckets. */ struct bucket **done; /* Done buckets. */ uint64_t start, end; /* Timers. */ /* Setup slaves. */ open_noc_connectors(); spawn_slaves(); sync_slaves(); todo = smalloc(NUM_BUCKETS*sizeof(struct bucket *)); done = smalloc(NUM_BUCKETS*sizeof(struct bucket *)); for (i = 0; i < NUM_BUCKETS; i++) { done[i] = bucket_create(); todo[i] = bucket_create(); } /* Find max number in the array. */ start = timer_get(); max = INT_MIN; for (i = 0; i < n; i++) { /* Found. */ if (array[i] > max) max = array[i]; } /* Distribute numbers. */ range = max/NUM_BUCKETS; for (i = 0; i < n; i++) { j = array[i]/range; if (j >= NUM_BUCKETS) j = NUM_BUCKETS - 1; bucket_insert(&todo[j], array[i]); } end = timer_get(); master += timer_diff(start, end); /* Sort buckets. */ j = 0; for (i = 0; i < NUM_BUCKETS; i++) { while (bucket_size(todo[i]) > 0) { minib = bucket_pop(todo[i]); /* Send message. */ msg = message_create(SORTWORK, i, minib->size); message_send(outfd[j], msg); message_destroy(msg); /* Send data. */ communication += data_send(outfd[j], minib->elements, minib->size*sizeof(int)); minibucket_destroy(minib); j++; /* * Slave processes are busy. * So let's wait for results. */ if (j == nclusters) { /* Receive results. */ for (/* NOOP */ ; j > 0; j--) { /* Receive message. */ msg = message_receive(infd[nclusters - j]); /* Receive mini-bucket. */ minib = minibucket_create(); minib->size = msg->u.sortresult.size; communication += data_receive(infd[nclusters -j], minib->elements, minib->size*sizeof(int)); bucket_push(done[msg->u.sortresult.id], minib); message_destroy(msg); } } } } /* Receive results. */ for (/* NOOP */ ; j > 0; j--) { /* Receive message. 
*/ msg = message_receive(infd[j - 1]); /* Receive bucket. */ minib = minibucket_create(); minib->size = msg->u.sortresult.size; communication += data_receive(infd[j - 1], minib->elements, minib->size*sizeof(int)); bucket_push(done[msg->u.sortresult.id], minib); message_destroy(msg); } start = timer_get(); rebuild_array(done, array); end = timer_get(); master += timer_diff(start, end); /* House keeping. */ for (i = 0; i < NUM_BUCKETS; i++) { bucket_destroy(todo[i]); bucket_destroy(done[i]); } free(done); free(todo); join_slaves(); close_noc_connectors(); }