swTableRow* swTableRow_set(swTable *table, char *key, int keylen)
{
    swTableRow *row = swTable_hash(table, key, keylen);
    uint32_t crc32 = swoole_crc32(key, keylen);
    sw_atomic_t *lock = &row->lock;
    sw_spinlock(lock);

    if (row->active)
    {
        //walk the collision list until the same crc32 is found or a new node is appended
        for (;;)
        {
            if (row->crc32 == crc32)
            {
                break;
            }
            else if (row->next == NULL)
            {
                table->lock.lock(&table->lock);
                swTableRow *new_row = table->pool->alloc(table->pool, 0);
#ifdef SW_TABLE_DEBUG
                conflict_count++;
#endif
                table->lock.unlock(&table->lock);
                if (!new_row)
                {
                    sw_spinlock_release(lock);
                    return NULL;
                }
                bzero(new_row, sizeof(swTableRow));
                //add row_num
                sw_atomic_fetch_add(&(table->row_num), 1);
                row->next = new_row;
                row = new_row;
                break;
            }
            else
            {
                row = row->next;
            }
        }
    }
    else
    {
#ifdef SW_TABLE_DEBUG
        insert_count++;
#endif
        sw_atomic_fetch_add(&(table->row_num), 1);
        //when the root node becomes active, we may need to compress the jump table
        if (table->list_n >= table->size - 1)
        {
            swTable_compress_list(table);
        }
        table->rows_list[table->list_n] = row;
        row->list_index = table->list_n;
        sw_atomic_fetch_add(&table->list_n, 1);
    }

    row->crc32 = crc32;
    row->active = 1;
    swTrace("row=%p, crc32=%u, key=%s\n", row, crc32, key);
    sw_spinlock_release(lock);
    return row;
}
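/*
 * Usage sketch (hypothetical caller, not part of the original source): the
 * three-argument variant above releases the row spinlock before returning, so
 * the row comes back unlocked and the caller's write to row->data is not
 * covered by any per-row lock. The helper name and value_len parameter are
 * illustrative only.
 */
static int example_set_crc32(swTable *table, char *key, void *value, size_t value_len)
{
    swTableRow *row = swTableRow_set(table, key, (int) strlen(key));
    if (row == NULL)
    {
        return SW_ERR;
    }
    //value_len is assumed to be no larger than table->item_size
    memcpy(row->data, value, value_len);
    return SW_OK;
}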
int swTableRow_del(swTable *table, char *key, int keylen)
{
    swTableRow *row = swTable_hash(table, key, keylen);
    uint32_t crc32 = swoole_crc32(key, keylen);
    sw_atomic_t *lock = &row->lock;

    //the bucket is not active, so the key does not exist
    if (!row->active)
    {
        return SW_ERR;
    }
    sw_spinlock(lock);

    if (row->next == NULL)
    {
        if (row->crc32 == crc32)
        {
            table->rows_list[row->list_index] = NULL;
            if (table->iterator->skip_count > table->compress_threshold)
            {
                swTable_compress_list(table);
            }
            bzero(row, sizeof(swTableRow));
            goto delete_element;
        }
        else
        {
            goto not_exists;
        }
    }
    else
    {
        swTableRow *tmp = row;
        swTableRow *prev = NULL;
        while (tmp)
        {
            if (tmp->crc32 == crc32)
            {
                break;
            }
            prev = tmp;
            tmp = tmp->next;
        }
        if (tmp == NULL)
        {
            not_exists:
            sw_spinlock_release(lock);
            return SW_ERR;
        }
        //when the element to delete is the root, move the first collision-list
        //element's data into the root and remove that element from the list
        if (tmp == row)
        {
            tmp = tmp->next;
            row->next = tmp->next;
            if (table->iterator->skip_count > table->compress_threshold)
            {
                swTable_compress_list(table);
            }
            memcpy(row->data, tmp->data, table->item_size);
            //keep the promoted element's hash as well, otherwise it can no longer be looked up
            row->crc32 = tmp->crc32;
        }
        if (prev)
        {
            prev->next = tmp->next;
        }
        table->lock.lock(&table->lock);
        bzero(tmp, sizeof(swTableRow));
        table->pool->free(table->pool, tmp);
        table->lock.unlock(&table->lock);
    }

    delete_element:
    sw_atomic_fetch_sub(&(table->row_num), 1);
    sw_spinlock_release(lock);
    return SW_OK;
}
swTableRow* swTableRow_set(swTable *table, char *key, int keylen, sw_atomic_t **rowlock)
{
    if (keylen > SW_TABLE_KEY_SIZE)
    {
        keylen = SW_TABLE_KEY_SIZE;
    }

    swTableRow *row = swTable_hash(table, key, keylen);
    sw_atomic_t *lock = &row->lock;
    sw_spinlock(lock);
    //the lock is handed back to the caller, who releases it after writing the row
    *rowlock = lock;

    if (row->active)
    {
        //walk the collision list until the same key is found or a new node is appended
        for (;;)
        {
            if (strncmp(row->key, key, keylen) == 0)
            {
                break;
            }
            else if (row->next == NULL)
            {
                table->lock.lock(&table->lock);
                swTableRow *new_row = table->pool->alloc(table->pool, 0);
#ifdef SW_TABLE_DEBUG
                conflict_count++;
#endif
                table->lock.unlock(&table->lock);
                if (!new_row)
                {
                    return NULL;
                }
                bzero(new_row, sizeof(swTableRow));
                //add row_num
                sw_atomic_fetch_add(&(table->row_num), 1);
                row->next = new_row;
                row = new_row;
                break;
            }
            else
            {
                row = row->next;
            }
        }
    }
    else
    {
#ifdef SW_TABLE_DEBUG
        insert_count++;
#endif
        sw_atomic_fetch_add(&(table->row_num), 1);
        //when the root node becomes active, we may need to compress the jump table
        if (table->list_n >= table->size - 1)
        {
            swTable_compress_list(table);
        }
        table->lock.lock(&table->lock);
        table->rows_list[table->list_n] = row;
        table->lock.unlock(&table->lock);
        row->list_index = table->list_n;
        sw_atomic_fetch_add(&table->list_n, 1);
    }

    memcpy(row->key, key, keylen);
    row->active = 1;
    return row;
}
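/*
 * Usage sketch (hypothetical caller, not part of the original source): unlike
 * the crc32 variant, this four-argument variant keeps the row spinlock held
 * when it returns and hands it back through *rowlock, so the caller writes
 * the columns under the lock and is responsible for releasing it, even when
 * the call returns NULL. The helper name and value_len parameter are
 * illustrative only.
 */
static int example_set_locked(swTable *table, char *key, void *value, size_t value_len)
{
    sw_atomic_t *rowlock = NULL;
    swTableRow *row = swTableRow_set(table, key, (int) strlen(key), &rowlock);
    if (row)
    {
        //value_len is assumed to be no larger than table->item_size
        memcpy(row->data, value, value_len);
    }
    //the lock was acquired before any return path, so it must always be released
    sw_spinlock_release(rowlock);
    return row ? SW_OK : SW_ERR;
}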