swTableRow* swTableRow_get(swTable *table, char *key, int keylen)
{
    swTableRow *row = swTable_hash(table, key, keylen);
    uint32_t crc32 = swoole_crc32(key, keylen);
    sw_atomic_t *lock = &row->lock;

    swTrace("row=%p, crc32=%u, key=%s\n", row, crc32, key);

    sw_spinlock(lock);
    for (;;)
    {
        if (row->crc32 == crc32)
        {
            if (!row->active)
            {
                row = NULL;
            }
            break;
        }
        else if (row->next == NULL)
        {
            row = NULL;
            break;
        }
        else
        {
            row = row->next;
        }
    }
    sw_spinlock_release(lock);
    return row;
}
int swTableRow_del(swTable *table, char *key, int keylen)
{
    swTableRow *row = swTable_hash(table, key, keylen);
    uint32_t crc32 = swoole_crc32(key, keylen);
    sw_atomic_t *lock = &row->lock;

    //the key does not exist
    if (!row->active)
    {
        return SW_ERR;
    }

    sw_spinlock(lock);
    if (row->next == NULL)
    {
        if (row->crc32 == crc32)
        {
            table->rows_list[row->list_index] = NULL;
            if (table->iterator->skip_count > table->compress_threshold)
            {
                swTable_compress_list(table);
            }
            bzero(row, sizeof(swTableRow));
            goto delete_element;
        }
        else
        {
            goto not_exists;
        }
    }
    else
    {
        swTableRow *tmp = row;
        swTableRow *prev = NULL;

        //search the collision chain for the matching crc32
        while (tmp)
        {
            if (tmp->crc32 == crc32)
            {
                break;
            }
            prev = tmp;
            tmp = tmp->next;
        }

        if (tmp == NULL)
        {
            not_exists:
            sw_spinlock_release(lock);
            return SW_ERR;
        }

        //when the element to delete is the root, move the next element's data into the root
        //and remove that element from the collision chain.
        if (tmp == row)
        {
            tmp = tmp->next;
            row->next = tmp->next;
            if (table->iterator->skip_count > table->compress_threshold)
            {
                swTable_compress_list(table);
            }
            memcpy(row->data, tmp->data, table->item_size);
        }
        if (prev)
        {
            prev->next = tmp->next;
        }
        table->lock.lock(&table->lock);
        bzero(tmp, sizeof(swTableRow));
        table->pool->free(table->pool, tmp);
        table->lock.unlock(&table->lock);
    }

    delete_element:
    sw_atomic_fetch_sub(&(table->row_num), 1);
    sw_spinlock_release(lock);
    return SW_OK;
}
swTableRow* swTableRow_set(swTable *table, char *key, int keylen)
{
    swTableRow *row = swTable_hash(table, key, keylen);
    uint32_t crc32 = swoole_crc32(key, keylen);
    sw_atomic_t *lock = &row->lock;

    sw_spinlock(lock);
    if (row->active)
    {
        for (;;)
        {
            if (row->crc32 == crc32)
            {
                break;
            }
            else if (row->next == NULL)
            {
                //hash collision: allocate a new row and append it to the chain
                table->lock.lock(&table->lock);
                swTableRow *new_row = table->pool->alloc(table->pool, 0);
#ifdef SW_TABLE_DEBUG
                conflict_count ++;
#endif
                table->lock.unlock(&table->lock);
                if (!new_row)
                {
                    sw_spinlock_release(lock);
                    return NULL;
                }
                //add row_num
                bzero(new_row, sizeof(swTableRow));
                sw_atomic_fetch_add(&(table->row_num), 1);
                row->next = new_row;
                row = new_row;
                break;
            }
            else
            {
                row = row->next;
            }
        }
    }
    else
    {
#ifdef SW_TABLE_DEBUG
        insert_count ++;
#endif
        sw_atomic_fetch_add(&(table->row_num), 1);

        //when the root node becomes active, we may need to compress the jump table
        if (table->list_n >= table->size - 1)
        {
            swTable_compress_list(table);
        }

        table->rows_list[table->list_n] = row;
        row->list_index = table->list_n;
        sw_atomic_fetch_add(&table->list_n, 1);
    }

    row->crc32 = crc32;
    row->active = 1;

    swTrace("row=%p, crc32=%u, key=%s\n", row, crc32, key);

    sw_spinlock_release(lock);
    return row;
}
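/*
 * A minimal usage sketch, not part of the original source: it assumes an
 * already-created swTable (columns defined, shared memory attached) and a
 * value that fits within the table's item_size. The helper name and
 * parameters are hypothetical; it only exercises the swTableRow_set()/
 * _get()/_del() functions defined above and re-takes the per-row spinlock
 * while writing row->data, since swTableRow_set() releases it before
 * returning.
 */
static int swTable_usage_sketch(swTable *table, char *key, int keylen, void *value, int value_len)
{
    //insert or update: returns the row for this key, or NULL if the pool is exhausted
    swTableRow *row = swTableRow_set(table, key, keylen);
    if (row == NULL)
    {
        return SW_ERR;
    }

    //the set call released the row lock, so lock again before touching the payload
    sw_spinlock(&row->lock);
    memcpy(row->data, value, value_len);
    sw_spinlock_release(&row->lock);

    //lookup: NULL means the key is absent (or was deleted in the meantime)
    if (swTableRow_get(table, key, keylen) == NULL)
    {
        return SW_ERR;
    }

    //remove the entry again
    return swTableRow_del(table, key, keylen);
}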
int swTableRow_del(swTable *table, char *key, int keylen)
{
    swTableRow *row = swTable_hash(table, key, keylen);
    uint32_t crc32 = swoole_crc32(key, keylen);
    sw_atomic_t *lock = &row->lock;
    int i = 0;

    sw_spinlock(lock);
    if (row->active)
    {
        //search the collision chain; i > 0 means the match is not the root node
        for (;; i++)
        {
            if (row->crc32 == crc32)
            {
                if (i > 0)
                {
                    table->lock.lock(&table->lock);
                    table->pool->free(table->pool, row);
                    table->lock.unlock(&table->lock);
                }
                break;
            }
            else if (row->next == NULL)
            {
                sw_spinlock_release(lock);
                return SW_ERR;
            }
            else
            {
                row = row->next;
            }
        }

#ifdef SW_TABLE_USE_LINKED_LIST
        //unlink the row from the iteration list
        if (row->list_prev != NULL)
        {
            row->list_prev->list_next = row->list_next;
        }
        else
        {
            table->head = row->list_next;
        }
        if (row->list_next != NULL)
        {
            row->list_next->list_prev = row->list_prev;
        }
        else
        {
            table->tail = row->list_prev;
        }
        //keep the shared iterator off the removed row
        if (table->iterator->tmp_row == row)
        {
            table->iterator->tmp_row = row->list_next;
        }
#endif
    }

    if (row->active)
    {
        sw_atomic_fetch_sub(&(table->row_num), 1);
    }
    row->active = 0;
    sw_spinlock_release(lock);
    return SW_OK;
}
swTableRow* swTableRow_set(swTable *table, char *key, int keylen)
{
    swTableRow *row = swTable_hash(table, key, keylen);
    uint32_t crc32 = swoole_crc32(key, keylen);
    sw_atomic_t *lock = &row->lock;

    sw_spinlock(lock);
    if (row->active)
    {
        for (;;)
        {
            if (row->crc32 == crc32)
            {
                break;
            }
            else if (row->next == NULL)
            {
                //hash collision: allocate a new row and append it to the chain
                table->lock.lock(&table->lock);
                swTableRow *new_row = table->pool->alloc(table->pool, 0);
                table->lock.unlock(&table->lock);
                if (!new_row)
                {
                    sw_spinlock_release(lock);
                    return NULL;
                }
                //add row_num
                sw_atomic_fetch_add(&(table->row_num), 1);
                row->next = new_row;
                row = new_row;
                break;
            }
            else
            {
                row = row->next;
            }
        }
    }
    else
    {
        sw_atomic_fetch_add(&(table->row_num), 1);
    }

#ifdef SW_TABLE_USE_LINKED_LIST
    //a newly used row is appended to the tail of the iteration list
    if (!row->active)
    {
        row->list_next = NULL;
        if (table->head)
        {
            row->list_prev = table->tail;
            table->tail->list_next = row;
            table->tail = row;
        }
        else
        {
            table->head = table->tail = row;
            row->list_prev = NULL;
            table->iterator->tmp_row = row;
        }
    }
#endif

    row->crc32 = crc32;
    row->active = 1;

    swTrace("row=%p, crc32=%u, key=%s\n", row, crc32, key);

    sw_spinlock_release(lock);
    return row;
}
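/*
 * An iteration sketch for the SW_TABLE_USE_LINKED_LIST build, not part of the
 * original source: it walks the head/tail list that the set/del functions
 * above maintain. The helper name and callback signature are hypothetical,
 * and the walk takes no locks, so it assumes no concurrent writers.
 */
#ifdef SW_TABLE_USE_LINKED_LIST
static void swTable_foreach_sketch(swTable *table, void (*handler)(swTableRow *row))
{
    swTableRow *row;
    for (row = table->head; row != NULL; row = row->list_next)
    {
        //deleted rows are unlinked by swTableRow_del(), but the flag is cheap to re-check
        if (row->active)
        {
            handler(row);
        }
    }
}
#endif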