/**
 * mc_do_slabs_newslab() - alloc a new slab page, and add it to the free list(->slots)
 * @id : idx of slabclass
 *
 * Returns 0 on success, otherwise errno.
 */
static int mc_do_slabs_newslab(unsigned int id)
{
	slabclass_t *p = &slabclass[id];
	/*
	 * With slab_reassign every page gets the same (maximum) size so pages
	 * can later move between classes; otherwise the page is sized to hold
	 * exactly p->perslab chunks of this class.
	 */
	int len = settings.slab_reassign ? settings.item_size_max
					 : p->size * p->perslab;
	char *ptr;

	/*
	 * Refuse to grow past the configured memory limit — but always allow
	 * the first page of a class (p->slabs == 0) so every class can store
	 * at least something.
	 */
	if (mem_limit && (mem_malloced + len > mem_limit) && p->slabs > 0)
		return -ENOMEM;

	/* make sure slab_list has room for one more page entry */
	if (mc_grow_slab_list(id))
		return -ENOMEM;

	if (likely(!mem_base)) {
		/*
		 * Buffer-allocator mode: slab_list holds struct buffer
		 * entries; the new page is allocated directly into slot
		 * p->slabs (counted below only after the page is carved up).
		 */
		struct buffer *lptr;
		BUFFER_PTR(&p->slab_list, lptr);
		ptr = mc_memory_allocate((size_t)len, &lptr[p->slabs]);
	} else {
		/* preallocated-pool mode: carve the page out of mem_base */
		ptr = mc_memory_allocate((size_t)len, 0);
	}
	if (!ptr)
		return -ENOMEM;

	/* chop the fresh page into fixed-size chunks and hang them on ->slots */
	mc_split_slab_page_into_freelist(ptr, id);
	if (likely(!mem_base)) {
		/* buffer mode: slot p->slabs was filled above, just count it */
		p->slabs++;
	} else {
		/* pool mode: slab_list holds raw page pointers */
		void **lptr;
		BUFFER_PTR(&p->slab_list, lptr);
		lptr[p->slabs++] = ptr;
	}
	mem_malloced += len;
	return 0;
}
/**
 * mc_hash_expand() - grows the hashtable to the next power of 2
 *
 * On allocation failure the previous table is restored and we keep
 * running at the current size. NOTE(review): no locking visible here —
 * presumably the caller serializes access to the tables; confirm at
 * call sites.
 */
static void mc_hash_expand(void)
{
	size_t bytes;
	int ret = 0;

	/*
	 * Keep the current table reachable via old_hashtable/old_hts:
	 * needed both for rollback on alloc failure and — presumably —
	 * for lookups in not-yet-migrated buckets while EXPANDING is set.
	 */
	old_hashtable = primary_hashtable;
	memcpy(&old_hts, &primary_hts, sizeof(old_hts));

	/* hashpower + 1 doubles the bucket count */
	bytes = hashsize(hashpower + 1) * sizeof(void *);
	ret = alloc_buffer(&primary_hts, bytes, __GFP_ZERO);
	if (!ret) {
		PVERBOSE(1, "hash table expansion starting\n");
		BUFFER_PTR(&primary_hts, primary_hashtable);
		hashpower++;
		/* migration starts from bucket 0 of the old table */
		set_bit(EXPANDING, &hashflags);
		expand_bucket = 0;
		ATOMIC32_SET(stats.hash_power_level, hashpower);
		ATOMIC64_ADD(stats.hash_bytes, bytes);
		set_bit(STATS_HASH_EXP, &stats.flags);
	} else {
		/* bad news, but we can keep running: roll back to the old table */
		PRINTK("hash table expansion error\n");
		memcpy(&primary_hts, &old_hts, sizeof(old_hts));
		primary_hashtable = old_hashtable;
	}
}
/** * mc_memory_allocate() - alloc memory, using buffer or from preallocated * @size : request size * @vptr : for buffer alloctor * * returns real buffer ptr on success, null otherwise */ static void* mc_memory_allocate(size_t size, void *vptr) { void *ret = NULL; if (likely(!mem_base)) { struct buffer *buf = vptr; if (mem_malloced + size > slabsize) return NULL; if (alloc_buffer(buf, size, __GFP_ZERO)) return NULL; BUFFER_PTR(buf, ret); } else { ret = mem_current; if (size > mem_avail) return NULL; /* XXX: mem_current pointer _must_ be aligned */ if (size % CHUNK_ALIGN_BYTES) { size += CHUNK_ALIGN_BYTES - (size % CHUNK_ALIGN_BYTES); } mem_current = ((char *)mem_current) + size; if (size < mem_avail) mem_avail -= size; else mem_avail = 0; memset(ret, 0, size); } return ret; }
/**
 * slabs_exit() - release every slab page and the per-class bookkeeping
 *
 * In preallocated-pool mode a single vfree() of mem_base releases all
 * pages at once; in buffer-allocator mode each page is an individual
 * buffer and must be freed one by one. The slab_list buffer itself
 * exists in both modes and is always freed at the end.
 */
void slabs_exit(void)
{
	slabclass_t *sc;
	int cls;

	if (mem_base) {
		/* one contiguous pool: everything goes with one call */
		vfree(mem_base);
	} else {
		/* per-page buffers: walk every class and free its pages */
		for (cls = POWER_SMALLEST; cls <= power_largest; cls++) {
			struct buffer *pages;
			int pg;

			sc = &slabclass[cls];
			if (!sc->slabs)
				continue;
			BUFFER_PTR(&sc->slab_list, pages);
			for (pg = 0; pg < sc->slabs; pg++)
				free_buffer(&pages[pg]);
		}
	}

	/* finally drop each class's slab_list bookkeeping buffer */
	for (cls = POWER_SMALLEST; cls <= power_largest; cls++) {
		sc = &slabclass[cls];
		free_buffer(&sc->slab_list);
	}
}
/**
 * stats in textual form suitable for writing to client.
 * @buf : destination buffer, allocated here; caller owns it on success.
 *
 * returns dump size on success, errno otherwise
 */
int mc_stats_prefix_dump(struct buffer *buf)
{
	const char *format = "PREFIX %s get %llu hit %llu set %llu del %llu\r\n";
	char *dumpstr;
	prefix_stats_t *pfs;
	int i, pos, res = 0;
	size_t size = 0, written = 0, total_written = 0;

	/*
	 * Figure out how big the buffer needs to be. This is the sum of the
	 * lengths of the prefixes themselves, plus the size of one copy of
	 * the per-prefix output with 20-digit values for all the counts,
	 * plus space for the "END" at the end.
	 */
	mutex_lock(&prefix_stats_lock);
	size = strlen(format)
		+ total_prefix_size
		+ num_prefixes * (strlen(format)
			- 2 /* %s, replaced by the prefix (in total_prefix_size) */
			+ 4 * (20 - 4)) /* each %llu replaced by up to a 20-digit num */
		+ sizeof("END\r\n");
	res = alloc_buffer(buf, size, 0);
	if (res) {
		mutex_unlock(&prefix_stats_lock);
		PRINTK("can't allocate stats response\n");
		goto out;
	}
	BUFFER_PTR(buf, dumpstr);

	pos = 0;
	/* walk every hash chain while still holding prefix_stats_lock */
	for (i = 0; i < PREFIX_HASH_SIZE; i++) {
		for (pfs = prefix_stats[i]; NULL != pfs; pfs = pfs->next) {
			written = snprintf(dumpstr + pos, size-pos, format,
					pfs->prefix,
					pfs->num_gets, pfs->num_hits,
					pfs->num_sets, pfs->num_deletes);
			pos += written;
			total_written += written;
			/* the size estimate above must never be exceeded */
			BUG_ON(total_written >= size);
		}
	}
	mutex_unlock(&prefix_stats_lock);
	/* 6 bytes: "END\r\n" plus its terminating NUL */
	memcpy(dumpstr + pos, "END\r\n", 6);
	/* returned length excludes the NUL (5 visible bytes of "END\r\n") */
	res = pos + 5;
out:
	return res;
}
int hash_init(int power) { size_t bytes; int ret = 0; if (power) hashpower = power; bytes = hashsize(hashpower) * sizeof(void *); ret = alloc_buffer(&primary_hts, bytes, __GFP_ZERO); if (ret) { PRINTK("alloc primary_hashtable error\n"); goto out; } else { BUFFER_PTR(&primary_hts, primary_hashtable); } ATOMIC32_SET(stats.hash_power_level, hashpower); ATOMIC64_SET(stats.hash_bytes, bytes); out: return ret; }
static int do_spi_io(struct spi_device* lp_dev, u8* lp_send_buffer, u8* lp_recv_buffer, int buffer_size) { int ret_value; struct spi_message msg; struct spi_transfer xfer = { .len = buffer_size, .tx_buf = (void*)lp_send_buffer, .rx_buf = (void*)lp_recv_buffer, .speed_hz = 1000000, }; spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); dev_info(&lp_dev->dev, "spi io: transfer size = %d\n", buffer_size); ret_value = spi_sync(lp_dev, &msg); if (IS_SUCCESS(ret_value)) { dev_info(&lp_dev->dev, "spi io done.\n"); } dev_info(&lp_dev->dev, "do_spi_io ret_value = %d\n", ret_value); return ret_value; } int do_io_transaction(struct spi_device* lp_dev, _IN_ struct spi_io_context* lp_io_context, _IN_ u8* const lp_send_buffer, int send_buffer_size, _OUT_ u8* lp_recv_buffer, int recv_buffer_size, _OUT_ int* lp_recved_size ) { int ret_value = ER_FAILED; int total_trafster = 0; int one_time_transfer = 0; int total_receive_size = 0; int remain_send_count = send_buffer_size; int remain_recv_count = 0; int is_recved_vaild_fh = 0; struct buffer* lp_send_operator = NULL; struct buffer* lp_recv_operator = NULL; struct buffer dummy_send_buffer; struct buffer dummy_recv_buffer; struct buffer send_buffer; struct buffer recv_buffer; INIT_BUFFER(&dummy_send_buffer, lp_io_context->send_dummy_buffer, lp_io_context->send_dummy_buffer_size); INIT_BUFFER(&dummy_recv_buffer, lp_io_context->recv_dummy_buffer, lp_io_context->recv_dummy_buffer_size); INIT_BUFFER(&send_buffer, lp_send_buffer, send_buffer_size); INIT_BUFFER(&recv_buffer, lp_recv_buffer, recv_buffer_size); /*need some check here, but still in think.*/ total_trafster = send_buffer_size; while(total_trafster > 0) { int send_buffer_is_dummy; int recv_buffer_is_dummy; /* Step1. 
try calc out transfer bye count */ if (0 != BUFFER_REMAIN_LENGTH(send_buffer)) { lp_send_operator = &send_buffer; send_buffer_is_dummy = FALSE; } else { lp_send_operator = &dummy_send_buffer; send_buffer_is_dummy = TRUE; } if (0 != remain_recv_count && is_recved_vaild_fh) { lp_recv_operator = &recv_buffer; recv_buffer_is_dummy = FALSE; } else { lp_recv_operator = &dummy_recv_buffer; recv_buffer_is_dummy = TRUE; } if (is_recved_vaild_fh) { RESET_BUFFER(&dummy_send_buffer); RESET_BUFFER(&dummy_recv_buffer); if (send_buffer_is_dummy && recv_buffer_is_dummy) { one_time_transfer = 0; } else { one_time_transfer = MIN(BUFFER_REMAIN_LENGTH(*lp_send_operator), BUFFER_REMAIN_LENGTH(*lp_recv_operator)); } } else { /* can't reset dummy recv buffer because it contain last time received splited data */ one_time_transfer = MIN(BUFFER_REMAIN_LENGTH(*lp_send_operator), BUFFER_REMAIN_LENGTH(dummy_recv_buffer)); } if (0 == one_time_transfer) { /*caller's receive buffer is not enough case.*/ if ( 0 != remain_recv_count) { ret_value = ER_NO_ENOUGH_RECV_BUFFER; } break; } /* Step 2. Prepare and do transfer */ dev_info(&lp_dev->dev, "before do_spi_io\n"); ret_value = do_spi_io(lp_dev, BUFFER_PTR(*lp_send_operator), BUFFER_PTR(*lp_recv_operator), one_time_transfer); if (IS_FAILED(ret_value)) { dev_err(&lp_dev->dev, "do_spi_io() failed! \n"); break; } dev_info(&lp_dev->dev, "after do_spi_io\n"); lp_send_operator->index += one_time_transfer; lp_recv_operator->index += one_time_transfer; remain_send_count = MAX(0, remain_send_count - one_time_transfer); remain_recv_count = MAX(0, remain_recv_count - one_time_transfer); total_trafster -= one_time_transfer; /* Step 3. 
check if we received valid frame header */ if (!is_recved_vaild_fh) { int total_payload_size; int contained_payload_size; int fh_start_index; int is_valid_fh; is_valid_fh = verify_frame_head_and_get_payload_size(lp_recv_operator->lp_ptr, BUFFER_USED_LENGTH(*lp_recv_operator), &total_payload_size, &contained_payload_size, &fh_start_index); if (IS_SUCCESS(is_valid_fh)) { int copy_size = contained_payload_size + SIZE_OF_FRAME_HEAD; int need_recv_buffer_size = total_payload_size + SIZE_OF_FRAME_HEAD; /*received new frame head!*/ remain_recv_count = total_payload_size - contained_payload_size; /*received frame head, so we update total transfer count here*/ total_trafster = MAX(remain_recv_count, remain_send_count); /* printf("[packege check]: total payload = %d, contained = %d, fh_start = %d\n", total_payload_size, contained_payload_size, fh_start_index); */ /*copy all valid data to actual receive buffer head*/ if (need_recv_buffer_size > BUFFER_REMAIN_LENGTH(recv_buffer)) { ret_value = ER_NO_ENOUGH_RECV_BUFFER; break; } /*do not reset buffer, because we now support received mulit-frame in one io cycle. 
*/ //RESET_BUFFER(&recv_buffer); memcpy(BUFFER_PTR(recv_buffer), lp_recv_operator->lp_ptr + fh_start_index, copy_size); /* save total received size here to support receive mulit-frame */ total_receive_size += need_recv_buffer_size; recv_buffer.index += copy_size; recv_buffer.length = total_receive_size; // pr_err("dump: index = %d, length = %d\n", // recv_buffer.index, recv_buffer.length); is_recved_vaild_fh = TRUE; } else { int is_recved_hf_prefix = ER_FAILED; remain_recv_count = 0; //copy SIZEOF_FRAME_HEAD bytes from tail to head memcpy(dummy_recv_buffer.lp_ptr, BUFFER_PTR_FROM_USED_TAIL(*lp_recv_operator, SIZE_OF_FRAME_HEAD), SIZE_OF_FRAME_HEAD); dummy_recv_buffer.index = SIZE_OF_FRAME_HEAD; /*check if the last SIZE_OF_FRAME_HEAD bytes contained frame head prefix, we will read more data if it contained, to resovle slice case */ is_recved_hf_prefix = verify_frame_head_prefix(BUFFER_PTR_FROM_USED_TAIL(*lp_recv_operator, SIZE_OF_FRAME_HEAD), SIZE_OF_FRAME_HEAD); /* check if the received data included frame head prefix 0x53 */ if (IS_SUCCESS(is_recved_hf_prefix)) { total_trafster += BUFFER_REMAIN_LENGTH(dummy_recv_buffer); /* printf("set total_transfer = %d\n", total_trafster); */ } is_recved_vaild_fh = FALSE; } } else { /* if we already received one frame, but still has some data need send, we need change is_recved_vaild_fh = FALSE to prepare receive the next frame */ #if 1 if (remain_send_count > 0 && 0 == remain_recv_count) { is_recved_vaild_fh = FALSE; RESET_BUFFER(&dummy_recv_buffer); //pr_err("psh: note: try receive mulit-frame.\n"); } #endif } } #if 1 if (IS_FAILED(ret_value)) { /* dump recvied buffer */ dump_buffer(lp_recv_operator->lp_ptr, BUFFER_USED_LENGTH(*lp_recv_operator)); } else { //dump_buffer(recv_buffer.lp_ptr, BUFFER_USED_LENGTH(recv_buffer)); } #endif lp_recved_size ? *lp_recved_size = BUFFER_USED_LENGTH(recv_buffer) : 0; return ret_value; }