/*
 * overflow_estimate_npages_needed () - Guess the number of pages needed to
 *                                      insert a set of overflow data
 *   return: npages
 *   total_novf_sets(in): Number of sets of overflow data
 *   avg_ovfdata_size(in): Average size of overflow data
 *
 * Note: Mirrors the page layout used by overflow_insert: the first page
 *       holds (DB_PAGESIZE - offsetof (OVERFLOW_FIRST_PART, data)) bytes,
 *       every following page holds
 *       (DB_PAGESIZE - offsetof (OVERFLOW_REST_PART, data)) bytes.
 */
int
overflow_estimate_npages_needed (THREAD_ENTRY * thread_p, int total_novf_sets, int avg_ovfdata_size)
{
  int npages;

  /* Subtract the capacity of the first overflow page; whatever is left
   * spills into rest-part pages. */
  avg_ovfdata_size -= (DB_PAGESIZE - (int) offsetof (OVERFLOW_FIRST_PART, data));
  if (avg_ovfdata_size > 0)
    {
      /* One first page plus however many rest pages the remainder needs.
       * (Same formula as overflow_insert; the previous code forgot to
       * count the first page here.) */
      npages = DB_PAGESIZE - offsetof (OVERFLOW_REST_PART, data);
      npages = 1 + CEIL_PTVDIV (avg_ovfdata_size, npages);
    }
  else
    {
      /* Everything fits in the first page */
      npages = 1;
    }

  npages *= total_novf_sets;

  /* Add the file-table overhead pages needed to track the data pages */
  npages += file_guess_numpages_overhead (thread_p, NULL, npages);

  return npages;
}
/*
 * cursor_allocate_oid_buffer () - Allocate the OID and MOP prefetch buffers
 *                                 of a cursor
 *   cursor_id_p(in/out): cursor whose oid_set/mop_set/oid_ent_count are set
 *
 * Note: Allocation failures are tolerated on purpose (the buffers are only
 *       an optimization); on failure oid_ent_count is forced to zero so the
 *       buffers are never used.
 */
static void
cursor_allocate_oid_buffer (CURSOR_ID * cursor_id_p)
{
  size_t oid_bytes;
  size_t mop_bytes;

  /*
   * NOTE: Currently assume a PAGESIZE. In fact, since we can
   *       find average tuple count per page from the LIST FILE
   *       identifier we can make a good estimate of oid entry count.
   */
  cursor_id_p->oid_ent_count = CEIL_PTVDIV (DB_PAGESIZE, sizeof (OID)) - 1;

  oid_bytes = cursor_id_p->oid_ent_count * sizeof (OID);
  cursor_id_p->oid_set = (OID *) malloc (oid_bytes);
  if (cursor_id_p->oid_set == NULL)
    {
      /* Best-effort only: fall back to an empty buffer */
      cursor_id_p->oid_ent_count = 0;
    }

  /* Size the MOP buffer from the (possibly zeroed) entry count so the two
   * buffers always agree. */
  mop_bytes = cursor_id_p->oid_ent_count * sizeof (MOP);
  cursor_id_p->mop_set = (MOP *) malloc (mop_bytes);
  if (cursor_id_p->mop_set == NULL)
    {
      /* Best-effort only: release the OID buffer too and disable both */
      free_and_init (cursor_id_p->oid_set);
      cursor_id_p->oid_ent_count = 0;
    }
}
/*
 * mht_calculate_htsize () - Round a requested hash-table size up to a
 *                           "good" size
 *   return: adjusted hash table size
 *   ht_size(in): requested hash table size
 *
 * Note: If the request exceeds the largest entry of mht_Primes, round up
 *       to the next power of two; otherwise binary-search mht_Primes and
 *       return the smallest prime >= ht_size.
 */
static unsigned int
mht_calculate_htsize (unsigned int ht_size)
{
  int left, right, middle;	/* indices for binary search */

  if (ht_size > mht_Primes[NPRIMES - 1])
    {
      /* get a power of two */
      if (!((ht_size & (ht_size - 1)) == 0))
	{
	  /* Turn off bits until only the leftmost one remains, then double.
	   * NOTE: the loop must run while ht_size is NOT a power of two
	   * (x & (x - 1) != 0); the previous inverted test never executed
	   * the loop and returned a doubled non-power-of-two size. */
	  while (ht_size & (ht_size - 1))
	    {
	      ht_size &= (ht_size - 1);
	    }
	  ht_size <<= 1;
	}
    }
  else
    {
      /* we can assign a prime number; binary search */
      for (middle = 0, left = 0, right = NPRIMES - 1; left <= right;)
	{
	  middle = CEIL_PTVDIV ((left + right), 2);
	  if (ht_size == mht_Primes[middle])
	    {
	      break;
	    }
	  else if (ht_size > mht_Primes[middle])
	    {
	      left = middle + 1;
	    }
	  else
	    {
	      right = middle - 1;
	    }
	}

      /* If we didn't find the size, get the larger size and not the small one */
      if (ht_size > mht_Primes[middle] && middle < (NPRIMES - 1))
	{
	  middle++;
	}

      ht_size = mht_Primes[middle];
    }

  return ht_size;
}
/*
 * overflow_insert () - Insert a multipage data in overflow
 *   return: ovf_vpid on success or NULL on failure
 *   ovf_vfid(in): File where the overflow data is going to be stored
 *   ovf_vpid(out): Overflow address
 *   recdes(in): Record descriptor
 *
 * Note: Data in overflow is composed of several pages. Pages in the overflow
 *       area are not shared among other pieces of overflow data.
 *
 *       --------------------------------      ------------------------
 *       |Next_vpid |Length|... data ...| ... --> |Next_vpid|... data ...|
 *       --------------------------------      ------------------------
 *
 *       Singly linked list of pages.
 *       The length of the multipage data is stored on its first overflow page.
 *
 *       Overflow pages are not locked in any mode since they are not shared
 *       by other pieces of data and their address is only known by accessing
 *       the relocation overflow record data which has been appropriately
 *       locked.
 */
VPID *
overflow_insert (THREAD_ENTRY * thread_p, const VFID * ovf_vfid, VPID * ovf_vpid, RECDES * recdes)
{
  PAGE_PTR vfid_fhdr_pgptr = NULL;
  OVERFLOW_FIRST_PART *first_part;
  OVERFLOW_REST_PART *rest_parts;
  OVERFLOW_RECV_LINKS undo_recv;
  char *copyto;
  int length, copy_length;
  INT32 npages = 0;
  char *data;
  int alloc_nth;
  LOG_DATA_ADDR addr;
  LOG_DATA_ADDR logical_undoaddr;
  int i;
  VPID *vpids, fhdr_vpid;
  VPID vpids_buffer[OVERFLOW_ALLOCVPID_ARRAY_SIZE + 1];
  FILE_ALLOC_VPIDS alloc_vpids;

  /*
   * We don't need to lock the overflow pages since these pages are not
   * shared among several pieces of overflow data. The overflow pages are
   * known by accessing the relocation-overflow record with the appropriate
   * lock.
   */

  /* Physical log address for redo records on each data page */
  addr.vfid = ovf_vfid;
  addr.offset = 0;
  /* Logical log address for the file-level undo (page deallocation) */
  logical_undoaddr.vfid = ovf_vfid;
  logical_undoaddr.offset = 0;
  logical_undoaddr.pgptr = NULL;

  undo_recv.ovf_vfid = *ovf_vfid;

  /*
   * Temporary:
   *   Lock the file header, so I am the only one changing the file table
   *   of allocated pages. This is needed since this function is using
   *   file_find_nthpages, which could give me not the expected page, if
   *   someone else removes pages after the initial allocation.
   */
  fhdr_vpid.volid = ovf_vfid->volid;
  fhdr_vpid.pageid = ovf_vfid->fileid;

  vfid_fhdr_pgptr = pgbuf_fix (thread_p, &fhdr_vpid, OLD_PAGE, PGBUF_LATCH_WRITE, PGBUF_UNCONDITIONAL_LATCH);
  if (vfid_fhdr_pgptr == NULL)
    {
      return NULL;
    }

  /*
   * Guess the number of pages. The total number of pages is found by
   * dividing length by pagesize - the smallest header. Then, we make sure
   * that this estimate is correct.
   */

  /* Bytes left over after filling the first page's data area */
  length = recdes->length - (DB_PAGESIZE - (int) offsetof (OVERFLOW_FIRST_PART, data));
  if (length > 0)
    {
      /* First page plus enough rest pages for the remainder */
      i = DB_PAGESIZE - offsetof (OVERFLOW_REST_PART, data);
      npages = 1 + CEIL_PTVDIV (length, i);
    }
  else
    {
      npages = 1;
    }

  /* Use the stack buffer when it is big enough; heap-allocate otherwise.
   * One extra slot holds a NULL VPID terminator (the last page's next link). */
  if (npages > OVERFLOW_ALLOCVPID_ARRAY_SIZE)
    {
      vpids = (VPID *) malloc ((npages + 1) * sizeof (VPID));
      if (vpids == NULL)
	{
	  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OUT_OF_VIRTUAL_MEMORY, 1, (npages + 1) * sizeof (VPID));
	  pgbuf_unfix (thread_p, vfid_fhdr_pgptr);
	  return NULL;
	}
    }
  else
    {
      vpids = vpids_buffer;
    }

#if !defined(NDEBUG)
  for (i = 0; i < npages; i++)
    {
      VPID_SET_NULL (&vpids[i]);
    }
#endif
  /* Terminator: the last page's next_vpid becomes NULL via vpids[npages] */
  VPID_SET_NULL (&vpids[npages]);

  alloc_vpids.vpids = vpids;
  alloc_vpids.index = 0;

  /*
   * We do not initialize the pages during allocation since they are not
   * pointed by anyone until we return from this function, at that point
   * they are initialized.
   */
  if (file_alloc_pages_as_noncontiguous (thread_p, ovf_vfid, vpids, &alloc_nth, npages, NULL, NULL, NULL,
					 &alloc_vpids) == NULL)
    {
      if (vpids != vpids_buffer)
	{
	  free_and_init (vpids);
	}
      pgbuf_unfix (thread_p, vfid_fhdr_pgptr);
      return NULL;
    }

  assert (alloc_vpids.index == npages);

#if !defined(NDEBUG)
  for (i = 0; i < npages; i++)
    {
      assert (!VPID_ISNULL (&vpids[i]));
    }
#endif

  *ovf_vpid = vpids[0];

  /* Copy the content of the data, one page at a time */
  data = recdes->data;
  length = recdes->length;
  for (i = 0; i < npages; i++)
    {
      addr.pgptr = pgbuf_fix (thread_p, &vpids[i], NEW_PAGE, PGBUF_LATCH_WRITE, PGBUF_UNCONDITIONAL_LATCH);
      if (addr.pgptr == NULL)
	{
	  goto exit_on_error;
	}

      /* Is this the first page ? */
      if (i == 0)
	{
	  /* This is the first part: header carries the total length */
	  first_part = (OVERFLOW_FIRST_PART *) addr.pgptr;
	  first_part->next_vpid = vpids[i + 1];
	  first_part->length = length;
	  copyto = (char *) first_part->data;

	  copy_length = DB_PAGESIZE - offsetof (OVERFLOW_FIRST_PART, data);
	  if (length < copy_length)
	    {
	      copy_length = length;
	    }

	  /* notify the first part of overflow recdes */
	  log_append_empty_record (thread_p, LOG_DUMMY_OVF_RECORD);
	}
      else
	{
	  /* Rest part: header carries only the link to the next page */
	  rest_parts = (OVERFLOW_REST_PART *) addr.pgptr;
	  rest_parts->next_vpid = vpids[i + 1];
	  copyto = (char *) rest_parts->data;

	  copy_length = DB_PAGESIZE - offsetof (OVERFLOW_REST_PART, data);
	  if (length < copy_length)
	    {
	      copy_length = length;
	    }
	}

      memcpy (copyto, data, copy_length);
      data += copy_length;
      length -= copy_length;

      pgbuf_get_vpid (addr.pgptr, &undo_recv.new_vpid);
      if (file_is_new_file (thread_p, ovf_vfid) == FILE_OLD_FILE)
	{
	  /* we don't do undo logging for new files */
	  log_append_undo_data (thread_p, RVOVF_NEWPAGE_LOGICAL_UNDO, &logical_undoaddr, sizeof (undo_recv),
				&undo_recv);
	}

      /* Redo image covers the page header plus the copied bytes */
      log_append_redo_data (thread_p, RVOVF_NEWPAGE_INSERT, &addr,
			    copy_length + CAST_BUFLEN (copyto - (char *) addr.pgptr), (char *) addr.pgptr);

      pgbuf_set_dirty (thread_p, addr.pgptr, FREE);
      addr.pgptr = NULL;
    }

  /* All bytes of the record must have been consumed */
  assert (length == 0);
#if defined (CUBRID_DEBUG)
  if (length > 0)
    {
      er_log_debug (ARG_FILE_LINE, "ovf_insert: ** SYSTEM ERROR calculation" " of number of pages needed to store overflow data seems" " incorrect. Need no more than %d pages", npages);
      goto exit_on_error;
    }
#endif

  /*
   * Temporary:
   *   Unlock the file header (latched above to keep the file table of
   *   allocated pages stable while we were using it).
   */
  if (vpids != vpids_buffer)
    {
      free_and_init (vpids);
    }

  pgbuf_unfix (thread_p, vfid_fhdr_pgptr);
  return ovf_vpid;

exit_on_error:

  /* Undo the allocation: give every page back to the file */
  for (i = 0; i < npages; i++)
    {
      (void) file_dealloc_page (thread_p, ovf_vfid, &vpids[i]);
    }

  if (vpids != vpids_buffer)
    {
      free_and_init (vpids);
    }

  pgbuf_unfix (thread_p, vfid_fhdr_pgptr);
  return NULL;
}
/* test functions */
/*
 * test_area () - Run a multithreaded allocation test against one AREA
 *   return: success () or fail ()
 *   info(in): area configuration (name, entry size, allocation count)
 *   nthreads(in): number of worker threads to spawn (<= MAX_THREADS)
 *   proc(in): worker routine; receives the AREA * and returns NO_ERROR cast
 *             to void * on success
 *
 * Note: After the workers join, every block's bitmap must be fully clear,
 *       i.e. all entries were freed.
 */
static int
test_area (AREA_CREATE_INFO * info, int nthreads, void *(*proc) (void *))
{
#define MAX_THREADS 64
  AREA *area = NULL;
  pthread_t threads[MAX_THREADS];
  char msg[256];
  int i;

  assert (info != NULL);

  /* snprintf: info->name is caller-supplied and could overflow a plain
   * sprintf into the fixed-size msg buffer */
  snprintf (msg, sizeof (msg), "%s(size:%d, count:%d), %d threads", info->name, info->entry_size, info->alloc_cnt,
	    nthreads);
  begin (msg);

  /* sanity check */
  if (nthreads > MAX_THREADS)
    {
      return fail ("too many threads");
    }

  /* initialization */
  area_init ();
  area = area_create (info->name, info->entry_size, info->alloc_cnt);
  if (area == NULL)
    {
      return fail ("area create fail");
    }

  /* multithreaded test */
  for (i = 0; i < nthreads; i++)
    {
      if (pthread_create (&threads[i], NULL, proc, (void *) area) != NO_ERROR)
	{
	  return fail ("thread create");
	}
    }

  for (i = 0; i < nthreads; i++)
    {
      void *retval;

      pthread_join (threads[i], &retval);
      if (retval != NO_ERROR)
	{
	  return fail ("thread proc error");
	}
    }

  /* results: every bitmap word must be zero (all entries freed) */
  {
    AREA_BLOCKSET_LIST *blockset;
    AREA_BLOCK *block;
    int i, j, blockset_cnt = 0, block_cnt = 0, chunk_count;

    for (blockset = area->blockset_list; blockset != NULL; blockset = blockset->next)
      {
	for (i = 0; i < blockset->used_count; i++)
	  {
	    block = blockset->items[i];
	    assert (block != NULL);

	    chunk_count = CEIL_PTVDIV (block->bitmap.entry_count, LF_BITFIELD_WORD_SIZE);
	    for (j = 0; j < chunk_count; j++)
	      {
		if (block->bitmap.bitfield[j])
		  {
		    return fail ("check bitmap status");
		  }
	      }
	    block_cnt++;
	  }
	blockset_cnt++;
      }

    printf (" Used %3d blocks(%2d blocksets). ", block_cnt, blockset_cnt);
  }

  /* destroy */
  area_destroy (area);
  area_final ();

  return success ();
#undef MAX_THREADS
}