void
it_free_page (index_tree_t * it, buffer_desc_t * buf)
{
  short l;
  it_map_t * itm;
  dp_addr_t remap;
  ASSERT_IN_MAP (buf->bd_tree, buf->bd_page);
  itm = IT_DP_MAP (buf->bd_tree, buf->bd_page);
  remap = (dp_addr_t) (ptrlong) gethash (DP_ADDR2VOID (buf->bd_page), &itm->itm_remap);
  if (!buf->bd_is_write)
    GPF_T1 ("isp_free_page without write access to buffer.");
  dp_may_compact (buf->bd_storage, buf->bd_page); /* no need to keep deld buffers in checked for compact list */
  l = SHORT_REF (buf->bd_buffer + DP_FLAGS);
  if (!(l == DPF_BLOB || l == DPF_BLOB_DIR) && !remap)
    GPF_T1 ("Freeing a page that is not remapped");
  if (DPF_INDEX == l)
    it->it_n_index_est--;
  else
    it->it_n_blob_est--;
  if (buf->bd_page != buf->bd_physical_page && (DPF_BLOB_DIR == l || DPF_BLOB == l))
    GPF_T1 ("blob is not supposed to be remapped");
  DBG_PT_PRINTF ((" Delete %ld remap %ld FL=%d buf=%p\n", buf->bd_page, buf->bd_physical_page, l, buf));
  if (buf->bd_iq)
    {
      mutex_leave (&itm->itm_mtx);
      buf_cancel_write (buf);
      mutex_enter (&itm->itm_mtx);
    }
  if (!remap)
    {
      /* a blob in checkpoint space can be deleted without a remap existing in commit space. */
      if (DPF_BLOB != l && DPF_BLOB_DIR != l)
	GPF_T1 ("not supposed to delete a buffer in a different space unless it's a blob");
      if (buf->bd_is_dirty)
	GPF_T1 ("blob in checkpoint space can't be dirty - has no remap, in commit, hence is in checkpoint");
      sethash (DP_ADDR2VOID (buf->bd_page), &itm->itm_remap, (void*) (ptrlong) DP_DELETED);
      remhash (DP_ADDR2VOID (buf->bd_page), &itm->itm_dp_to_buf);
      page_leave_as_deleted (buf);
      return;
    }
  if (IS_NEW_BUFFER (buf))
    /* if this was CREATED AND DELETED without intervening checkpoint the delete
     * does not carry outside the commit space. */
    remhash (DP_ADDR2VOID (buf->bd_page), &itm->itm_remap);
  else
    sethash (DP_ADDR2VOID (buf->bd_page), &itm->itm_remap, (void *) (ptrlong) DP_DELETED);
  if (!remhash (DP_ADDR2VOID (buf->bd_page), &itm->itm_dp_to_buf))
    GPF_T1 ("it_free_page does not hit the buffer in tree cache");
  it_free_remap (it, buf->bd_page, buf->bd_physical_page, l);
  page_leave_as_deleted (buf);
}
void
dbs_extent_free (dbe_storage_t * dbs, dp_addr_t ext_dp, int must_be_in_em)
{
  extent_map_t * em;
  extent_t * ext;
  int word, bit;
  uint32 * arr, page_no;
  ASSERT_IN_DBS (dbs);
  dbs_locate_ext_bit (dbs, ext_dp, &arr, &page_no, &word, &bit);
  if (0 == (arr[word] & (1 << bit)))
    GPF_T1 ("double free in ext set");
  page_set_update_checksum (arr, word, bit);
  arr[word] &= ~(1 << bit);
  em = DBS_DP_TO_EM (dbs, ext_dp);
  if (em)
    {
      ASSERT_IN_MTX (em->em_mtx);
      ext = EM_DP_TO_EXT (em, ext_dp);
      if (ext)
	{
	  remhash (DP_ADDR2VOID (ext_dp), em->em_dp_to_ext);
	  switch (EXT_TYPE (ext))
	    {
	    case EXT_INDEX:
	      em->em_n_pages -= EXTENT_SZ;
	      em->em_n_free_pages -= EXTENT_SZ;
	      break;
	    case EXT_REMAP:
	      em->em_n_remap_pages -= EXTENT_SZ;
	      em->em_n_free_remap_pages -= EXTENT_SZ;
	      break;
	    case EXT_BLOB:
	      em->em_n_blob_pages -= EXTENT_SZ;
	      em->em_n_free_blob_pages -= EXTENT_SZ;
	      break;
	    }
	  ext->ext_flags = EXT_FREE;
	}
      if (ext == em->em_last_remap_ext)
	em->em_last_remap_ext = NULL;
      if (ext == em->em_last_blob_ext)
	em->em_last_blob_ext = NULL;
      remhash (DP_ADDR2VOID (ext_dp), dbs->dbs_dp_to_extent_map);
    }
  else if (must_be_in_em)
    GPF_T1 ("cannot free ext that is not part of any em");
}
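/*
 * The extent bookkeeping above reduces to a test-and-clear on a bit array:
 * locate the word and bit for the page, trap a double free, then clear the
 * bit.  A minimal self-contained sketch of just that pattern follows; the
 * names (page_set, locate_bit, page_set_free) are illustrative and not the
 * Virtuoso dbs_* API.
 */

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_WORD 32

/* Hypothetical free set: bit N set means page N is allocated. */
static uint32_t page_set[1024];

/* Map a page number to its word/bit pair, as dbs_locate_ext_bit does for
   extents. */
static void
locate_bit (uint32_t page_no, int *word, int *bit)
{
  *word = page_no / BITS_PER_WORD;
  *bit = page_no % BITS_PER_WORD;
}

/* Free a page: trap double frees, then clear the allocation bit. */
static void
page_set_free (uint32_t page_no)
{
  int word, bit;
  locate_bit (page_no, &word, &bit);
  assert (page_set[word] & (1u << bit));	/* otherwise a double free */
  page_set[word] &= ~(1u << bit);
}

int
main (void)
{
  page_set[0] = 0xffffffff;	/* pretend pages 0..31 are allocated */
  page_set_free (7);
  printf ("word 0 after free: 0x%08x\n", (unsigned) page_set[0]);
  return 0;
}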
void
stmt_free_bookmarks (cli_stmt_t * stmt)
{
  caddr_t k, id;
  dk_hash_iterator_t hit;
  if (!stmt->stmt_bookmarks)
    return;
  IN_CON (stmt->stmt_connection);
  dk_hash_iterator (&hit, stmt->stmt_bookmarks);
  while (dk_hit_next (&hit, (void **) &k, (void **) &id))
    {
      remhash ((void *) k, stmt->stmt_connection->con_bookmarks);
      dk_free_tree (id);
    }
  hash_table_free (stmt->stmt_bookmarks);
  id_hash_free (stmt->stmt_bookmarks_rev);
  LEAVE_CON (stmt->stmt_connection);
}
GC
gc_cache_lookup (struct gc_cache *cache, XGCValues *gcv, unsigned long mask)
{
  struct gc_cache_cell *cell, *next, *prev;
  struct gcv_and_mask gcvm;

  if ((!!cache->head) != (!!cache->tail))
    abort ();
  if (cache->head && (cache->head->prev || cache->tail->next))
    abort ();

  gcvm.mask = mask;
  gcvm.gcv = *gcv;	/* this copies... */

#ifdef GCCACHE_HASH
  if (gethash (&gcvm, cache->table, (void *) &cell))
#else /* !GCCACHE_HASH */
  cell = cache->tail;	/* start at the end (most recently used) */
  while (cell)
    {
      if (gc_cache_eql (&gcvm, &cell->gcvm))
	break;
      else
	cell = cell->prev;
    }
  if (cell)
#endif /* !GCCACHE_HASH */
    {
      /* Found a cell.  Move this cell to the end of the list, so that it
	 will be less likely to be collected than a cell that was accessed
	 less recently. */
      if (cell == cache->tail)
	return cell->gc;

      next = cell->next;
      prev = cell->prev;
      if (prev) prev->next = next;
      if (next) next->prev = prev;
      if (cache->head == cell) cache->head = next;
      cell->next = 0;
      cell->prev = cache->tail;
      cache->tail->next = cell;
      cache->tail = cell;
      if (cache->head == cell) abort ();
      if (cell->next) abort ();
      if (cache->head->prev) abort ();
      if (cache->tail->next) abort ();
      return cell->gc;
    }

  /* else, cache miss. */

  if (cache->size == GC_CACHE_SIZE)
    /* Reuse the first cell on the list (least-recently-used.)
       Remove it from the list, and unhash it from the table. */
    {
      cell = cache->head;
      cache->head = cell->next;
      cache->head->prev = 0;
      if (cache->tail == cell) cache->tail = 0; /* only one */
      BLOCK_INPUT;
      XFreeGC (cache->dpy, cell->gc);
      cache->delete_count++;
      UNBLOCK_INPUT;
#ifdef GCCACHE_HASH
      remhash (&cell->gcvm, cache->table);
#endif
    }
  else if (cache->size > GC_CACHE_SIZE)
    abort ();
  else
    {
      /* Allocate a new cell (don't put it in the list or table yet.) */
      cell = (struct gc_cache_cell *) xmalloc (sizeof (struct gc_cache_cell));
      cache->size++;
    }

  /* Now we've got a cell (new or reused).  Fill it in. */
  memcpy (&cell->gcvm.gcv, gcv, sizeof (XGCValues));
  cell->gcvm.mask = mask;

  /* Put the cell on the end of the list. */
  cell->next = 0;
  cell->prev = cache->tail;
  if (cache->tail) cache->tail->next = cell;
  cache->tail = cell;
  if (! cache->head) cache->head = cell;
  cache->create_count++;

#ifdef GCCACHE_HASH
  /* Hash it in the table */
  puthash (&cell->gcvm, cell, cache->table);
#endif

  /* Now make and return the GC. */
  BLOCK_INPUT;
  cell->gc = XCreateGC (cache->dpy, cache->window, mask, gcv);
  UNBLOCK_INPUT;

  /* debug */
  if (cell->gc != gc_cache_lookup (cache, gcv, mask))
    abort ();

  return cell->gc;
}
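/*
 * The cache-hit branch above is the move-to-tail step of an LRU kept as a
 * doubly linked list: unlink the cell, append it at the tail, so the head
 * is always the least recently used entry.  A stripped-down sketch of that
 * relinking on a generic node type (not the gc_cache structures) follows.
 */

#include <stddef.h>
#include <stdio.h>

struct lru_node
{
  struct lru_node *prev, *next;
  int id;
};

struct lru_list
{
  struct lru_node *head, *tail;
};

/* Move NODE to the tail so it is evicted last; this mirrors the hit path
   of gc_cache_lookup. */
static void
lru_touch (struct lru_list *l, struct lru_node *node)
{
  if (node == l->tail)
    return;			/* already most recently used */

  /* Unlink from the current position. */
  if (node->prev)
    node->prev->next = node->next;
  if (node->next)
    node->next->prev = node->prev;
  if (l->head == node)
    l->head = node->next;

  /* Relink at the tail. */
  node->next = NULL;
  node->prev = l->tail;
  l->tail->next = node;
  l->tail = node;
}

int
main (void)
{
  struct lru_node a = { NULL, NULL, 1 }, b = { &a, NULL, 2 }, c = { &b, NULL, 3 };
  struct lru_list l = { &a, &c };
  a.next = &b;
  b.next = &c;
  lru_touch (&l, &a);		/* head becomes b, tail becomes a */
  printf ("tail is node %d\n", l.tail->id);
  return 0;
}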
static void
check_free (void *ptr)
{
  __free_hook = 0;
  __malloc_hook = 0;
  if (!pointer_table)
    pointer_table = make_hash_table (max (100, FREE_QUEUE_LIMIT * 2));
  if (ptr != 0)
    {
      long size;
#ifdef UNMAPPED_FREE
      unsigned long rounded_up_size;
#endif

      EMACS_INT present = (EMACS_INT) gethash (ptr, pointer_table,
					       (const void **) &size);

      if (!present)
	{
	  /* This can only happen if you try to free something that didn't
	     come from malloc */
#if !defined(__linux__)
	  /* I originally wrote: "There's really no need to drop core."
	     I have seen the error of my ways. -slb */
	  if (strict_free_check)
	    ABORT ();
#endif
	  printf("Freeing unmalloc'ed memory at %p\n", ptr);
	  __free_hook = check_free;
	  __malloc_hook = check_malloc;
	  goto end;
	}

      if (size < 0)
	{
	  /* This happens when you free twice */
#if !defined(__linux__)
	  /* See above comment. */
	  if (strict_free_check)
	    ABORT ();
#endif
	  printf("Freeing %p twice\n", ptr);
	  __free_hook = check_free;
	  __malloc_hook = check_malloc;
	  goto end;
	}

      puthash (ptr, (void *) -size, pointer_table);
#ifdef UNMAPPED_FREE
      /* Round up size to an even number of pages. */
      rounded_up_size = ROUND_UP_TO_PAGE (size);
      /* Protect the pages freed from all access */
      if (strict_free_check)
	mprotect (ptr, rounded_up_size, PROT_NONE);
#else
      /* Set every word in the block to 0xdeadbeef */
      if (strict_free_check)
	{
	  unsigned long long_length = (size + (sizeof (long) - 1))
	    / sizeof (long);
	  unsigned long i;

	  for (i = 0; i < long_length; i++)
	    ((unsigned long *) ptr)[i] = 0xdeadbeef;
	}
#endif
      free_queue[current_free].address = ptr;
      free_queue[current_free].length = size;

      current_free++;
      if (current_free >= FREE_QUEUE_LIMIT)
	current_free = 0;

      /* Really free this if there's something there */
      {
	void *old = free_queue[current_free].address;

	if (old)
	  {
#ifdef UNMAPPED_FREE
	    unsigned long old_len = free_queue[current_free].length;

	    mprotect (old, old_len, PROT_READ | PROT_WRITE | PROT_EXEC);
#endif
	    free (old);
	    remhash (old, pointer_table);
	  }
      }
    }
  __free_hook = check_free;
  __malloc_hook = check_malloc;
end:
  return;
}
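/*
 * check_free works by keeping each live allocation's size in pointer_table
 * and negating it on free, so a second free of the same pointer shows up as
 * a negative size.  The sketch below shows the same idea with a plain array
 * standing in for the hash table; ledger_record/ledger_check_free are
 * illustrative names, and the deferred free_queue machinery is left out.
 */

#include <stdio.h>
#include <stdlib.h>

#define LEDGER_SZ 256

struct ledger_entry { void *ptr; long size; };
static struct ledger_entry ledger[LEDGER_SZ];

/* Record a fresh allocation (roughly what the malloc hook would do). */
static void
ledger_record (void *p, long size)
{
  int i;
  for (i = 0; i < LEDGER_SZ; i++)
    if (!ledger[i].ptr || ledger[i].ptr == p)
      {
	ledger[i].ptr = p;
	ledger[i].size = size;
	return;
      }
}

/* Check a free request: 0 if the pointer is live, -1 if it is unknown or
   already freed.  The real hook then queues the pointer and frees it later. */
static int
ledger_check_free (void *p)
{
  int i;
  for (i = 0; i < LEDGER_SZ; i++)
    if (ledger[i].ptr == p)
      {
	if (ledger[i].size < 0)
	  {
	    printf ("Freeing %p twice\n", p);
	    return -1;
	  }
	ledger[i].size = -ledger[i].size;	/* mark as freed */
	return 0;
      }
  printf ("Freeing unmalloc'ed memory at %p\n", p);
  return -1;
}

int
main (void)
{
  char *p = malloc (16);
  ledger_record (p, 16);
  ledger_check_free (p);	/* ok, marks the block freed */
  ledger_check_free (p);	/* reports a double free */
  free (p);
  return 0;
}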
int
main(int argc, char *argv[])
{
    int c;
    int n;
    int rval;
    char *p;

    setlocale(LC_ALL, "");

    traceout = stderr;

    if (signal(SIGINT, SIG_IGN) != SIG_IGN)
        signal(SIGINT, onintr);

    initkwds();
    initspaces();
    STACKMAX = INITSTACKMAX;

    mstack = (stae *)xalloc(sizeof(stae) * STACKMAX);
    sstack = (char *)xalloc(STACKMAX);

    maxout = 0;
    outfile = NULL;
    resizedivs(MAXOUT);

    while ((c = getopt(argc, argv, "gst:d:D:U:o:I:")) != -1)
        switch(c) {
        case 'D':               /* define something.. */
            for (p = optarg; *p; p++)
                if (*p == '=')
                    break;
            if (p == optarg)
                errx(1, "null variable cannot be defined");
            if (*p)
                *p++ = EOS;
            dodefine(optarg, p);
            break;
        case 'I':
            addtoincludepath(optarg);
            break;
        case 'U':               /* undefine... */
            remhash(optarg, TOP);
            break;
        case 'g':
            mimic_gnu = 1;
            break;
        case 'd':
            set_trace_flags(optarg);
            break;
        case 's':
            synccpp = 1;
            break;
        case 't':
            mark_traced(optarg, 1);
            break;
        case 'o':
            trace_file(optarg);
            break;
        case '?':
        default:
            usage();
        }
    argc -= optind;
    argv += optind;

    rval = 0;
    active = stdout;            /* default active output     */
    bbase[0] = bufbase;
    if (!argc) {
        sp = -1;                /* stack pointer initialized */
        fp = 0;                 /* frame pointer initialized */
        set_input(infile+0, stdin, "stdin");
                                /* default input (naturally) */
        if ((inname[0] = strdup("-")) == NULL)
            err(1, NULL);
        inlineno[0] = 1;
        emitline();
        macro();
    } else
        for (; argc--; ++argv) {
            p = *argv;
            if (p[0] == '-' && p[1] == EOS)
                set_input(infile, stdin, "stdin");
            else if (fopen_trypath(infile, p) == NULL) {
                warn("%s", p);
                rval = 1;
                continue;
            }
            sp = -1;
            fp = 0;
            if ((inname[0] = strdup(p)) == NULL)
                err(1, NULL);
            inlineno[0] = 1;
            emitline();
            macro();
            release_input(infile);
        }

    if (*m4wraps) {             /* anything for rundown ??   */
        ilevel = 0;             /* in case m4wrap includes.. */
        bufbase = bp = buf;     /* use the entire buffer     */
        pbstr(m4wraps);         /* user-defined wrapup act   */
        macro();                /* last will and testament   */
    }

    if (active != stdout)
        active = stdout;        /* reset output just in case */
    for (n = 1; n < maxout; n++)    /* default wrap-up: undivert */
        if (outfile[n] != NULL)
            getdiv(n);
                                /* remove bitbucket if used  */
    if (outfile[0] != NULL) {
        (void) fclose(outfile[0]);
    }

    exit(rval);
}
GC gc_cache_lookup(struct gc_cache *cache, XGCValues * gcv, unsigned long mask)
{
    struct gc_cache_cell *cell = NULL, *next = NULL, *prev = NULL;
    struct gcv_and_mask gcvm;

    if (cache == NULL)
        abort();
    else if ((!!cache->head) != (!!cache->tail))
        abort();
    else if (cache->head && cache->tail
             && (cache->head->prev || cache->tail->next))
        abort();
    else {
        gcvm.mask = mask;
        gcvm.gcv = *gcv;        /* this copies... */

#ifdef GCCACHE_HASH
        if (gethash(&gcvm, cache->table, (const void **)((void*)&cell)))
#else                           /* !GCCACHE_HASH */
        /* start at the end (most recently used) */
        cell = cache->tail;
        while (cell) {
            if (gc_cache_eql(&gcvm, &cell->gcvm))
                break;
            else
                cell = cell->prev;
        }

        /* #### This whole file needs some serious overhauling. */
        if (!(mask | GCTile) && cell->gc->values.tile)
            cell = NULL;
        else if (!(mask | GCStipple) && cell->gc->values.stipple)
            cell = NULL;
#endif                          /* !GCCACHE_HASH */
        {
            /* Found a cell.  Move this cell to the end of the list, so
               that it will be less likely to be collected than a cell
               that was accessed less recently. */
            if (!cell) {
                abort();
                return NULL;
            } else {
                if (cell == cache->tail)
                    return cell->gc;

                next = cell->next;
                prev = cell->prev;
                if (prev)
                    prev->next = next;
                if (next)
                    next->prev = prev;
                if (cache->head == cell)
                    cache->head = next;
                cell->next = NULL;
                cell->prev = cache->tail;
                if (cache->tail)
                    cache->tail->next = cell;
                else
                    abort();
                cache->tail = cell;
                if (cache->head == cell)
                    abort();
                else if (cell->next)
                    abort();
                else if (cache->head != NULL && cache->head->prev)
                    abort();
                else if (cache->tail != NULL && cache->tail->next)
                    abort();
                return cell->gc;
            }
        }

        /* else, cache miss. */
        if (cache == NULL)
            abort();
        else if (cache->size == GC_CACHE_SIZE)
            /* Reuse the first cell on the list (least-recently-used).
               Remove it from the list, and unhash it from the table. */
        {
            cell = cache->head;
            if (cache->head != NULL) {
                cache->head = cell->next;
                cache->head->prev = 0;
            }
            if (cache->tail == cell)
                cache->tail = 0;        /* only one */
            XFreeGC(cache->dpy, cell->gc);
            cache->delete_count++;
#ifdef GCCACHE_HASH
            remhash(&cell->gcvm, cache->table);
#endif
        } else if (cache->size > GC_CACHE_SIZE)
            abort();
        else {
            /* Allocate a new cell (don't put it in the list or table yet). */
            cell = xnew(struct gc_cache_cell);
            cache->size++;
        }

        if (cell != NULL) {
            /* Now we've got a cell (new or reused).  Fill it in. */
            memcpy(&cell->gcvm.gcv, gcv, sizeof(XGCValues));
            cell->gcvm.mask = mask;

            /* Put the cell on the end of the list. */
            cell->next = 0;
            cell->prev = cache->tail;
            if (cache->tail)
                cache->tail->next = cell;
            cache->tail = cell;
            if (!cache->head)
                cache->head = cell;
            cache->create_count++;
#ifdef GCCACHE_HASH
            /* Hash it in the table */
            puthash(&cell->gcvm, cell, cache->table);
#endif
            /* Now make and return the GC. */
            cell->gc = XCreateGC(cache->dpy, cache->window, mask, gcv);

            /* debug */
            assert(cell->gc == gc_cache_lookup(cache, gcv, mask));

            return cell->gc;
        }
    }
    return NULL;                /* No cell determined */
}
/*
 * expand_builtin - evaluate built-in macros.
 */
void
expand_builtin(const char *argv[], int argc, int td)
{
    int c, n;
    int ac;
    static int sysval = 0;

#ifdef DEBUG
    printf("argc = %d\n", argc);
    for (n = 0; n < argc; n++)
        printf("argv[%d] = %s\n", n, argv[n]);
    fflush(stdout);
#endif

    /*
     * if argc == 3 and argv[2] is null, then we
     * have a macro-or-builtin() type call. We adjust
     * argc to avoid further checking..
     */
    ac = argc;

    if (argc == 3 && !*(argv[2]))
        argc--;

    switch (td & TYPEMASK) {

    case DEFITYPE:
        if (argc > 2)
            dodefine(argv[2], (argc > 3) ? argv[3] : null);
        break;

    case PUSDTYPE:
        if (argc > 2)
            dopushdef(argv[2], (argc > 3) ? argv[3] : null);
        break;

    case DUMPTYPE:
        dodump(argv, argc);
        break;

    case TRACEONTYPE:
        dotrace(argv, argc, 1);
        break;

    case TRACEOFFTYPE:
        dotrace(argv, argc, 0);
        break;

    case EXPRTYPE:
        /*
         * doexpr - evaluate arithmetic expression
         */
        if (argc > 2)
            pbnum(expr(argv[2]));
        break;

    case IFELTYPE:
        if (argc > 4)
            doifelse(argv, argc);
        break;

    case IFDFTYPE:
        /*
         * doifdef - select one of two alternatives based
         * on the existence of another definition
         */
        if (argc > 3) {
            if (lookup(argv[2]) != nil)
                pbstr(argv[3]);
            else if (argc > 4)
                pbstr(argv[4]);
        }
        break;

    case LENGTYPE:
        /*
         * dolen - find the length of the argument
         */
        pbnum((argc > 2) ? strlen(argv[2]) : 0);
        break;

    case INCRTYPE:
        /*
         * doincr - increment the value of the argument
         */
        if (argc > 2)
            pbnum(atoi(argv[2]) + 1);
        break;

    case DECRTYPE:
        /*
         * dodecr - decrement the value of the argument
         */
        if (argc > 2)
            pbnum(atoi(argv[2]) - 1);
        break;

    case SYSCTYPE:
        /*
         * dosys - execute system command
         */
        if (argc > 2) {
            fflush(NULL);
            sysval = system(argv[2]);
        }
        break;

    case SYSVTYPE:
        /*
         * dosysval - return value of the last system call.
         */
        pbnum(sysval);
        break;

    case ESYSCMDTYPE:
        if (argc > 2)
            doesyscmd(argv[2]);
        break;

    case INCLTYPE:
        if (argc > 2)
            if (!doincl(argv[2]))
                err(1, "%s at line %lu: include(%s)",
                    CURRENT_NAME, CURRENT_LINE, argv[2]);
        break;

    case SINCTYPE:
        if (argc > 2)
            (void) doincl(argv[2]);
        break;
#ifdef EXTENDED
    case PASTTYPE:
        if (argc > 2)
            if (!dopaste(argv[2]))
                err(1, "%s at line %lu: paste(%s)",
                    CURRENT_NAME, CURRENT_LINE, argv[2]);
        break;

    case SPASTYPE:
        if (argc > 2)
            (void) dopaste(argv[2]);
        break;
#endif
    case CHNQTYPE:
        if (mimic_gnu)
            gnu_dochq(argv, ac);
        else
            dochq(argv, argc);
        break;

    case CHNCTYPE:
        if (mimic_gnu)
            gnu_dochc(argv, ac);
        else
            dochc(argv, argc);
        break;

    case SUBSTYPE:
        /*
         * dosub - select substring
         */
        if (argc > 3)
            dosub(argv, argc);
        break;

    case SHIFTYPE:
        /*
         * doshift - push back all arguments
         * except the first one (i.e. skip argv[2])
         */
        if (argc > 3) {
            for (n = argc - 1; n > 3; n--) {
                pbstr(rquote);
                pbstr(argv[n]);
                pbstr(lquote);
                putback(COMMA);
            }
            pbstr(rquote);
            pbstr(argv[3]);
            pbstr(lquote);
        }
        break;

    case DIVRTYPE:
        if (argc > 2 && (n = atoi(argv[2])) != 0)
            dodiv(n);
        else {
            active = stdout;
            oindex = 0;
        }
        break;

    case UNDVTYPE:
        doundiv(argv, argc);
        break;

    case DIVNTYPE:
        /*
         * dodivnum - return the number of the
         * current output diversion
         */
        pbnum(oindex);
        break;

    case UNDFTYPE:
        /*
         * doundefine - undefine previously
         * defined macro(s) or m4 keyword(s).
         */
        if (argc > 2)
            for (n = 2; n < argc; n++)
                remhash(argv[n], ALL);
        break;

    case POPDTYPE:
        /*
         * dopopdef - remove the topmost
         * definitions of macro(s) or m4 keyword(s).
         */
        if (argc > 2)
            for (n = 2; n < argc; n++)
                remhash(argv[n], TOP);
        break;

    case MKTMTYPE:
        /*
         * dotemp - create a temporary file
         */
        if (argc > 2) {
            int fd;
            char *temp;

            temp = xstrdup(argv[2]);

            fd = mkstemp(temp);
            if (fd == -1)
                err(1,
                    "%s at line %lu: couldn't make temp file %s",
                    CURRENT_NAME, CURRENT_LINE, argv[2]);
            close(fd);
            pbstr(temp);
            free(temp);
        }
        break;

    case TRNLTYPE:
        /*
         * dotranslit - replace all characters in the
         * source string that appear in the "from" string
         * with the corresponding characters in the "to"
         * string.
         */
        if (argc > 3) {
            char *temp;

            temp = xalloc(strlen(argv[2])+1);
            if (argc > 4)
                map(temp, argv[2], argv[3], argv[4]);
            else
                map(temp, argv[2], argv[3], null);
            pbstr(temp);
            free(temp);
        } else if (argc > 2)
            pbstr(argv[2]);
        break;

    case INDXTYPE:
        /*
         * doindex - find the index of the second argument
         * string in the first argument string. -1 if not
         * present.
         */
        pbnum((argc > 3) ? indx(argv[2], argv[3]) : -1);
        break;

    case ERRPTYPE:
        /*
         * doerrp - print the arguments to the stderr file
         */
        if (argc > 2) {
            for (n = 2; n < argc; n++)
                fprintf(stderr, "%s ", argv[n]);
            fprintf(stderr, "\n");
        }
        break;

    case DNLNTYPE:
        /*
         * dodnl - eat up to and including newline
         */
        while ((c = gpbc()) != '\n' && c != EOF)
            ;
        break;

    case M4WRTYPE:
        /*
         * dom4wrap - set up for wrap-up/wind-down activity
         */
        m4wraps = (argc > 2) ? xstrdup(argv[2]) : null;
        break;

    case EXITTYPE:
        /*
         * doexit - immediate exit from m4.
         */
        killdiv();
        exit((argc > 2) ? atoi(argv[2]) : 0);
        break;

    case DEFNTYPE:
        if (argc > 2)
            for (n = 2; n < argc; n++)
                dodefn(argv[n]);
        break;

    case INDIRTYPE:     /* Indirect call */
        if (argc > 2)
            doindir(argv, argc);
        break;

    case BUILTINTYPE:   /* Builtins only */
        if (argc > 2)
            dobuiltin(argv, argc);
        break;

    case PATSTYPE:
        if (argc > 2)
            dopatsubst(argv, argc);
        break;
    case REGEXPTYPE:
        if (argc > 2)
            doregexp(argv, argc);
        break;
    case LINETYPE:
        doprintlineno(infile+ilevel);
        break;
    case FILENAMETYPE:
        doprintfilename(infile+ilevel);
        break;
    case SELFTYPE:
        pbstr(rquote);
        pbstr(argv[1]);
        pbstr(lquote);
        break;
    default:
        errx(1, "%s at line %lu: eval: major botch.",
             CURRENT_NAME, CURRENT_LINE);
        break;
    }
}
void
it_free_dp_no_read (index_tree_t * it, dp_addr_t dp, int dp_type)
{
  buffer_desc_t * buf;
  dp_addr_t phys_dp = 0;
  it_map_t * itm = IT_DP_MAP (it, dp);
  ASSERT_IN_MAP (it, dp);
  buf = IT_DP_TO_BUF (it, dp);
  if (buf)
    phys_dp = buf->bd_physical_page;
  else
    IT_DP_REMAP (it, dp, phys_dp);
  if (buf && buf->bd_being_read)
    {
      log_info ("Deleting blob page while it is being read dp=%d .\n", dp);
      /* the buffer can be a being-read decoy with no dp, so check dps only if not being read */
    }
  else if (phys_dp != dp)
    GPF_T1 ("A blob/hash temp dp is not supposed to be remapped in isp_free_blob_dp_no_read");
  if (buf)
    {
      it_cursor_t itc_auto;
      it_cursor_t * itc = &itc_auto;
      ITC_INIT (itc, isp, NULL);
      itc_from_it (itc, it);
      itc->itc_itm1 = itm; /* already inside, set itc_itm1 to mark this */
      /* Note that the buf is not passed to page_wait_access.  This is because of
       * the 'being read' possibility.  page_fault will detect this and sync. */
      page_wait_access (itc, dp, NULL, &buf, PA_WRITE, RWG_WAIT_ANY);
      if (PF_OF_DELETED == buf)
	{
	  ITC_LEAVE_MAPS (itc);
	  return;
	}
      if (dp_type != SHORT_REF (buf->bd_buffer + DP_FLAGS))
	GPF_T1 ("About to delete non-blob page from blob page dir.");
      ITC_IN_KNOWN_MAP (itc, dp); /* get back in, could have come out if waited */
      it_free_page (it, buf);
      return;
    }
  DBG_PT_PRINTF (("Free absent blob L=%d \n", dp));
  {
    dp_addr_t remap = (dp_addr_t) (ptrlong) gethash (DP_ADDR2VOID (dp), &itm->itm_remap);
    dp_addr_t cpt_remap = (dp_addr_t) (ptrlong) DP_CHECKPOINT_REMAP (it->it_storage, dp);
    if (cpt_remap)
      GPF_T1 ("Blob/hash temp dp not expected to have cpt remap in delete no read");
    if (DPF_BLOB == dp_type)
      it->it_n_blob_est--;
    if (remap)
      {
	/* if this was CREATED AND DELETED without intervening checkpoint the delete
	 * does not carry outside commit space. */
	remhash (DP_ADDR2VOID (dp), &itm->itm_remap);
	em_free_dp (it->it_extent_map, dp, DPF_BLOB == dp_type ? EXT_BLOB : EXT_INDEX);
      }
    else
      {
	if (DPF_HASH == dp_type)
	  GPF_T1 ("a hash temp page is not supposed to be in cpt space");
	sethash (DP_ADDR2VOID (dp), &itm->itm_remap, (void *) (ptrlong) DP_DELETED);
      }
  }
}
buffer_desc_t *
it_new_page (index_tree_t * it, dp_addr_t addr, int type, int in_pmap,
	     it_cursor_t * has_hold)
{
  it_map_t * itm;
  extent_map_t * em = it->it_extent_map;
  int ext_type = (!it->it_blobs_with_index && (DPF_BLOB == type || DPF_BLOB_DIR == type))
      ? EXT_BLOB : EXT_INDEX, n_tries;
  buffer_desc_t *buf;
  buffer_pool_t * action_bp = NULL;
  dp_addr_t physical_dp;
  if (in_pmap)
    GPF_T1 ("do not call isp_new_page in page map");
  physical_dp = em_new_dp (em, ext_type, addr, NULL);
  if (!physical_dp)
    {
      log_error ("Out of disk space for database");
      if (DPF_INDEX == type)
	{
	  /* a split must never fail to get a page.  Use the remap hold as a backup */
	  physical_dp = em_new_dp (it->it_extent_map, EXT_REMAP, 0, &has_hold->itc_n_pages_on_hold);
	  if (!physical_dp)
	    {
	      log_error ("After running out of disk, cannot get a page from remap reserve to complete operation. Exiting.");
	      call_exit (-1);
	    }
	}
      else
	return NULL;
    }
  if (DPF_INDEX == type)
    it->it_n_index_est++;
  else
    it->it_n_blob_est++;
  for (n_tries = 0; ; n_tries++)
    {
      buf = bp_get_buffer_1 (NULL, &action_bp, in_log_replay ? BP_BUF_REQUIRED : BP_BUF_IF_AVAIL);
      if (buf)
	break;
      if (action_bp)
	bp_delayed_stat_action (action_bp);
      action_bp = NULL;
      if (n_tries > 10)
	{
	  log_error ("Signaling out of disk due to failure to get a buffer. This condition is not a case of running out of disk");
	  return NULL;
	}
      if (5 == n_tries)
	log_info ("Failed to get a buffer for a new page. Retrying. If the failure repeats, an out of disk error will be signalled. The cause of this is having too many buffers wired down for preread, flush or group by/hash join temp space. To correct, increase the number of buffers in the configuration file. If this repeats in spite of having hundreds of thousands of buffers, please report to support.");
      if (n_tries > 4)
	virtuoso_sleep (0, 50000 * (n_tries - 4));
    }
  if (action_bp)
    bp_delayed_stat_action (action_bp);
  if (buf->bd_readers != 1)
    GPF_T1 ("expecting buf to be wired down when allocated");
  buf_dbg_printf (("Buf %x new in tree %x dp=%d\n", buf, isp, physical_dp));
  itm = IT_DP_MAP (it, physical_dp);
  mutex_enter (&itm->itm_mtx);
  sethash (DP_ADDR2VOID (physical_dp), &itm->itm_dp_to_buf, (void*) buf);
  sethash (DP_ADDR2VOID (physical_dp), &itm->itm_remap, DP_ADDR2VOID (physical_dp));
  buf->bd_page = physical_dp;
  buf->bd_physical_page = physical_dp;
  buf->bd_tree = it;
  buf->bd_storage = it->it_storage;
  buf->bd_pl = NULL;
  buf->bd_readers = 0;
  BD_SET_IS_WRITE (buf, 1);
  mutex_leave (&itm->itm_mtx);
  if (em != em->em_dbs->dbs_extent_map && EXT_INDEX == ext_type)
    {
      mutex_enter (em->em_mtx);
      remhash (DP_ADDR2VOID (physical_dp), em->em_uninitialized);
      mutex_leave (em->em_mtx);
    }
#ifdef PAGE_TRACE
  memset (buf->bd_buffer, 0, PAGE_SZ); /* all for debug view */
#else
  memset (buf->bd_buffer, 0, PAGE_SZ - PAGE_DATA_SZ); /* header only */
#endif
  SHORT_SET (buf->bd_buffer + DP_FLAGS, type);
  if (type == DPF_INDEX)
    {
      page_map_t * map = buf->bd_content_map;
      if (!map)
	buf->bd_content_map = (page_map_t*) resource_get (PM_RC (PM_SZ_1));
      else
	{
	  if (map->pm_size > PM_SZ_1)
	    {
	      resource_store (PM_RC (map->pm_size), (void*) map);
	      buf->bd_content_map = (page_map_t *) resource_get (PM_RC (PM_SZ_1));
	    }
	}
      pg_map_clear (buf);
      LONG_SET (buf->bd_buffer + DP_KEY_ID, it->it_key ? it->it_key->key_id : KI_TEMP);
    }
  else if (buf->bd_content_map)
    {
      resource_store (PM_RC (buf->bd_content_map->pm_size), (void*) buf->bd_content_map);
      buf->bd_content_map = NULL;
    }
  buf_set_dirty (buf);
  DBG_PT_PRINTF (("New page L=%d B=%p FL=%d K=%s \n", buf->bd_page, buf, type,
      it->it_key ? (it->it_key->key_name ? it->it_key->key_name : "unnamed key") : "no key"));
  TC (tc_new_page);
  return buf;
}