/*
 * Initialize a page table whose page carries PGSTE extensions: the 4K
 * page is treated as four quarters of 256 unsigned longs each, with
 * pte quarters at offsets 0 and 512 and pgste quarters at 256 and 768.
 * The pte quarters are filled with the empty-page pattern, the pgste
 * quarters are zeroed.
 * NOTE(review): the alternating pte/pgste layout is inferred from the
 * offsets used here — confirm against the s390 pgtable definitions.
 */
void clear_table_pgstes(unsigned long *table)
{
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
	memset(table + 256, 0, PAGE_SIZE/4);
	clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
	memset(table + 768, 0, PAGE_SIZE/4);
}
/* Make sure the minor heap is empty by performing a minor collection
   if needed. */
void caml_empty_minor_heap (void)
{
  value **r;
  uintnat prev_alloc_words;

  if (caml_young_ptr != caml_young_end){
    if (caml_minor_gc_begin_hook != NULL) (*caml_minor_gc_begin_hook) ();
    prev_alloc_words = caml_allocated_words;
    caml_in_minor_collection = 1;
    caml_gc_message (0x02, "<", 0);
    /* Promote (oldify) everything reachable from the local roots. */
    caml_oldify_local_roots();
    /* Promote everything reachable from the remembered set
       (pointers from the major heap into the minor heap). */
    for (r = caml_ref_table.base; r < caml_ref_table.ptr; r++){
      caml_oldify_one (**r, *r);
    }
    caml_oldify_mopup ();
    /* Fix up weak pointers into the minor heap: a zero header marks an
       object that was promoted (field 0 holds the forwarding pointer);
       otherwise the object died and the weak reference is cleared. */
    for (r = caml_weak_ref_table.base; r < caml_weak_ref_table.ptr; r++){
      if (Is_block (**r) && Is_young (**r)){
        if (Hd_val (**r) == 0){
          **r = Field (**r, 0);
        }else{
          **r = caml_weak_none;
        }
      }
    }
    if (caml_young_ptr < caml_young_start) caml_young_ptr = caml_young_start;
    /* Account the words allocated in the minor heap, then reset it. */
    caml_stat_minor_words += Wsize_bsize (caml_young_end - caml_young_ptr);
    caml_young_ptr = caml_young_end;
    caml_young_limit = caml_young_start;
    clear_table (&caml_ref_table);
    clear_table (&caml_weak_ref_table);
    caml_gc_message (0x02, ">", 0);
    caml_in_minor_collection = 0;
    caml_stat_promoted_words += caml_allocated_words - prev_alloc_words;
    ++ caml_stat_minor_collections;
    caml_final_empty_young ();
    if (caml_minor_gc_end_hook != NULL) (*caml_minor_gc_end_hook) ();
  }else{
    /* Minor heap already empty: still run the young finalisers. */
    caml_final_empty_young ();
  }
#ifdef DEBUG
  {
    /* Poison the emptied minor heap to catch dangling young pointers. */
    value *p;
    for (p = (value *) caml_young_start; p < (value *) caml_young_end; ++p){
      *p = Debug_free_minor;
    }
    ++ minor_gc_counter;
  }
#endif
}
/*
 * Create a dynamic hash table.
 * maxcount caps the number of nodes the table will hold; compfun and
 * hashfun override the default key-comparison and hash functions when
 * non-NULL.  Returns the new table, or NULL on allocation failure.
 */
hash_t *hash_create(hashcount_t maxcount, hash_comp_t compfun,
	hash_fun_t hashfun)
{
    hash_t *hash;

    if (hash_val_t_bit == 0)	/* 1: lazily compute hash_val_t's bit width */
	compute_bits();

    hash = malloc(sizeof *hash);	/* 2 */

    if (hash) {		/* 3 */
	hash->table = malloc(sizeof *hash->table * INIT_SIZE);	/* 4 */

	if (hash->table) {	/* 5 */
	    hash->nchains = INIT_SIZE;		/* 6 */
	    hash->highmark = INIT_SIZE * 2;	/* grow above this count */
	    hash->lowmark = INIT_SIZE / 2;	/* shrink below this count */
	    hash->nodecount = 0;
	    hash->maxcount = maxcount;
	    hash->compare = compfun ? compfun : hash_comp_default;
	    hash->function = hashfun ? hashfun : hash_fun_default;
	    hash->allocnode = hnode_alloc;
	    hash->freenode = hnode_free;
	    hash->context = NULL;
	    hash->mask = INIT_MASK;
	    hash->dynamic = 1;			/* 7: table may resize */
	    clear_table(hash);			/* 8: empty all chains */
	    assert (hash_verify(hash));
	    return hash;
	}
	free(hash);	/* table allocation failed: undo step 2 */
    }
    return NULL;
}
/* Destructor: release the table, header and status resources, then
 * shut down the curses session. */
table::~table()
{
	clear_table();
	clear_header();
	clear_status();
	endwin();
}
/*
 * Build an nrow x ncol grid of curses windows centred on (m_W0, m_H0),
 * each cell m_CW wide and m_CH tall.  Any existing grid is destroyed
 * first.  Returns the number of rows stored in m_table.
 */
int table::create_table(int nrow, int ncol)
{
	clear_table();
	/* half-extents used to centre the grid on the reference point */
	int dX = m_CW * ncol/2;
	int dY = m_CH * nrow/2;
	for(int row = 0; row < nrow; row++)
	{
		int x0 = m_W0 - dX;
		int y0 = m_H0 + m_CH * row - dY;
		row_t new_row;
		new_row.X0 = x0;
		new_row.Y0 = y0;
		for(int col = 0; col < ncol; col++)
		{
			int xn = x0 + m_CW * col;
			int yn = y0;
			WINDOW *w = newwin(m_CH, m_CW, yn, xn);
			/* stop this row early if curses refuses a window */
			if(!w) break;
			box(w, 0 , 0);
			wrefresh(w);
			new_row.w.push_back(w);
		}
		m_table.push_back(new_row);
	}
	return m_table.size();
}
/*
 * Rebuild the mount-point table from the fstab via the getfsent(3)
 * interface, skipping "swap" and "kernfs" entries.  Each remaining
 * entry is inserted into fstab_mounts keyed by its mount directory.
 */
static void read_table(void)
{
	int tab;
	struct fstab *ent;
	MountPoint *mp;

	clear_table();

	tab = setfsent();
	g_return_if_fail(tab != 0);

	while ((ent = getfsent())) {
		if (strcmp(ent->fs_vfstype, "swap") == 0)
			continue;
		if (strcmp(ent->fs_vfstype, "kernfs") == 0)
			continue;

		mp = g_malloc(sizeof(MountPoint));
		mp->name = g_strdup(ent->fs_spec);	/* block special device name */
		mp->dir = g_strdup(ent->fs_file);	/* file system path prefix */

		/* mp->dir is owned by mp; the hash table borrows it as key */
		g_hash_table_insert(fstab_mounts, mp->dir, mp);
	}

	endfsent();
}
/*
 * Set up the kernel page tables, enable address translation via the
 * control registers, and initialize the memory zones.
 */
void __init paging_init(void)
{
	static const int ssm_mask = 0x04000000L;
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type;

	init_mm.pgd = swapper_pg_dir;
	S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK;
#ifdef CONFIG_64BIT
	/* A three level page table (4TB) is enough for the kernel space. */
	S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	pgd_type = _REGION3_ENTRY_EMPTY;
#else
	S390_lowcore.kernel_asce |= _ASCE_TABLE_LENGTH;
	pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
	/* empty out all 2048 top-level entries */
	clear_table((unsigned long *) init_mm.pgd, pgd_type,
		    sizeof(unsigned long)*2048);
	vmem_map_init();

	/* enable virtual mapping in kernel mode */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	__raw_local_irq_ssm(ssm_mask);

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}
/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type, asce_bits;

	init_mm.pgd = swapper_pg_dir;
	/* Pick a four level (region-second) table only when the vmalloc
	 * area would not fit below 4TB, otherwise three levels suffice. */
	if (VMALLOC_END > (1UL << 42)) {
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
	S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	/* empty out all 2048 top-level entries */
	clear_table((unsigned long *) init_mm.pgd, pgd_type,
		    sizeof(unsigned long)*2048);
	vmem_map_init();

	/* enable virtual mapping in kernel mode */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	__arch_local_irq_stosm(0x04);

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}
/*
 * Rebind cached node tables after the underlying data block moved from
 * `data` to `newdata`.  For every NODECACHE entry (pointer -> table)
 * whose PROXYCACHE userdata still references `data`, the proxy is
 * repointed at `newdata`, the cached table is emptied and re-keyed by
 * the node's new address (or dropped when gettable finds no node).
 * Re-keyed entries are staged in a temporary table and merged back
 * afterwards, since keys cannot be added while iterating with lua_next.
 */
static void update_cache(lua_State *L, const void *data, const void * newdata) {
	lua_getfield(L, LUA_REGISTRYINDEX, NODECACHE);
	int t = lua_gettop(L);
	lua_getfield(L, LUA_REGISTRYINDEX, PROXYCACHE);
	int pt = t + 1;
	lua_newtable(L);	// temp table
	int nt = pt + 1;
	lua_pushnil(L);
	while (lua_next(L, t) != 0) {
		// pointer (-2) -> table (-1)
		lua_pushvalue(L, -1);
		if (lua_rawget(L, pt) == LUA_TUSERDATA) {
			// pointer, table, proxy
			struct proxy * p = lua_touserdata(L, -1);
			if (p->data == data) {
				// update to newdata
				p->data = newdata;
				const struct table * newt = gettable(newdata, p->index);
				lua_pop(L, 1);
				// pointer, table
				clear_table(L);
				lua_pushvalue(L, lua_upvalueindex(1));
				// pointer, table, meta
				lua_setmetatable(L, -2);
				// pointer, table
				if (newt) {
					// stage under the node's new address
					lua_rawsetp(L, nt, newt);
				} else {
					lua_pop(L, 1);
				}
				// pointer
				// erase the stale key from NODECACHE
				lua_pushvalue(L, -1);
				lua_pushnil(L);
				lua_rawset(L, t);
			} else {
				lua_pop(L, 2);
			}
		} else {
			lua_pop(L, 2);
			// pointer
		}
	}
	// copy nt to t
	lua_pushnil(L);
	while (lua_next(L, nt) != 0) {
		lua_pushvalue(L, -2);
		lua_insert(L, -2);
		// key key value
		lua_rawset(L, t);
	}
	// NODECACHE PROXYCACHE TEMP
	lua_pop(L, 3);
}
/*
 * Allocate and initialize a hash table with the given hash and
 * equality callbacks.  The bucket array starts at capacity 2.
 * Returns the new table, or NULL on allocation failure.
 * (Fix: the original dereferenced unchecked malloc/calloc results
 * and would crash on out-of-memory.)
 */
HashTable* hash_table_new(HashFunc hashFunc, EqualsFunc equalsFunc)
{
    HashTable* table = (HashTable*)malloc(sizeof(HashTable));
    if (!table)
        return NULL;
    table->hashFunc = hashFunc;
    table->equalsFunc = equalsFunc;
    table->size = 0;
    table->capacity = 2;
    /* calloc zero-initializes every bucket pointer */
    table->table = (Node**)calloc(table->capacity, sizeof(Node*));
    if (!table->table) {
        free(table);
        return NULL;
    }
    clear_table(table, FALSE);
    return table;
}
/*
 * Delete every node in the table, releasing each through the table's
 * freenode callback, then reset the chains to empty.  The hash table
 * itself stays valid for reuse.
 */
void hash_free_nodes(hash_t *hash)
{
    hscan_t hs;
    hnode_t *node;
    hash_scan_begin(&hs, hash);
    while ((node = hash_scan_next(&hs))) {
	/* unlink before freeing so the scan state stays consistent */
	hash_scan_delete(hash, node);
	hash->freenode(node, hash->context);
    }
    hash->nodecount = 0;
    clear_table(hash);
}
/*
 * Allocate and empty-initialize a pud directory.  Only exists on
 * 64-bit; on 31-bit there is no pud level and NULL is returned.
 * The order-2 allocation spans 4 pages, matching the PAGE_SIZE * 4
 * clear below.
 */
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

#ifdef CONFIG_64BIT
	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pud;
}
/*
 * del_hash_table - release all memory owned by a hash table and null
 * out the caller's pointer so it cannot dangle.
 * param:  hash_table - address of the caller's hash table pointer
 * return: 0 on success, -1 if hash_table or *hash_table is NULL
 */
int del_hash_table(hash_table_t** hash_table)
{
    hash_table_t *ht;

    if (hash_table == NULL)
        return -1;
    ht = *hash_table;
    if (ht == NULL)
        return -1;

    /* release the bucket lists, then the table struct itself */
    clear_table(ht);
    free(ht);

    *hash_table = NULL;
    return 0;
}
/*
 * Allocate and empty-initialize a pmd directory.  Only exists on
 * 64-bit; on 31-bit NULL is returned.  The order-2 allocation spans
 * 4 pages, matching the PAGE_SIZE * 4 clear below.
 */
static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pmd;
}
/*
 * Redraw the light table: wipe it, then draw every pulse.
 * NOTE(review): the loop index i is not declared here, so it must be a
 * file-scope variable — confirm this is intentional and not racy.
 */
void assign_cells(void)
{
	clear_table();
	//table_draw_hist_bg(1.0);
	for (i=0; i<NUM_LIGHTS; i++) {
		draw_pulse(i);
	}
}
/*
 * Histogram visual: for each column, color consecutive cells starting
 * at row 0 while the energy-weighted coin flip prob() keeps
 * succeeding; the first failure ends that column.  The row index is
 * passed to the color callback as the color selector.
 * (Fix: removed the unused local color_index.)
 */
void histogram(struct visual_params *arg)
{
	int i, j;
	clear_table();
	for (j = 0; j < COLS; j++) {
		for (i = 0; i < ROWS_E; i++) {
			if (prob(arg->energy)) {
				arg->color(&table[i][j], i);
			} else {
				break;	/* column ends at first miss */
			}
		}
	}
}
/*
 * page table entry allocation/free routines.
 */
/*
 * Allocate a page table page and initialize every entry to the empty
 * pattern.  With noexec set, an additional shadow page table is
 * allocated first and its address stashed in page->index of the
 * primary page (page->index stays 0 otherwise).  Returns the physical
 * address of the primary table, or NULL if any allocation fails.
 */
unsigned long *page_table_alloc(int noexec)
{
	struct page *page = alloc_page(GFP_KERNEL);
	unsigned long *table;

	if (!page)
		return NULL;
	page->index = 0;
	if (noexec) {
		struct page *shadow = alloc_page(GFP_KERNEL);
		if (!shadow) {
			/* undo the primary allocation on failure */
			__free_page(page);
			return NULL;
		}
		table = (unsigned long *) page_to_phys(shadow);
		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		page->index = (addr_t) table;
	}
	table = (unsigned long *) page_to_phys(page);
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
	return table;
}
/*
 * Seed every pulse with a random position and default decay/radius,
 * then blank both the main and the temporary light tables.
 * NOTE(review): the loop index i is file-scope, not local — confirm
 * intentional.
 */
void init_table(void)
{
	// randomize all the pulses
	for (i=0; i<NUM_LIGHTS; i++) {
		pulses[i].x = rand() % TABLE_WIDTH;
		pulses[i].y = rand() % TABLE_HEIGHT;
		pulses[i].decay = 0;
		pulses[i].radius = PULSE_RADIUS;
	}
	clear_table();
	clear_tmp_table();
}
/*
 * Release a table's row data; for dynamically created tables also free
 * the structure itself (name, column metadata, list linkage).
 * Statically defined tables only have their contents cleared.
 * NULL is accepted and ignored.
 */
void free_table( struct table *table )
{
    if (!table) return;

    clear_table( table );
    if (table->flags & TABLE_FLAG_DYNAMIC)
    {
        TRACE("destroying %p\n", table);
        /* casts drop the const the struct declares on these fields */
        heap_free( (WCHAR *)table->name );
        free_columns( (struct column *)table->columns, table->num_cols );
        list_remove( &table->entry );
        heap_free( table );
    }
}
/*
 * Allocate and empty-initialize a page table.  Uses the regular
 * page_table_alloc once the slab allocator is up; falls back to
 * bootmem during early boot.  Returns NULL on allocation failure.
 */
static pte_t __ref *vmem_pte_alloc(void)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}
/*
 * "Fill row" visual: once every cell is lit, wipe the whole table;
 * otherwise drag the table one step along the configured direction and
 * paint a fresh strip row at the entering edge (row ROWS_E-1 or 0,
 * depending on arg->dir).
 */
void fill_row(struct visual_params *arg)
{
	int i, j;
	char is_full = 1;
	/* scan for any unlit (all-zero RGB) cell */
	for (i = 0; i < ROWS_E; i++) {
		for (j = 0; j < COLS; j++) {
			if (!rgb_nz(&table[i][j])) {
				is_full = 0;
			}
		}
	}
	if (is_full) {
		clear_table();
		return;
	}
	drag(0, arg->dir ? -1 : 1);
	strip_row_x(arg, arg->dir ? ROWS_E - 1 : 0);
}
/*
 * ndb_delete_all: connect to an NDB cluster and delete every row from
 * each table named on the command line.  Exits with an NDBT status
 * code (WRONGARGS for bad options or unknown tables, FAILED on
 * connection or delete errors, OK otherwise).
 */
int main(int argc, char** argv){
  NDB_INIT(argv[0]);
  load_defaults("my",load_default_groups,&argc,&argv);
  int ho_error;
#ifndef DBUG_OFF
  opt_debug= "d:t:O,/tmp/ndb_delete_all.trace";
#endif
  if ((ho_error=handle_options(&argc, &argv, my_long_options,
			       ndb_std_get_one_option)))
    return NDBT_ProgramExit(NDBT_WRONGARGS);

  Ndb_cluster_connection con(opt_connect_str);
  con.set_name("ndb_delete_all");
  /* 12 retries, 5s apart, verbose */
  if(con.connect(12, 5, 1) != 0)
  {
    ndbout << "Unable to connect to management server." << endl;
    return NDBT_ProgramExit(NDBT_FAILED);
  }
  if (con.wait_until_ready(30,0) < 0)
  {
    ndbout << "Cluster nodes not ready in 30 seconds." << endl;
    return NDBT_ProgramExit(NDBT_FAILED);
  }

  Ndb MyNdb(&con, _dbname );
  if(MyNdb.init() != 0){
    ERR(MyNdb.getNdbError());
    return NDBT_ProgramExit(NDBT_FAILED);
  }

  // Check if table exists in db
  int res = NDBT_OK;
  for(int i = 0; i<argc; i++){
    const NdbDictionary::Table * pTab =
      NDBT_Table::discoverTableFromDb(&MyNdb, argv[i]);
    if(pTab == NULL){
      ndbout << " Table " << argv[i] << " does not exist!" << endl;
      return NDBT_ProgramExit(NDBT_WRONGARGS);
    }
    ndbout << "Deleting all from " << argv[i];
    if (! _transactional)
      ndbout << " (non-transactional)";
    ndbout << " ...";
    /* keep going on failure so remaining tables still get cleared */
    if(clear_table(&MyNdb, pTab, ! _transactional) == NDBT_FAILED){
      res = NDBT_FAILED;
      ndbout << "FAILED" << endl;
    }
  }
  return NDBT_ProgramExit(res);
}
/*
 * Release the page table whose address is stashed in page[1].index of
 * a huge page.  When the machine has hardware large-page support
 * (MACHINE_HAS_HPAGE) no such table exists and this is a no-op, as it
 * is when no table pointer was recorded.
 */
void arch_release_hugepage(struct page *page)
{
	pte_t *ptep;

	if (MACHINE_HAS_HPAGE)
		return;

	ptep = (pte_t *) page[1].index;
	if (!ptep)
		return;
	/* invalidate all entries before handing the table back */
	clear_table((unsigned long *) ptep, _PAGE_INVALID,
		    PTRS_PER_PTE * sizeof(pte_t));
	page_table_free(&init_mm, (unsigned long *) ptep);
	page[1].index = 0;
}
/**
 * process(fmt, data)
 *
 * Purpose: read from datafile, format and output selected records
 * Input:   fmt - input stream from format file
 *          data - stream from datafile
 * Output:  copied fmt to stdout with insertions
 * Returns: 0 (fix: the function is declared int but previously fell
 *          off the end without returning — undefined behavior if the
 *          caller uses the value)
 * Errors:  not reported, functions call fatal() and die
 * history: 2012-11-28 added free_table (10q BW)
 **/
int process(FILE *fmt, FILE *data)
{
    symtab_t *tab;

    if ( (tab = new_table()) == NULL )
        fatal("Cannot create storage object","");
    while ( get_record(tab,data) != NO )    /* while more data */
    {
        /* NOTE(review): debug trace left in output stream as in the
         * original — consider removing */
        printf("Inside the process while loop\n");
        mailmerge( tab, fmt );              /* merge with format */
        clear_table(tab);                   /* discard data */
    }
    /* free_table(tab); */                  /* no memory leaks! —
                                               left disabled as found */
    return 0;
}
/*
 * Exercise the Table ADT end to end under the memory checker: create,
 * insert, query, remove, display, clear, and destroy.
 */
int main() {
   Table *table_ptr;
   int x, y, table_size = 5;
   void *value;

   /***** Starting memory checking *****/
   start_memory_check();
   /***** Starting memory checking *****/

   create_table(&table_ptr, table_size, NULL);
   x = 2;
   put(table_ptr, "Peggy", &x);
   y = 1;
   put(table_ptr, "John", &y);

   printf("Table size: %d\n", get_table_size(table_ptr));
   if (is_empty(table_ptr)) {
      printf("Empty table\n");
   } else {
      printf("Not empty\n");
   }

   get_value(table_ptr, "John", &value);
   printf("Value for John %d\n", *(int *) value);

   printf("Removing Peggy: %d\n", remove_entry(table_ptr, "Peggy"));

   printf("Displaying Table\n");
   display_table(table_ptr);

   /* clearing empties the table but keeps it usable */
   clear_table(table_ptr);
   printf("After clearing\n");
   if (is_empty(table_ptr)) {
      printf("Empty table\n");
   } else {
      printf("Not empty\n");
   }
   printf("Displaying Table\n");
   display_table(table_ptr);

   destroy_table(table_ptr);

   /****** Gathering memory checking info *****/
   stop_memory_check();
   /****** Gathering memory checking info *****/

   return 0;
}
/* Test4 tests inserting a large number of keys, checking key_count,
   deleting a few, rechecking the key_count, then clearing the entire
   table.  We will then reinsert some keys. */
static int test4(){
   int x = 1, y =2, z = 3, w =4, k = 5, a =6, b =7 , c =8, d = 9,
       e = 10, f = 11, g = 12;
   Table *t;

   /* Creates table and adds 13 keys to it (one with a NULL value). */
   create_table(&t, z, NULL);
   put(t, "Krabs", &a);
   put(t, "Larry", &b);
   put(t, "Herminator", &c);
   put(t, "Brian", &d);
   put(t, "Steve", &e);
   put(t, "Nelson", &f);
   put(t, "Corwin", &g);
   put(t, "Spongebob", &x);
   put(t, "Patrick", &y);
   put(t, "Squidward", &z);
   put(t, "Sandy", &w);
   put(t, "Art thou feeling it now,", NULL);
   put(t, "Mr. Krabs?", &k);
   printf("%d\n", get_key_count(t));
   display_table(t);

   /* Removes two keys from the table, then rechecks key_count and
      displays the state of the table. */
   remove_entry(t,"Mr. Krabs?");
   remove_entry(t,"Art thou feeling it now,");
   printf("%d\n", get_key_count(t));
   display_table(t);

   /* Clears the table, then rechecks the key_count. */
   clear_table(t);
   printf("%d\n", get_key_count(t));

   /* Reinserts 7 keys, then checks the state of the table again. */
   put(t, "A1", &a);
   put(t, "A2", &b);
   put(t, "A3", &c);
   put(t, "A4", &d);
   put(t, "A5", &e);
   put(t, "A6", &f);
   put(t, "A7", &g);
   display_table(t);

   destroy_table(t);
   return SUCCESS;
}
/*
 * Diagonal strips visual: sweep candidate diagonals across the table
 * (starting 4 columns off each edge so partial diagonals appear); each
 * diagonal lights up with probability arg->energy in a random color.
 * arg->dir selects main (j + i) vs anti (j - i) diagonals.
 *
 * Fix: the anti-diagonal branch bounds-checked j + i but indexed
 * table[i][j - i], allowing out-of-bounds column access; it now checks
 * the column it actually writes.
 */
void strips_diag(struct visual_params *arg)
{
	int i, j, color_index;

	clear_table();
	for (j = -4; j < COLS + 4; j++) {
		if (!prob(arg->energy))
			continue;
		color_index = rand() % 7;
		for (i = 0; i < ROWS_E; i++) {
			if (arg->dir) {
				if (j + i < COLS && j + i >= 0) {
					arg->color(&table[i][j + i], color_index);
				}
			} else {
				if (j - i < COLS && j - i >= 0) {
					arg->color(&table[i][j - i], color_index);
				}
			}
		}
	}
}
/*
 * Render a 4x4 sprite frame onto each of the SQUARES panels; each
 * panel is drawn independently with probability p (scale defined by
 * prob() — not visible here).  Non-zero frame cells are colored with
 * the frame value as the color selector; zero cells are blanked.
 * Row placement is offset per column by get_base_height().
 */
void draw_frame(struct visual_params *arg, char frame[4][4], int p)
{
	int s, i, j, col;
	struct rgb *out;
	clear_table();
	for (s = 0; s < SQUARES; s++) {
		if (prob(p)) {
			for (i = 0; i < ROWS_P; i++) {
				for (j = 0; j < COLS_P; j++) {
					col = j + COLS_P*s;
					out = &table[i + get_base_height(col)][col];
					if (frame[i][j]) {
						arg->color(out, frame[i][j]);
					} else {
						rgb_init(out, 0, 0, 0);
					}
				}
			}
		}
	}
}
/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type, asce_bits;

	init_mm.pgd = swapper_pg_dir;
#ifdef CONFIG_64BIT
	/* Pick a four level (region-second) table only when the vmalloc
	 * area would not fit below 4TB, otherwise three levels suffice. */
	if (VMALLOC_END > (1UL << 42)) {
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
#else
	asce_bits = _ASCE_TABLE_LENGTH;
	pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
	init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.kernel_asce = init_mm.context.asce;
	/* empty out all 2048 top-level entries */
	clear_table((unsigned long *) init_mm.pgd, pgd_type,
		    sizeof(unsigned long)*2048);
	vmem_map_init();

	/* enable virtual mapping in kernel mode */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));

	atomic_set(&init_mm.context.attach_count, 1);

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}
/*
 * Initialize the FTSP time-synchronization module (per the module
 * name).  The sink node declares itself root; every other node starts
 * with an unknown root (0xFFFF).  Clock-regression state (skew and
 * averages) and the sample table are reset, then the protocol's
 * Contiki processes are started.
 */
void ftsp_init(void)
{
	msg.node_id = node_id;
	if (node_id == SINK_ID) {
		msg.root_id = node_id;
	} else {
		msg.root_id = 0xFFFF;	/* no root known yet */
	}
	skew = 0.0;
	local_average = 0;
	offset_average = 0;
	clear_table();

	state = STATE_INIT;
	mode = TS_TIMER_MODE;
	heart_beats = 0;
	num_errors = 0;

	process_start(&ftsp_process, NULL);
	process_start(&ftsp_msg_process, NULL);
	process_start(&ftsp_send_process, NULL);
}