/*
 * free_res - release a restriction entry: unlink it from the active
 * list for its address family, scrub it, and push it onto the
 * matching free list for reuse.
 *
 * res	entry to release (must currently be linked on the v4 or v6
 *	active list)
 * v6	nonzero if res is an IPv6 entry, zero for IPv4
 */
static void free_res( restrict_u * res, int v6 )
{
	restrict_u ** plisthead;
	restrict_u * unlinked;

	restrictcount--;
	/* keep the limited-entry counter in sync with the flag */
	if (RES_LIMITED & res->flags)
		dec_res_limited();

	if (v6)
		plisthead = &restrictlist6;
	else
		plisthead = &restrictlist4;
	UNLINK_SLIST(unlinked, *plisthead, res, link, restrict_u);
	/* res must have been on the list; anything else is corruption */
	INSIST(unlinked == res);

	/* wipe only the bytes valid for this family's entry size,
	 * then park the entry on that family's free list */
	if (v6) {
		zero_mem(res, V6_SIZEOF_RESTRICT_U);
		plisthead = &resfree6;
	} else {
		zero_mem(res, V4_SIZEOF_RESTRICT_U);
		plisthead = &resfree4;
	}
	LINK_SLIST(*plisthead, res, link);
}
/*
 * mon_stop - stop the monitoring software
 *
 * mode		monitoring mode bit(s) to turn off; ignored unless at
 *		least one of the bits is currently enabled
 *
 * Only when the last enabled mode is removed are the MRU list and
 * hash table actually torn down.
 */
void mon_stop( int mode )
{
	mon_entry *mon;

	if (MON_OFF == mon_enabled)
		return;
	if ((mon_enabled & mode) == 0 || mode == MON_OFF)
		return;

	mon_enabled &= ~mode;
	/* other mode bits still active - keep monitoring */
	if (mon_enabled != MON_OFF)
		return;

	/*
	 * Move everything on the MRU list to the free list quickly,
	 * without bothering to remove each from either the MRU list or
	 * the hash table.
	 */
	ITER_DLIST_BEGIN(mon_mru_list, mon, mru, mon_entry)
		mon_free_entry(mon);
	ITER_DLIST_END()

	/* empty the MRU list and hash table. */
	mru_entries = 0;
	INIT_DLIST(mon_mru_list, mru);
	zero_mem(mon_hash, sizeof(*mon_hash) * MON_HASH_SIZE);
}
/*
 * init_seg - bring up the CPU descriptor tables at boot: segment
 * setup first, then the IDT, then clear the 256-slot interrupt
 * handler dispatch table so no stale handlers can fire.
 */
void init_seg(){
	seg_init();	/* segment/GDT setup (see seg_init) */
	idt_init();	/* build + install the IDT */
	zero_mem(&interrupt_handlers, sizeof(w_isr) * 256);
	//tss_flush();
}
/*
 * init_mem - reset global allocator state: wipe the backing store,
 * then zero every bookkeeping field and usage counter so the heap
 * starts from a clean slate.
 */
void init_mem()
{
	/* clear the raw storage before touching bookkeeping */
	zero_mem();

	/* positional / capacity bookkeeping */
	memory->cur_pos = 0;
	memory->fill_number = 0;
	memory->heaps_count = 0;
	memory->free_blocks_count = 0;
	memory->free_bytes = MEM_SIZE;

	/* lifetime usage statistics */
	memory->mem_usage.total_alloc = 0;
	memory->mem_usage.total_alloc_calls = 0;
	memory->mem_usage.total_free_calls = 0;
}
void free_locked_pages(void* ptr, size_t length) { if(ptr == nullptr || length == 0) return; #if defined(BOTAN_TARGET_OS_HAS_POSIX_MLOCK) zero_mem(ptr, length); ::munlock(ptr, length); ::munmap(ptr, length); #else // Invalid argument because no way this pointer was allocated by us throw Invalid_Argument("Invalid ptr to free_locked_pages"); #endif }
/*
* Return a buffer handed out by this allocator.  Pointers we did not
* allocate are rejected; known ones are wiped, freed, and removed
* from the live-allocation map.  Freeing a null pointer is a no-op.
*/
void Compression_Alloc_Info::do_free(void* ptr)
   {
   if(!ptr)
      return;

   auto alloc = m_current_allocs.find(ptr);
   if(alloc == m_current_allocs.end())
      throw std::runtime_error("Compression_Alloc_Info::free got pointer not allocated by us");

   zero_mem(ptr, alloc->second);   // scrub before release
   std::free(ptr);
   m_current_allocs.erase(alloc);
   }
/* * allocate_memory(): Simple memory allocation routine */ void_t *allocate_memory(uint64_t size_request) { uint64_t address; if (heap_current + size_request > heap_tops) { PRINT_STRING("Allocation request exceeds heap's size\r\n"); PRINT_STRING_AND_VALUE("Heap current = 0x", heap_current); PRINT_STRING_AND_VALUE("Requested size = 0x", size_request); PRINT_STRING_AND_VALUE("Heap tops = 0x", heap_tops); return NULL; } address = ALIGN_FORWARD(heap_current, MEM_ALLOCATE_ALIGNMENT); heap_current += size_request; zero_mem((void_t *)address, size_request); return (void_t *)address; }
//////////////////////// {B} CORE FUNCTIONS //////////////////////// void *mm_malloc_ll(size_t size) { if(size == 0) { printf("SIZE REQUEST 0, RETURNING NULL\n"); return NULL; } else { void *ptr; int status = SUCCESS; size = pad_mem_size(size); pthread_mutex_lock(&lock); if(malloc_head == NULL) { status = initialize(size); } if(status == ERROR) { pthread_mutex_unlock(&lock); return NULL; } //print_free_blocks(); ptr = req_free_mem(size); if(ptr == NULL) { status = add_new_mem(size); ptr = req_free_mem(size); } if(status == ERROR) { printf("Failure to allocate.\n"); pthread_mutex_unlock(&lock); return NULL; } zero_mem(ptr); pthread_mutex_unlock(&lock); return ptr; } }
/*
 * mon_page_alloc - carve whole pages off the top of the heap.
 *
 * pages	number of PAGE_SIZE pages requested
 *
 * Returns a PAGE_SIZE-aligned, zeroed region, or NULL when the
 * aligned request would run past heap_tops.
 */
void *CDECL mon_page_alloc(uint64_t pages)
{
	uint64_t page_base;
	uint64_t bytes;

	bytes = pages * PAGE_SIZE;
	/* round the bump pointer up to the next page boundary */
	page_base = ALIGN_FORWARD(heap_current, PAGE_SIZE);

	if (page_base + bytes > heap_tops) {
		PRINT_STRING("Allocation request exceeds heap's size\r\n");
		PRINT_STRING_AND_VALUE("Page aligned heap current = 0x", page_base);
		PRINT_STRING_AND_VALUE("Requested size = 0x", bytes);
		PRINT_STRING_AND_VALUE("Heap tops = 0x", heap_tops);
		return NULL;
	}

	heap_current = page_base + bytes;
	zero_mem((void *)page_base, bytes);
	return (void *)page_base;
}
/*
 * netof - return the network portion of a host address, using a
 * small ring of 8 static buffers so several results stay valid
 * concurrently.
 *
 * hostaddr	address to reduce; IPv4 is masked per old classful
 *		rules (see comment below), IPv6 is truncated to /64.
 *
 * Returns a pointer to a static buffer: not thread-safe, and the
 * result is overwritten after 8 subsequent calls.
 */
sockaddr_u * netof( sockaddr_u *hostaddr )
{
	static sockaddr_u netofbuf[8];
	static int next_netofbuf;
	u_int32 netnum;
	sockaddr_u * netaddr;

	/* rotate through the buffer ring */
	netaddr = &netofbuf[next_netofbuf];
	next_netofbuf = (next_netofbuf + 1) % COUNTOF(netofbuf);

	memcpy(netaddr, hostaddr, sizeof(*netaddr));

	if (IS_IPV4(netaddr)) {
		netnum = SRCADR(netaddr);
		/*
		 * We live in a modern CIDR world where the basement nets, which
		 * used to be class A, are now probably associated with each
		 * host address.  So, for class-A nets, all bits are significant.
		 */
		if (IN_CLASSC(netnum))
			netnum &= IN_CLASSC_NET;
		else if (IN_CLASSB(netnum))
			netnum &= IN_CLASSB_NET;
		SET_ADDR4(netaddr, netnum);
	} else if (IS_IPV6(netaddr))
		/* assume the typical /64 subnet size */
		zero_mem(&NSRCADR6(netaddr)[8], 8);
#ifdef DEBUG
	else {
		msyslog(LOG_ERR, "netof unknown AF %d", AF(netaddr));
		exit(1);
	}
#endif
	return netaddr;
}
/*
 * ereallocz - realloc wrapper that terminates the process on failure.
 *
 * ptr		block to resize (or NULL to allocate fresh)
 * newsz	requested size; 0 is rounded up to 1 so a unique,
 *		valid pointer is always returned
 * priorsz	previously used size; with zero_init, only bytes
 *		beyond this offset are cleared
 * zero_init	nonzero to zero the newly added tail (newsz > priorsz)
 *
 * Never returns NULL: on allocation failure it logs via msyslog and
 * exits.  With EREALLOC_CALLSITE defined the caller's file/line are
 * taken as extra parameters and included in the fatal message
 * (EREALLOC_IMPL consumes them either way).
 */
void * ereallocz(
	void * ptr,
	size_t newsz,
	size_t priorsz,
	int zero_init
#ifdef EREALLOC_CALLSITE		/* ntp_malloc.h */
	,
	const char * file,
	int line
#endif
	)
{
	char * mem;
	size_t allocsz;

	if (0 == newsz)
		allocsz = 1;	/* guarantee a distinct allocation */
	else
		allocsz = newsz;

	mem = EREALLOC_IMPL(ptr, allocsz, file, line);
	if (NULL == mem) {
		msyslog_term = TRUE;	/* force the message to the terminal */
#ifndef EREALLOC_CALLSITE
		msyslog(LOG_ERR, "fatal out of memory (%lu bytes)", (u_long)newsz);
#else
		msyslog(LOG_ERR, "fatal out of memory %s line %d (%lu bytes)", file, line, (u_long)newsz);
#endif
		exit(1);
	}

	/* zero only the newly extended region, never existing data */
	if (zero_init && newsz > priorsz)
		zero_mem(mem + priorsz, newsz - priorsz);
	return mem;
}
/*
 * getresponse - get a (series of) response packet(s) and return the data
 *
 * implcode	expected implementation code in replies
 * reqcode	expected request code in replies
 * ritems	out: total number of data items collected
 * rsize	out: per-item size (including any pad to esize)
 * rdata	out: pointer into pktdata where items were collected
 * esize	expected (minimum) item size; shorter items are padded
 *
 * Returns INFO_OKAY, an INFO_ERR_* code from the server, ERR_TIMEOUT /
 * ERR_INCOMPLETE on timeouts, or -1 on socket errors.  Malformed or
 * stale packets are silently dropped and the wait restarted (goto again).
 */
static int getresponse(
	int implcode,
	int reqcode,
	size_t *ritems,
	size_t *rsize,
	const char **rdata,
	size_t esize
	)
{
	struct resp_pkt rpkt;
	struct sock_timeval tvo;
	size_t items;
	size_t i;
	size_t size;
	size_t datasize;
	char *datap;
	char *tmp_data;
	char haveseq[MAXSEQ+1];	/* which sequence numbers were seen */
	int firstpkt;
	int lastseq;
	int numrecv;
	int seq;
	fd_set fds;
	ssize_t n;
	int pad;
	/* absolute timeout checks. Not 'time_t' by intention! */
	uint32_t tobase;	/* base value for timeout */
	uint32_t tospan;	/* timeout span (max delay) */
	uint32_t todiff;	/* current delay */

	/*
	 * This is pretty tricky.  We may get between 1 and many packets
	 * back in response to the request.  We peel the data out of
	 * each packet and collect it in one long block.  When the last
	 * packet in the sequence is received we'll know how many we
	 * should have had.  Note we use one long time out, should reconsider.
	 */
	*ritems = 0;
	*rsize = 0;
	firstpkt = 1;
	numrecv = 0;
	*rdata = datap = pktdata;
	lastseq = 999;	/* too big to be a sequence number */
	ZERO(haveseq);
	FD_ZERO(&fds);
	tobase = (uint32_t)time(NULL);

    again:
	/* longer timeout for the first packet, shorter between packets */
	if (firstpkt)
		tvo = tvout;
	else
		tvo = tvsout;
	tospan = (uint32_t)tvo.tv_sec + (tvo.tv_usec != 0);
	FD_SET(sockfd, &fds);
	n = select(sockfd+1, &fds, NULL, NULL, &tvo);
	if (n == -1) {
		warning("select fails");
		return -1;
	}

	/*
	 * Check if this is already too late. Trash the data and fake a
	 * timeout if this is so.
	 */
	todiff = (((uint32_t)time(NULL)) - tobase) & 0x7FFFFFFFu;
	if ((n > 0) && (todiff > tospan)) {
		n = recv(sockfd, (char *)&rpkt, sizeof(rpkt), 0);
		n -= n; /* faked timeout return from 'select()'*/
	}

	if (n == 0) {
		/*
		 * Timed out.  Return what we have
		 */
		if (firstpkt) {
			(void) fprintf(stderr,
			    "%s: timed out, nothing received\n", currenthost);
			return ERR_TIMEOUT;
		} else {
			(void) fprintf(stderr,
			    "%s: timed out with incomplete data\n",
			    currenthost);
			if (debug) {
				printf("Received sequence numbers");
				for (n = 0; n <= MAXSEQ; n++)
					if (haveseq[n])
						printf(" %zd,", (size_t)n);
				if (lastseq != 999)
					printf(" last frame received\n");
				else
					printf(" last frame not received\n");
			}
			return ERR_INCOMPLETE;
		}
	}

	n = recv(sockfd, (char *)&rpkt, sizeof(rpkt), 0);
	if (n == -1) {
		warning("read");
		return -1;
	}

	/*
	 * Check for format errors.  Bug proofing.
	 */
	if (n < (ssize_t)RESP_HEADER_SIZE) {
		if (debug)
			printf("Short (%zd byte) packet received\n", (size_t)n);
		goto again;
	}
	if (INFO_VERSION(rpkt.rm_vn_mode) > NTP_VERSION ||
	    INFO_VERSION(rpkt.rm_vn_mode) < NTP_OLDVERSION) {
		if (debug)
			printf("Packet received with version %d\n",
			       INFO_VERSION(rpkt.rm_vn_mode));
		goto again;
	}
	if (INFO_MODE(rpkt.rm_vn_mode) != MODE_PRIVATE) {
		if (debug)
			printf("Packet received with mode %d\n",
			       INFO_MODE(rpkt.rm_vn_mode));
		goto again;
	}
	if (INFO_IS_AUTH(rpkt.auth_seq)) {
		if (debug)
			printf("Encrypted packet received\n");
		goto again;
	}
	if (!ISRESPONSE(rpkt.rm_vn_mode)) {
		if (debug)
			printf("Received request packet, wanted response\n");
		goto again;
	}
	if (INFO_MBZ(rpkt.mbz_itemsize) != 0) {
		if (debug)
			printf("Received packet with nonzero MBZ field!\n");
		goto again;
	}

	/*
	 * Check implementation/request.  Could be old data getting to us.
	 */
	if (rpkt.implementation != implcode || rpkt.request != reqcode) {
		if (debug)
			printf(
			    "Received implementation/request of %d/%d, wanted %d/%d",
			    rpkt.implementation, rpkt.request,
			    implcode, reqcode);
		goto again;
	}

	/*
	 * Check the error code.  If non-zero, return it.
	 */
	if (INFO_ERR(rpkt.err_nitems) != INFO_OKAY) {
		if (debug && ISMORE(rpkt.rm_vn_mode)) {
			printf("Error code %d received on not-final packet\n",
			       INFO_ERR(rpkt.err_nitems));
		}
		return (int)INFO_ERR(rpkt.err_nitems);
	}

	/*
	 * Collect items and size.  Make sure they make sense.
	 */
	items = INFO_NITEMS(rpkt.err_nitems);
	size = INFO_ITEMSIZE(rpkt.mbz_itemsize);
	/* older servers may send smaller items; pad each up to esize */
	if (esize > size)
		pad = esize - size;
	else
		pad = 0;
	datasize = items * size;
	if ((size_t)datasize > (n-RESP_HEADER_SIZE)) {
		if (debug)
			printf(
			    "Received items %zu, size %zu (total %zu), data in packet is %zu\n",
			    items, size, datasize, n-RESP_HEADER_SIZE);
		goto again;
	}

	/*
	 * If this isn't our first packet, make sure the size matches
	 * the other ones.
	 */
	if (!firstpkt && size != *rsize) {
		if (debug)
			printf("Received itemsize %zu, previous %zu\n",
			       size, *rsize);
		goto again;
	}

	/*
	 * If we've received this before, toss it
	 */
	seq = INFO_SEQ(rpkt.auth_seq);
	if (haveseq[seq]) {
		if (debug)
			printf("Received duplicate sequence number %d\n", seq);
		goto again;
	}
	haveseq[seq] = 1;

	/*
	 * If this is the last in the sequence, record that.
	 */
	if (!ISMORE(rpkt.rm_vn_mode)) {
		if (lastseq != 999) {
			printf("Received second end sequence packet\n");
			goto again;
		}
		lastseq = seq;
	}

	/*
	 * So far, so good.  Copy this data into the output array.  Bump
	 * the timeout base, in case we expect more data.
	 */
	tobase = (uint32_t)time(NULL);
	if ((datap + datasize + (pad * items)) > (pktdata + pktdatasize)) {
		size_t offset = datap - pktdata;
		growpktdata();
		*rdata = pktdata;	/* might have been realloced ! */
		datap = pktdata + offset;
	}
	/*
	 * We now move the pointer along according to size and number of
	 * items.  This is so we can play nice with older implementations
	 */
	tmp_data = rpkt.u.data;
	for (i = 0; i < items; i++) {
		memcpy(datap, tmp_data, (unsigned)size);
		tmp_data += size;
		zero_mem(datap + size, pad);	/* clear the pad bytes */
		datap += size + pad;
	}

	if (firstpkt) {
		firstpkt = 0;
		*rsize = size + pad;
	}
	*ritems += items;

	/*
	 * Finally, check the count of received packets.  If we've got them
	 * all, return
	 */
	++numrecv;
	if (numrecv <= lastseq)
		goto again;
	return INFO_OKAY;
}
void init() { struct ext_mem_format *ptr_exts; // structure-pointer to structure struct ext_mem_format temp_struc; // temporary structure struct inode ino_kern; struct ext_mem_format ext_mem_strucs[50]; // 386 usable RAM list format usec_t ino_sec; ubyte_t num_servs; // number of servers to read to memory uint_t tot_mem, counter, counter1; // counters byte *ptr_mem=(byte *)ext_mem_strucs; // byte-pointer to structure /* * Get absolute sector for first ivector. * It is stored in a global variable. */ sosfs_ivec_first(); /* read kernel's and server's inode sector */ ino_sec = sosfs_iget("/boot/kernel"); /* if error */ if (ino_sec == -1) { low_putstr("Inode not found\n"); while (1); } /* read kernel's inode */ sosfs_read_raw(ino_sec, &ino_kern); /* zero out the vector */ for (tot_mem=0; tot_mem<512; tot_mem++) buffer[tot_mem]=0; /* * Copy arguments from real-mode to protected-mode. * The monitor program wishes to pass the arguments to the kernel. * This is done by a little hack that uses an interrupt instruction * passing a buffer to copy from real-mode to protected-mode using * BIOS. */ kargs_init(); /* * Get total amount of RAM. * The most reliable way to know the system's memory-mapping * is by using the BIOS; we use int $0x15 function 0xe820. */ low_tot_ram(ext_mem_strucs); /* point to memory-map vector */ ptr_exts=ext_mem_strucs; /* * Traverse the structures until magic number found. * Our interrupt handler set a magic number after the last * entry returned by the BIOS handler. 
*/ kprintf("\n"); kprintf("BIOS-provided physical-memory mappings\n"); kprintf("======================================================\n"); while (ptr_exts->acpi_ext != 0x12345) { /* if we must ignore the entry, so we do */ if (ptr_exts->reg_len == 0) continue; /* print type of memory to terminal */ switch (ptr_exts->reg_type) { case 1: kprintf("Usable RAM at "); if (ptr_exts->base_addr[0] != 0) { zero_mem(ptr_exts); put_list(ptr_exts); } break; case 2: kprintf("Reserved/unusable RAM at "); break; case 3: case 4: case 5: kprintf("ACPI RAM at "); break; } /* * Create a temporary structure and copy the entire structure * to it. * Print the address range. */ temp_struc = *ptr_exts; // copy structure temp_struc.reg_len[0] += temp_struc.base_addr[0]; kprintf("%x-", &temp_struc); kprintf("%x", temp_struc.reg_len); kprintf("\n"); for (tot_mem=0; tot_mem<512; tot_mem++) buffer[tot_mem]=0; ptr_exts++; // advance one structure } kprintf("======================================================\n"); /* * Set up initial memory mappings. * Load user-level servers to predefined physical memory and * identically map them. Map the monitor program and the kernel * also to identical physical addresses. Also map those programs' * heaps. */ load_init(); heap_init(); /* list returned by BIOS might be unsorted */ sort_free_list(); /* machine-dependent format to machine-independent format */ dep_2_indep_list(ptr_list); mmap_init(); enable_paging(); SOS_init(); }
static void idt_init(){ i_ptr.limit = sizeof(struct w_idte) * 256 -1; i_ptr.base = (w_uint32)&idt_entries; zero_mem(&idt_entries, sizeof(struct w_idte) * 256); /* Remap IRQ table */ out_byte(0x20, 0x11); out_byte(0xA0, 0x11); out_byte(0x21, 0x20); out_byte(0xA1, 0x28); out_byte(0x21, 0x04); out_byte(0xA1, 0x02); out_byte(0x21, 0x01); out_byte(0xA1, 0x01); out_byte(0x21, 0x0); out_byte(0xA1, 0x0); set_idt( 0, (w_uint32)isr0 , SEG_KERNEL_CODE, 0x8E); set_idt( 1, (w_uint32)isr1 , SEG_KERNEL_CODE, 0x8E); set_idt( 2, (w_uint32)isr2 , SEG_KERNEL_CODE, 0x8E); set_idt( 3, (w_uint32)isr3 , SEG_KERNEL_CODE, 0x8E); set_idt( 4, (w_uint32)isr4 , SEG_KERNEL_CODE, 0x8E); set_idt( 5, (w_uint32)isr5 , SEG_KERNEL_CODE, 0x8E); set_idt( 6, (w_uint32)isr6 , SEG_KERNEL_CODE, 0x8E); set_idt( 7, (w_uint32)isr7 , SEG_KERNEL_CODE, 0x8E); set_idt( 8, (w_uint32)isr8 , SEG_KERNEL_CODE, 0x8E); set_idt( 9, (w_uint32)isr9 , SEG_KERNEL_CODE, 0x8E); set_idt(10, (w_uint32)isr10, SEG_KERNEL_CODE, 0x8E); set_idt(11, (w_uint32)isr11, SEG_KERNEL_CODE, 0x8E); set_idt(12, (w_uint32)isr12, SEG_KERNEL_CODE, 0x8E); set_idt(13, (w_uint32)isr13, SEG_KERNEL_CODE, 0x8E); set_idt(14, (w_uint32)isr14, SEG_KERNEL_CODE, 0x8E); set_idt(15, (w_uint32)isr15, SEG_KERNEL_CODE, 0x8E); set_idt(16, (w_uint32)isr16, SEG_KERNEL_CODE, 0x8E); set_idt(17, (w_uint32)isr17, SEG_KERNEL_CODE, 0x8E); set_idt(18, (w_uint32)isr18, SEG_KERNEL_CODE, 0x8E); set_idt(19, (w_uint32)isr19, SEG_KERNEL_CODE, 0x8E); set_idt(20, (w_uint32)isr20, SEG_KERNEL_CODE, 0x8E); set_idt(21, (w_uint32)isr21, SEG_KERNEL_CODE, 0x8E); set_idt(22, (w_uint32)isr22, SEG_KERNEL_CODE, 0x8E); set_idt(23, (w_uint32)isr23, SEG_KERNEL_CODE, 0x8E); set_idt(24, (w_uint32)isr24, SEG_KERNEL_CODE, 0x8E); set_idt(25, (w_uint32)isr25, SEG_KERNEL_CODE, 0x8E); set_idt(26, (w_uint32)isr26, SEG_KERNEL_CODE, 0x8E); set_idt(27, (w_uint32)isr27, SEG_KERNEL_CODE, 0x8E); set_idt(28, (w_uint32)isr28, SEG_KERNEL_CODE, 0x8E); set_idt(29, (w_uint32)isr29, SEG_KERNEL_CODE, 0x8E); 
set_idt(30, (w_uint32)isr30, SEG_KERNEL_CODE, 0x8E); set_idt(31, (w_uint32)isr31, SEG_KERNEL_CODE, 0x8E); set_idt(32, (w_uint32)irq0, SEG_KERNEL_CODE, 0x8E); set_idt(33, (w_uint32)irq1, SEG_KERNEL_CODE, 0x8E); set_idt(34, (w_uint32)irq2, SEG_KERNEL_CODE, 0x8E); set_idt(35, (w_uint32)irq3, SEG_KERNEL_CODE, 0x8E); set_idt(36, (w_uint32)irq4, SEG_KERNEL_CODE, 0x8E); set_idt(37, (w_uint32)irq5, SEG_KERNEL_CODE, 0x8E); set_idt(38, (w_uint32)irq6, SEG_KERNEL_CODE, 0x8E); set_idt(39, (w_uint32)irq7, SEG_KERNEL_CODE, 0x8E); set_idt(40, (w_uint32)irq8, SEG_KERNEL_CODE, 0x8E); set_idt(41, (w_uint32)irq9, SEG_KERNEL_CODE, 0x8E); set_idt(42, (w_uint32)irq10, SEG_KERNEL_CODE, 0x8E); set_idt(43, (w_uint32)irq11, SEG_KERNEL_CODE, 0x8E); set_idt(44, (w_uint32)irq12, SEG_KERNEL_CODE, 0x8E); set_idt(45, (w_uint32)irq13, SEG_KERNEL_CODE, 0x8E); set_idt(46, (w_uint32)irq14, SEG_KERNEL_CODE, 0x8E); set_idt(47, (w_uint32)irq15, SEG_KERNEL_CODE, 0x8E); idt_flush(&i_ptr); }