int main() { /* Simple test */ pmap_t pmap; pmap_init(&pmap); pmap.asid = 10; set_active_pmap(&pmap); vm_page_t *pg1 = pm_alloc(4); vaddr_t ex_addr = PAGESIZE * 10; pmap_map(&pmap, ex_addr, pg1->paddr, pg1->size, PMAP_VALID | PMAP_DIRTY); int *x = (int *) ex_addr; for (int i = 0; i < 1024 * pg1->size; i++) *(x + i) = i; for (int i = 0; i < 1024 * pg1->size; i++) assert(*(x + i) == i); vm_page_t *pg2 = pm_alloc(1); ex_addr = PAGESIZE * 2000; pmap_map(&pmap, ex_addr, pg2->paddr, pg2->size, PMAP_VALID | PMAP_DIRTY); x = (int *) ex_addr; for (int i = 0; i < 1024 * pg2->size; i++) *(x + i) = i; for (int i = 0; i < 1024 * pg2->size; i++) assert(*(x + i) == i); pm_free(pg1); pm_free(pg2); pmap_delete(&pmap); kprintf("Tests passed\n"); return 0; }
/* Create a single-reader single-writer queue holding num_msgs messages of
 * bytes_per_msg bytes each (rounded up to whole int32s).  Returns NULL if
 * any allocation fails. */
PMEXPORT PmQueue *Pm_QueueCreate(long num_msgs, int32_t bytes_per_msg)
{
    /* round the message size up to a whole number of int32 words */
    int32_t int32s_per_msg =
        (int32_t) (((bytes_per_msg + sizeof(int32_t) - 1) &
                    ~(sizeof(int32_t) - 1)) / sizeof(int32_t));
    PmQueueRep *queue = (PmQueueRep *) pm_alloc(sizeof(PmQueueRep));
    if (!queue) /* memory allocation failed */
        return NULL;
    /* need extra word per message for non-zero encoding */
    queue->len = num_msgs * (int32s_per_msg + 1);
    queue->buffer = (int32_t *) pm_alloc(queue->len * sizeof(int32_t));
    if (!queue->buffer) {
        pm_free(queue);
        return NULL;
    }
    /* BUG FIX: the original called bzero() on queue->buffer BEFORE the NULL
     * check above, dereferencing a potentially NULL pointer on OOM, and then
     * zeroed the buffer a second time.  Zero it exactly once, after the
     * check. */
    bzero(queue->buffer, queue->len * sizeof(int32_t));
    /* allocate the "peek" buffer */
    queue->peek = (int32_t *) pm_alloc(int32s_per_msg * sizeof(int32_t));
    if (!queue->peek) {
        /* free everything allocated so far and return */
        pm_free(queue->buffer);
        pm_free(queue);
        return NULL;
    }
    queue->head = 0;
    queue->tail = 0;
    /* msg_size is in words */
    queue->msg_size = int32s_per_msg + 1; /* note extra word is counted */
    queue->overflow = FALSE;
    queue->peek_overflow = FALSE;
    queue->peek_flag = FALSE;
    return queue;
}
/* Register the "null" and "zero" pseudo-devices with devfs.
 * zero_page presumably backs reads from /dev/zero and junk_page absorbs
 * writes to /dev/null — TODO confirm against the vnodeops implementations.
 * NOTE(review): the pm_alloc(1) results are not checked for NULL; this is
 * presumably acceptable during early boot — confirm pm_alloc cannot fail
 * here. */
static void init_dev_null(void) {
  zero_page = pm_alloc(1);
  junk_page = pm_alloc(1);
  /* set up and register /dev/null */
  vnodeops_init(&dev_null_vnodeops);
  devfs_makedev(NULL, "null", &dev_null_vnodeops, NULL);
  /* set up and register /dev/zero */
  vnodeops_init(&dev_zero_vnodeops);
  devfs_makedev(NULL, "zero", &dev_zero_vnodeops, NULL);
}
/**
 * @brief   Enables an endpoint.
 *
 * @param[in] usbp      pointer to the @p USBDriver object
 * @param[in] ep        endpoint number
 *
 * @notapi
 */
void usb_lld_init_endpoint(USBDriver *usbp, usbep_t ep) {
  uint16_t nblocks, epr;
  stm32_usb_descriptor_t *dp;
  const USBEndpointConfig *epcp = usbp->epc[ep];

  /* Setting the endpoint type.*/
  switch (epcp->ep_mode & USB_EP_MODE_TYPE) {
  case USB_EP_MODE_TYPE_ISOC:
    epr = EPR_EP_TYPE_ISO;
    break;
  case USB_EP_MODE_TYPE_BULK:
    epr = EPR_EP_TYPE_BULK;
    break;
  case USB_EP_MODE_TYPE_INTR:
    epr = EPR_EP_TYPE_INTERRUPT;
    break;
  default:
    epr = EPR_EP_TYPE_CONTROL;
  }

  /* IN endpoint settings, always in NAK mode initially.*/
  if (epcp->in_cb != NULL)
    epr |= EPR_STAT_TX_NAK;

  /* OUT endpoint settings. If the endpoint is in packet mode then it must
     start ready to accept data else it must start in NAK mode.*/
  if (epcp->out_cb != NULL) {
    if (epcp->ep_mode & USB_EP_MODE_PACKET) {
      usbp->receiving |= (1 << ep);
      epr |= EPR_STAT_RX_VALID;
    }
    else
      epr |= EPR_STAT_RX_NAK;
  }

  /* EPxR register setup.*/
  EPR_SET(ep, epr | ep);
  EPR_TOGGLE(ep, epr);

  /* Endpoint size and address initialization.*/
  /* nblocks encodes the RX buffer size for the buffer descriptor table:
     above 62 bytes the size is expressed in 32-byte blocks (BLSIZE bit
     0x8000 set), otherwise in 2-byte blocks — this matches the STM32 USB
     COUNTn_RX register encoding. */
  if (epcp->out_maxsize > 62)
    nblocks = (((((epcp->out_maxsize - 1) | 0x1f) + 1) / 32) << 10) | 0x8000;
  else
    nblocks = ((((epcp->out_maxsize - 1) | 1) + 1) / 2) << 10;
  dp = USB_GET_DESCRIPTOR(ep);
  dp->TXCOUNT = 0;
  dp->RXCOUNT = nblocks;
  /* pm_alloc here carves space out of the USB packet memory (not heap) */
  dp->TXADDR = pm_alloc(usbp, epcp->in_maxsize);
  dp->RXADDR = pm_alloc(usbp, epcp->out_maxsize);
}
/* pm_add_device -- describe interface/device pair to library
 *
 * This is called at initialization time, once for each
 * interface (e.g. DirectSound) and device (e.g. SoundBlaster 1)
 * The strings are retained but NOT COPIED, so do not destroy them!
 *
 * returns pmInsufficientMemory if device memory is exceeded
 * otherwise returns pmNoError
 */
PmError pm_add_device(char *interf, char *name, int input, void *descriptor,
                      pm_fns_type dictionary) {
    if (pm_descriptor_index >= pm_descriptor_max) {
        /* expand the descriptor table by 32 entries */
        descriptor_type new_descriptors =
            pm_alloc(sizeof(descriptor_node) * (pm_descriptor_max + 32));
        if (!new_descriptors) return pmInsufficientMemory;
        if (descriptors) {
            memcpy(new_descriptors, descriptors,
                   sizeof(descriptor_node) * pm_descriptor_max);
            /* BUG FIX: descriptors was allocated with pm_alloc(), so it must
             * be released with the matching pm_free(), not free(). */
            pm_free(descriptors);
        }
        pm_descriptor_max += 32;
        descriptors = new_descriptors;
    }
    descriptors[pm_descriptor_index].pub.interf = interf;
    descriptors[pm_descriptor_index].pub.name = name;
    descriptors[pm_descriptor_index].pub.input = input;
    descriptors[pm_descriptor_index].pub.output = !input;
    /* default state: nothing to close (for automatic device closing) */
    descriptors[pm_descriptor_index].pub.opened = FALSE;
    /* ID number passed to win32 multimedia API open */
    descriptors[pm_descriptor_index].descriptor = descriptor;
    /* points to PmInternal, allows automatic device closing */
    descriptors[pm_descriptor_index].internalDescriptor = NULL;
    descriptors[pm_descriptor_index].dictionary = dictionary;
    pm_descriptor_index++;
    return pmNoError;
}
/* Clone the kernel half of the page directory into a freshly allocated
 * directory page and install a recursive self-mapping, so the new
 * directory can be edited through virtual memory once loaded into cr3.
 * Returns NULL if the physical page allocation fails. */
uint32_t *vm_copy_kernel_pdir() {
  uint32_t *new_pdir = pm_alloc();
  if (!new_pdir) return NULL;
  /* identity-map the new directory so we can write to it right away */
  map_page(new_pdir, new_pdir, 0);
  memset(new_pdir, 0x0, PAGE_SIZE);
  for (unsigned i = 0; i < PAGE_SIZE / sizeof(uint32_t); i++){
    if (kernel_pdir[i] & PDE_P){
      if (!(kernel_pdir[i] & PDE_U)){
        /* page directory entry present and meant for kernel, let's copy */
        new_pdir[i] = kernel_pdir[i];
      }
    }
  }
  /* map the page directory into itself at the next-to-last page directory entry
   * so that when this page directory is loaded into the cr3 it will be mapped
   * and is going to be easily modifiable */
  /* basically, it's going to replace the kernel's page directory in the virtual
   * memory */
  new_pdir[1023] = (uint32_t)new_pdir | PDE_P | PDE_W;
  return new_pdir;
}
/* ============================================================================= general MIDI device queries ============================================================================= */ static void pm_winmm_general_inputs() { UINT i; WORD wRtn; midi_num_inputs = midiInGetNumDevs(); midi_in_caps = pm_alloc(sizeof(MIDIINCAPS) * midi_num_inputs); if (midi_in_caps == NULL) { // if you can't open a particular system-level midi interface // (such as winmm), we just consider that system or API to be // unavailable and move on without reporting an error. This // may be the wrong thing to do, especially in this case. return; } for (i = 0; i < midi_num_inputs; i++) { wRtn = midiInGetDevCaps(i, (LPMIDIINCAPS) &midi_in_caps[i], sizeof(MIDIINCAPS)); if (wRtn == MMSYSERR_NOERROR) { /* ignore errors here -- if pm_descriptor_max is exceeded, some devices will not be accessible. */ pm_add_device("MMSystem", midi_in_caps[i].szPname, TRUE, (void *) i, &pm_winmm_in_dictionary); } } }
PmQueue *Pm_QueueCreate(long num_msgs, long bytes_per_msg) { PmQueueRep *queue = (PmQueueRep *) pm_alloc(sizeof(PmQueueRep)); /* arg checking */ if (!queue) return NULL; queue->len = num_msgs * bytes_per_msg; queue->buffer = pm_alloc(queue->len); if (!queue->buffer) { pm_free(queue); return NULL; } queue->head = 0; queue->tail = 0; queue->msg_size = bytes_per_msg; queue->overflow = FALSE; return queue; }
/* Allocate one physical page for a page directory and zero it.
 * Returns NULL if the allocation fails. */
uint32_t *new_pdir(void) {
  uint32_t *pdir = pm_alloc();
  if (pdir)
    memset(pdir, 0x0, PAGE_SIZE);
  return pdir;
}
/* ============================================================================= buffer handling ============================================================================= */ static MIDIHDR *allocate_buffer(long data_size) { LPMIDIHDR hdr = (LPMIDIHDR) pm_alloc(MIDIHDR_SYSEX_SIZE(data_size)); MIDIEVENT *evt; if (!hdr) return NULL; evt = (MIDIEVENT *) (hdr + 1); /* place MIDIEVENT after header */ hdr->lpData = (LPSTR) evt; hdr->dwBufferLength = MIDIHDR_SYSEX_BUFFER_LENGTH(data_size); hdr->dwBytesRecorded = 0; hdr->dwFlags = 0; hdr->dwUser = hdr->dwBufferLength; return hdr; }
/* Allocate a MIDIHDR for sysex data; the MIDIEVENT payload is placed
 * immediately after the header in the same allocation.
 * Returns NULL on allocation failure. */
static MIDIHDR *allocate_sysex_buffer(long data_size)
{
    /* we're actually allocating more than data_size because the buffer
     * will include the MIDIEVENT header in addition to the data */
    LPMIDIHDR hdr = (LPMIDIHDR) pm_alloc(MIDIHDR_SYSEX_SIZE(data_size));
    MIDIEVENT *evt;
    if (!hdr) return NULL;
    evt = (MIDIEVENT *) (hdr + 1); /* place MIDIEVENT after header */
    hdr->lpData = (LPSTR) evt;
    /* NOTE(review): unlike allocate_buffer(), dwBufferLength is not set
     * here — confirm the caller fills it in before the header is passed to
     * midiOutPrepareHeader. */
    hdr->dwFlags = 0;
    hdr->dwUser = 0;
    return hdr;
}
/* Allocate `count` output buffers of `data_size` bytes each and store the
 * pointers in m->buffers.  On failure everything allocated so far is freed
 * and pmInsufficientMemory is returned; on success m->num_buffers == count.
 */
static PmError allocate_buffers(midiwinmm_type m, long data_size, long count)
{
    long i;
    /* buffers is an array of count pointers to MIDIHDR/MIDIEVENT struct */
    m->num_buffers = 0; /* in case no memory can be allocated */
    m->buffers = (LPMIDIHDR *) pm_alloc(sizeof(LPMIDIHDR) * count);
    if (!m->buffers) return pmInsufficientMemory;
    for (i = 0; i < count; i++) {
        LPMIDIHDR hdr = allocate_buffer(data_size);
        if (!hdr) {
            /* BUG FIX: the original left NULL entries in m->buffers and
             * reported num_buffers == count anyway; later code dereferences
             * these entries (e.g. r->dwFlags), crashing on OOM.  Instead,
             * roll back everything allocated so far and report failure. */
            while (--i >= 0)
                pm_free(m->buffers[i]);
            pm_free(m->buffers);
            m->buffers = NULL;
            return pmInsufficientMemory;
        }
        m->buffers[i] = hdr;
    }
    m->num_buffers = count;
    return pmNoError;
}
/* ============================================================================= buffer handling ============================================================================= */ static MIDIHDR *allocate_buffer(long data_size) { /* we're actually allocating slightly more than data_size because one more word of * data is contained in MIDIEVENT. We include the size of MIDIEVENT because we need * the MIDIEVENT header in addition to the data */ LPMIDIHDR hdr = (LPMIDIHDR) pm_alloc(MIDIHDR_SIZE(data_size)); MIDIEVENT *evt; if (!hdr) return NULL; evt = (MIDIEVENT *) (hdr + 1); /* place MIDIEVENT after header */ hdr->lpData = (LPSTR) evt; hdr->dwBufferLength = sizeof(MIDIEVENT) + data_size; hdr->dwFlags = 0; hdr->dwUser = 0; return hdr; }
/* Imprint primordial non-Gaussianity onto the Fourier-space density field
 * delta_k: build the potential in k-space, go to real space, apply the
 * non-Gaussian transformation of the potential, then transform back and
 * apply the PNG transfer function.  delta_k is modified in place. */
void fastpm_png_induce_correlation(FastPMPNGaussian * png, PM * pm, FastPMFloat * delta_k) {
    /* real-space scratch field, released before the final transfer */
    FastPMFloat * g_x = pm_alloc(pm);
    png->Volume = pm->Volume;
    fastpm_ic_induce_correlation(pm, delta_k, (fastpm_fkfunc) fastpm_png_potential, png);
    pm_assign(pm, delta_k, g_x);
    pm_c2r(pm, g_x);          /* potential to real space */
    fastpm_png_transform_potential(pm, g_x, png);
    pm_r2c(pm, g_x, delta_k); /* back to Fourier space, into delta_k */
    pm_free(pm, g_x);
    fastpm_apply_any_transfer(pm, delta_k, delta_k, (fastpm_fkfunc) fastpm_png_transfer_function, png);
}
/* Ensure the page-table page covering vaddr is present in pmap's directory
 * and load the TLB entry that maps it into the PTE window. */
static void pde_map(pmap_t *pmap, vaddr_t vaddr) {
  uint32_t pde_index = PDE_INDEX(vaddr);
  if (!(pmap->pde[pde_index] & V_MASK)) {
    /* part of page table isn't located in memory */
    vm_page_t *pg = pm_alloc(1);
    /* NOTE(review): pg is used without a NULL check — confirm pm_alloc(1)
     * cannot fail at this point. */
    TAILQ_INSERT_TAIL(&pmap->pte_pages, pg, pt.list);
    ENTRYLO_SET_PADDR(pmap->pde[pde_index], pg->paddr);
    ENTRYLO_SET_V(pmap->pde[pde_index], 1);
    ENTRYLO_SET_D(pmap->pde[pde_index], 1);
  }
  /* make sure proper address is in tlb */
  /* a TLB entry maps an even/odd page pair, so align down to an even index
   * and load both halves */
  pde_index &= ~1;
  tlbhi_t entryhi = (PTE_BASE + pde_index * PAGESIZE) | pmap->asid;
  tlblo_t entrylo0 = pmap->pde[pde_index];
  tlblo_t entrylo1 = pmap->pde[pde_index + 1];
  tlb_overwrite_random(entryhi, entrylo0, entrylo1);
}
/*
 * Allocate <sz> worth of physical pages, and map them continuously into the
 * current virtual address space, starting at address <vaddr>
 */
void vm_alloc_pages_at(void *vaddr, unsigned flags, unsigned sz)
{
    void *physical_page;
    if (sz == 0)
        return;
    vaddr = PALIGNDOWN(vaddr);
    /* round up so a partial trailing page is still mapped */
    unsigned npages = sz / PAGE_SIZE + (sz % PAGE_SIZE > 0);
    /* BUG FIX: the loop index was a signed int compared against the
     * unsigned npages (sign-compare); use an unsigned index. */
    for (unsigned i = 0; i < npages; i++){
        /* NOTE(review): pm_alloc() failure is not checked — a NULL return
         * would be handed to map_page(); confirm pm_alloc cannot fail in
         * this kernel or add a panic here. */
        physical_page = pm_alloc();
        map_page(physical_page, vaddr, flags);
        vaddr += PAGE_SIZE;
    }
}
static void pm_winmm_general_outputs() { UINT i; DWORD wRtn; midi_num_outputs = midiOutGetNumDevs(); midi_out_caps = pm_alloc( sizeof(MIDIOUTCAPS) * midi_num_outputs ); if (midi_out_caps == NULL) { // no error is reported -- see pm_winmm_general_inputs return; } for (i = 0; i < midi_num_outputs; i++) { wRtn = midiOutGetDevCaps(i, (LPMIDIOUTCAPS) &midi_out_caps[i], sizeof(MIDIOUTCAPS)); if (wRtn == MMSYSERR_NOERROR) { pm_add_device("MMSystem", midi_out_caps[i].szPname, FALSE, (void *) i, &pm_winmm_out_dictionary); } } }
/* Allocate `count` output buffers of `data_size` bytes each into
 * m->buffers.  On failure, everything allocated so far is released and
 * pmInsufficientMemory is returned; on success m->num_buffers == count. */
static PmError allocate_buffers(midiwinmm_type m, long data_size, long count)
{
    int idx, j;
    /* buffers is an array of count pointers to MIDIHDR/MIDIEVENT struct */
    m->num_buffers = 0; /* in case no memory can be allocated */
    m->buffers = (LPMIDIHDR *) pm_alloc(sizeof(LPMIDIHDR) * count);
    if (m->buffers == NULL)
        return pmInsufficientMemory;
    m->max_buffers = count;
    for (idx = 0; idx < count; idx++) {
        LPMIDIHDR hdr = allocate_buffer(data_size);
        if (hdr == NULL) {
            /* free everything allocated so far and return */
            for (j = idx - 1; j >= 0; j--)
                pm_free(m->buffers[j]);
            pm_free(m->buffers);
            m->max_buffers = 0;
            return pmInsufficientMemory;
        }
        m->buffers[idx] = hdr;
    }
    m->num_buffers = count;
    return pmNoError;
}
/*
 * Map a single page
 *
 * Installs a virtual->physical translation for one page in the kernel page
 * directory, creating the page table if necessary.  Remapping a page to a
 * different physical frame or with different flags is a fatal error (halt).
 */
void map_page(void *paddr, void *vaddr, unsigned flags)
{
  paddr = PALIGNDOWN(paddr);
  vaddr = PALIGNDOWN(vaddr);
  uint32_t *pdir = KERN_PDIR_ADDR;
  /* locate this vaddr's page table through the recursive-mapping window */
  uint32_t *ptab = ((uint32_t *)KERN_PTABS_ADDR) + (0x400 * vm_pdir_idx(vaddr));
  flags |= PTE_P | PTE_W;
  if (pdir[vm_pdir_idx(vaddr)] & PDE_P){
    /* the corresponding page table exists */
    if (ptab[vm_ptab_idx(vaddr)] & PTE_P){
      /* page is already mapped */
      /* mapping the same vaddr to a different frame is an error */
      if ((ptab[vm_ptab_idx(vaddr)] & 0xfffff000) != ((uint32_t)paddr & 0xfffff000)){
        kprintf("[vm] ERROR: mapping page 0x%x to 0x%x but is already mapped to 0x%x\n", vaddr, paddr, ptab[vm_ptab_idx(vaddr)]);
        halt();
      } else {
        /* same frame, but changing the low flag bits is also an error */
        if ((ptab[vm_ptab_idx(vaddr)] & 0x1f) != (flags & 0x1f)){
          kprintf("[vm] ERROR: changing flags of mapping 0x%x (from 0x%x to 0x%x)\n", vaddr, ptab[vm_ptab_idx(vaddr)] & 0xfff, flags);
          halt();
        }
      }
      return;
    } else {
      /* page isn't mapped */
      pdir[vm_pdir_idx(vaddr)] |= flags;
      ptab[vm_ptab_idx(vaddr)] = (uint32_t)paddr | flags;
    }
  } else {
    /* the page table doesn't exist */
    /* NOTE(review): pm_alloc() is not checked for NULL here — confirm it
     * cannot fail during page-table creation. */
    uint32_t *new_ptab = pm_alloc();
    pdir[vm_pdir_idx(vaddr)] = (uint32_t)new_ptab | flags;
    vm_flush_page(&pdir[vm_pdir_idx(vaddr)]);
    ptab[vm_ptab_idx(vaddr)] = (uint32_t)paddr | flags;
  }
}
int main() { vm_page_t *page = pm_alloc(1); MALLOC_DEFINE(mp, "testing memory pool"); kmalloc_init(mp); kmalloc_add_arena(mp, page->vaddr, PAGESIZE); void *ptr1 = kmalloc(mp, 15, 0); assert(ptr1 != NULL); void *ptr2 = kmalloc(mp, 23, 0); assert(ptr2 != NULL && ptr2 > ptr1); void *ptr3 = kmalloc(mp, 7, 0); assert(ptr3 != NULL && ptr3 > ptr2); void *ptr4 = kmalloc(mp, 2000, 0); assert(ptr4 != NULL && ptr4 > ptr3); void *ptr5 = kmalloc(mp, 1000, 0); assert(ptr5 != NULL); kfree(mp, ptr1); kfree(mp, ptr2); kmalloc_dump(mp); kfree(mp, ptr3); kfree(mp, ptr5); void *ptr6 = kmalloc(mp, 2000, M_NOWAIT); assert(ptr6 == NULL); pm_free(page); return 0; }
/* Return a MIDIHDR that the driver does not currently own (MHDR_PREPARED
 * clear).  Scans the pool round-robin; if every buffer is busy, blocks on
 * buffer_signal and, on timeout, grows the pool (a large sysex message may
 * need more buffers than were initially allocated).  Does not return until
 * a buffer is available. */
static LPMIDIHDR get_free_output_buffer(PmInternal *midi)
{
    LPMIDIHDR r = NULL;
    midiwinmm_type m = (midiwinmm_type) midi->descriptor;
    while (TRUE) {
        int i;
        for (i = 0; i < m->num_buffers; i++) {
            /* cycle through buffers, modulo m->num_buffers */
            m->next_buffer++;
            if (m->next_buffer >= m->num_buffers) m->next_buffer = 0;
            r = m->buffers[m->next_buffer];
            /* MHDR_PREPARED clear means the driver has released the buffer */
            if ((r->dwFlags & MHDR_PREPARED) == 0) goto found_buffer;
        }
        /* after scanning every buffer and not finding anything, block */
        if (WaitForSingleObject(m->buffer_signal, 1000) == WAIT_TIMEOUT) {
#ifdef DEBUG
            printf("PortMidi warning: get_free_output_buffer() wait timed out after 1000ms\n");
#endif
            /* if we're trying to send a sysex message, maybe the
             * message is too big and we need more message buffers.
             * Expand the buffer pool by 128KB using 1024-byte buffers.
             */
            /* first, expand the buffers array if necessary */
            if (!m->buffers_expanded) {
                LPMIDIHDR *new_buffers = (LPMIDIHDR *) pm_alloc(
                        (m->num_buffers + NUM_EXPANSION_BUFFERS) *
                        sizeof(LPMIDIHDR));
                /* if no memory, we could return a no-memory error, but user
                 * probably will be unprepared to deal with it. Maybe the
                 * MIDI driver is temporarily hung so we should just wait.
                 * I don't know the right answer, but waiting is easier.
                 */
                if (!new_buffers) continue;
                /* copy buffers to new_buffers and replace buffers */
                memcpy(new_buffers, m->buffers,
                       m->num_buffers * sizeof(LPMIDIHDR));
                pm_free(m->buffers);
                m->buffers = new_buffers;
                m->max_buffers = m->num_buffers + NUM_EXPANSION_BUFFERS;
                m->buffers_expanded = TRUE;
            }
            /* next, add one buffer and return it */
            if (m->num_buffers < m->max_buffers) {
                r = allocate_buffer(EXPANSION_BUFFER_LEN);
                /* again, if there's no memory, we may not really be
                 * dead -- maybe the system is temporarily hung and
                 * we can just wait longer for a message buffer */
                if (!r) continue;
                m->buffers[m->num_buffers++] = r;
                goto found_buffer; /* break out of 2 loops */
            }
            /* else, we've allocated all NUM_EXPANSION_BUFFERS buffers,
             * and we have no free buffers to send. We'll just keep
             * polling to see if any buffers show up.
             */
        }
    }
found_buffer:
    r->dwBytesRecorded = 0;
    /* actual buffer length is saved in dwUser field */
    r->dwBufferLength = (DWORD) r->dwUser;
    return r;
}
/* Open a WinMM MIDI input device: allocate per-device state, open the
 * device with a callback, queue two sysex buffers, and start input.
 * On any failure the goto ladder unwinds exactly the steps completed so
 * far and returns pmHostError or pmInsufficientMemory. */
static PmError winmm_in_open(PmInternal *midi, void *driverInfo)
{
    DWORD dwDevice;
    int i = midi->device_id;
    midiwinmm_type m;
    LPMIDIHDR hdr;
    dwDevice = (DWORD) descriptors[i].descriptor;
    /* create system dependent device data */
    m = (midiwinmm_type) pm_alloc(sizeof(midiwinmm_node)); /* create */
    midi->descriptor = m;
    if (!m) goto no_memory;
    m->handle.in = NULL;
    m->buffers = NULL;
    m->num_buffers = 0;
    m->next_buffer = 0;
    m->last_time = 0;
    m->first_message = TRUE; /* not used for input */
    m->sysex_mode = FALSE;
    m->sysex_word = 0;
    m->sysex_byte_count = 0;
    m->sync_time = 0;
    m->delta = 0;
    m->error = MMSYSERR_NOERROR;
    m->callback_error = MMSYSERR_NOERROR;
    /* open device */
    pm_hosterror = midiInOpen(&(m->handle.in),      /* input device handle */
                              dwDevice,             /* device ID */
                              (DWORD) winmm_in_callback, /* callback address */
                              (DWORD) midi,         /* callback instance data */
                              CALLBACK_FUNCTION);   /* callback is a procedure */
    if (pm_hosterror) goto free_descriptor;
    /* allocate first buffer for sysex data */
    hdr = allocate_buffer(PM_DEFAULT_SYSEX_BUFFER_SIZE);
    if (!hdr) goto close_device;
    pm_hosterror = midiInPrepareHeader(m->handle.in, hdr, sizeof(MIDIHDR));
    if (pm_hosterror) {
        pm_free(hdr);
        goto close_device;
    }
    /* once added, the buffer is owned by the driver until reset/close */
    pm_hosterror = midiInAddBuffer(m->handle.in, hdr, sizeof(MIDIHDR));
    if (pm_hosterror) goto close_device;
    /* allocate second buffer */
    hdr = allocate_buffer(PM_DEFAULT_SYSEX_BUFFER_SIZE);
    if (!hdr) goto close_device;
    pm_hosterror = midiInPrepareHeader(m->handle.in, hdr, sizeof(MIDIHDR));
    if (pm_hosterror) {
        pm_free(hdr);
        goto reset_device; /* because first buffer was added */
    }
    pm_hosterror = midiInAddBuffer(m->handle.in, hdr, sizeof(MIDIHDR));
    if (pm_hosterror) goto reset_device;
    /* start device */
    pm_hosterror = midiInStart(m->handle.in);
    if (pm_hosterror) goto reset_device;
    return pmNoError;
    /* undo steps leading up to the detected error */
reset_device:
    /* ignore return code (we already have an error to report) */
    midiInReset(m->handle.in);
close_device:
    midiInClose(m->handle.in);
    /* ignore return code */
free_descriptor:
    midi->descriptor = NULL;
    pm_free(m);
no_memory:
    if (pm_hosterror) {
        int err = midiInGetErrorText(pm_hosterror, (char *) pm_hosterror_text,
                                     PM_HOST_ERROR_MSG_LEN);
        assert(err == MMSYSERR_NOERROR);
        return pmHostError;
    }
    /* if !pm_hosterror, then the error must be pmInsufficientMemory */
    return pmInsufficientMemory;
    /* note: if we return an error code, the device will be closed and memory
       will be freed. It's up to the caller to free the parameter midi */
}
/* Create a kernel thread in `process` that begins execution at `entry`.
 * A one-page kernel stack is allocated and an initial interrupt stackframe
 * is forged on it so the scheduler can "return" into the thread.  Up to
 * argCount pointer arguments are recorded from `args`.  Returns the new
 * thread, or NULL if allocation fails. */
thread_t *thread_createKernel(process_t *process, thread_entry_t entry, size_t UNUSED(stackSize), uint32_t argCount, va_list args)
{
    thread_t *thread = thread_createVoid();
    if(thread)
    {
        //size_t stackPages = 1; //MAX(1, stackSize / 4096);
        uint8_t *kernelStack = (uint8_t *)pm_alloc(1);
        if(!kernelStack)
        {
            hfree(NULL, thread);
            return NULL;
        }

        thread->entry = entry;
        thread->process = process;

        // Create the kernel stack
        thread->kernelStack = kernelStack;
        thread->kernelStackVirt = (uint8_t *)vm_allocLimit(process->pdirectory, (uintptr_t)kernelStack, THREAD_STACK_LIMIT, 1, VM_FLAGS_KERNEL);

        uint32_t *stack = ((uint32_t *)(thread->kernelStackVirt + VM_PAGE_SIZE)) - argCount;
        memset(thread->kernelStackVirt, 0, 1 * VM_PAGE_SIZE);

        // Push the arguments for the thread on its stack
        thread->arguments = NULL;
        thread->argumentCount = argCount;
        if(argCount > 0)
        {
            thread->arguments = (uintptr_t **)halloc(NULL, argCount * sizeof(uintptr_t *));
            for(uint32_t i=0; i<argCount; i++)
            {
                uintptr_t *val = va_arg(args, uintptr_t *);
                thread->arguments[i] = val;
            }
        }

        // Forge initial kernel stackframe
        *(-- stack) = 0x10;             // ss
        *(-- stack) = 0x0;              // esp, kernel threads use the TSS
        *(-- stack) = 0x0200;           // eflags
        *(-- stack) = 0x8;              // cs
        *(-- stack) = (uint32_t)entry;  // eip

        // Interrupt number and error code
        *(-- stack) = 0x0;
        *(-- stack) = 0x0;

        // General purpose register
        *(-- stack) = 0x0;
        *(-- stack) = 0x0;
        *(-- stack) = 0x0;
        *(-- stack) = 0x0;
        *(-- stack) = 0x0;
        *(-- stack) = 0x0;
        *(-- stack) = 0x0;
        *(-- stack) = 0x0;

        // Segment registers
        *(-- stack) = 0x10;
        *(-- stack) = 0x10;
        *(-- stack) = 0x10;
        *(-- stack) = 0x10;

        // Update the threads
        thread->esp = (uint32_t)stack;

        // Attach the thread to the process;
        spinlock_lock(&process->threadLock); // Acquire the process' thread lock so we don't end up doing bad things
        thread->id = _thread_getUniqueID(process);

        if(process->mainThread)
        {
            thread_t *mthread = process->mainThread;

            // Attach the new thread next to the main thread
            thread->next = mthread->next;
            mthread->next = thread;
        }
        else
        {
            process->mainThread = thread;
            process->scheduledThread = thread;
        }

        spinlock_unlock(&process->threadLock);
    }

    // BUG FIX: the original fell off the end of this non-void function
    // without returning the new thread (undefined behavior for callers that
    // use the result).  Return the thread, or NULL if thread_createVoid
    // failed.
    return thread;
}
/* Prepare a fresh pmap: allocate the page that holds the directory and
 * expose it through the PTE window. */
void pmap_init(pmap_t *pmap) {
  TAILQ_INIT(&pmap->pte_pages);
  pmap->pde_page = pm_alloc(1);
  pmap->pde = (pte_t *)pmap->pde_page->vaddr;
  pmap->pte = (pte_t *)PTE_BASE;
}
io_library_t *io_libraryCreate(const char *path, uint8_t *buffer, size_t UNUSED(length)) { io_library_t *library = halloc(NULL, sizeof(io_library_t)); if(library) { // Setup the library memset(library, 0, sizeof(io_library_t)); library->lock = SPINLOCK_INIT; library->refCount = 1; library->path = halloc(NULL, strlen(path) + 1); library->dependencies = list_create(sizeof(struct io_dependency_s), offsetof(struct io_dependency_s, next), offsetof(struct io_dependency_s, prev)); if(!library->path || !library->dependencies) { if(library->path) hfree(NULL, library->path); if(library->dependencies) list_destroy(library->dependencies); dbg("iolink: Couldn't allocate enough memory for %s\n", path); return NULL; } strcpy(library->path, path); library->name = (char *)sys_fileWithoutPath(library->path); // Get the basic ELF info elf_header_t *header = (elf_header_t *)buffer; if(strncmp((const char *)header->e_ident, ELF_MAGIC, strlen(ELF_MAGIC)) != 0 || header->e_type != ET_DYN) { hfree(NULL, library->path); list_destroy(library->dependencies); dbg("iolink: %s is not a valid binary!\n", path); return NULL; } // Parse the program header elf_program_header_t *programHeader = (elf_program_header_t *)(buffer + header->e_phoff); elf_program_header_t *ptload[2]; vm_address_t minAddress = -1; vm_address_t maxAddress = 0; size_t segments = 0; // Calculate the needed size for(int i=0; i<header->e_phnum; i++) { elf_program_header_t *program = &programHeader[i]; if(program->p_type == PT_LOAD) { if(program->p_paddr < minAddress) minAddress = program->p_paddr; if(program->p_paddr + program->p_memsz > maxAddress) maxAddress = program->p_paddr + program->p_memsz; ptload[segments ++] = program; } if(program->p_type == PT_DYNAMIC) library->dynamic = (elf_dyn_t *)program->p_vaddr; } // Reserve enough memory and copy the .text section library->pages = pageCount(maxAddress - minAddress); library->pmemory = pm_alloc(library->pages); if(!library->pmemory) { io_libraryRelease(library); return NULL; } 
library->vmemory = vm_alloc(vm_getKernelDirectory(), (uintptr_t)library->pmemory, library->pages, VM_FLAGS_KERNEL); if(!library->vmemory) { io_libraryRelease(library); return NULL; } uint8_t *target = (uint8_t *)library->vmemory; uint8_t *source = buffer; memset(target, 0, library->pages * VM_PAGE_SIZE); for(size_t i=0; i<segments; i++) { elf_program_header_t *program = ptload[i]; memcpy(&target[program->p_vaddr - minAddress], &source[program->p_offset], program->p_filesz); } library->relocBase = library->vmemory - minAddress; // Verify if(library->dynamic) { library->dynamic = (elf_dyn_t *)(library->relocBase + ((uintptr_t)library->dynamic)); io_libraryDigestDynamic(library); } } return library; }
/* Initialize a particle-mesh (PM) object: decompose the MPI communicator
 * into a 2-D process mesh, compute mesh geometry and local FFT regions,
 * build the grid-edge lookup tables, create the r2c/c2r FFT plans (FFTW or
 * PFFT depending on init->use_fftw), and precompute the mesh-to-k table. */
void pm_init(PM * pm, PMInit * init, MPI_Comm comm) {
    pm->init = *init;
    pm->mem = _libfastpm_get_gmem();
    /* initialize the domain */
    MPI_Comm_rank(comm, &pm->ThisTask);
    MPI_Comm_size(comm, &pm->NTask);
    int Ny = init->NprocY;
    int Nx;
    if(Ny <= 0) {
        Ny = 1;
        Nx = pm->NTask;
        if(!init->use_fftw) {
            /* pick the largest Ny <= sqrt(NTask) that divides NTask */
            for(; Ny * Ny < pm->NTask; Ny ++) continue;
            for(; Ny >= 1; Ny--) {
                if (pm->NTask % Ny == 0) break;
                continue;
            }
        }
    } else {
        if(pm->NTask % Ny != 0) {
            fastpm_raise(-1, "NprocY(%d) and NTask(%d) is incompatible\n", Ny, pm->NTask);
        }
    }
    Nx = pm->NTask / Ny;
    pm->Nproc[0] = Nx;
    pm->Nproc[1] = Ny;
    if(init->use_fftw) {
        /* FFTW's MPI transforms only support a 1-D (slab) decomposition */
        if(Ny != 1) {
            fastpm_raise(-1, "FFTW requires Ny == 1; Ny = %d\n", Ny);
        }
    }
    int d;
    pm->Norm = 1.0;
    pm->Volume = 1.0;
    /* per-dimension mesh geometry; Norm and Volume accumulate products */
    for(d = 0; d < 3; d ++) {
        pm->Nmesh[d] = init->Nmesh;
        pm->BoxSize[d] = init->BoxSize;
        pm->Below[d] = 0;
        pm->Above[d] = 1;
        pm->CellSize[d] = pm->BoxSize[d] / pm->Nmesh[d];
        pm->InvCellSize[d] = 1.0 / pm->CellSize[d];
        pm->Norm *= pm->Nmesh[d];
        pm->Volume *= pm->BoxSize[d];
    }
    pfft_create_procmesh(2, comm, pm->Nproc, &pm->Comm2D);
    /* query the local input/output region sizes and the allocation size
     * (factor 2: complex numbers stored as real pairs) */
    if(init->use_fftw) {
        pm->allocsize = 2 * fftw_local_size_dft_r2c(
            3, pm->Nmesh, pm->Comm2D,
            (pm->init.transposed?FFTW_MPI_TRANSPOSED_OUT:0),
            pm->IRegion.size, pm->IRegion.start,
            pm->ORegion.size, pm->ORegion.start);
    } else {
        pm->allocsize = 2 * pfft_local_size_dft_r2c(
            3, pm->Nmesh, pm->Comm2D,
            (pm->init.transposed?PFFT_TRANSPOSED_OUT:0) | PFFT_PADDED_R2C,
            pm->IRegion.size, pm->IRegion.start,
            pm->ORegion.size, pm->ORegion.start);
    }
    /* Note that we need to fix up the padded size of the real data;
     * and transpose with strides , */
    pm->IRegion.strides[2] = 1;
    pm->IRegion.strides[1] = pm->IRegion.size[2];
    pm->IRegion.strides[0] = pm->IRegion.size[1] * pm->IRegion.strides[1];
    pm->IRegion.total = pm->IRegion.size[0] * pm->IRegion.strides[0];
    /* remove padding from the view */
    pm->IRegion.size[2] = pm->Nmesh[2];
    if(pm->init.transposed) {
        if(pm->init.use_fftw) {
            /* FFTW transposed, y, x, z */
            pm->ORegion.strides[2] = 1;
            pm->ORegion.strides[0] = pm->ORegion.size[2];
            pm->ORegion.strides[1] = pm->ORegion.size[0] * pm->ORegion.strides[0];
            pm->ORegion.total = pm->ORegion.size[1] * pm->ORegion.strides[1];
        } else {
            /* PFFT transposed, y, z, x */
            pm->ORegion.strides[0] = 1;
            pm->ORegion.strides[2] = pm->ORegion.size[0];
            pm->ORegion.strides[1] = pm->ORegion.size[2] * pm->ORegion.strides[2];
            pm->ORegion.total = pm->ORegion.size[1] * pm->ORegion.strides[1];
        }
    } else {
        /* non-transposed */
        pm->ORegion.strides[2] = 1;
        pm->ORegion.strides[1] = pm->ORegion.size[2];
        pm->ORegion.strides[0] = pm->ORegion.size[1] * pm->ORegion.strides[1];
        pm->ORegion.total = pm->ORegion.size[0] * pm->ORegion.strides[0];
    }
    /* gather the local region start offsets from each rank along each of
     * the 2 process-mesh axes and build the mesh->cartesian lookup table */
    for(d = 0; d < 2; d ++) {
        MPI_Comm projected;
        int remain_dims[2] = {0, 0};
        remain_dims[d] = 1;
        pm->Grid.edges_int[d] = malloc(sizeof(pm->Grid.edges_int[0][0]) * (pm->Nproc[d] + 1));
        pm->Grid.edges_float[d] = malloc(sizeof(pm->Grid.edges_float[0][0]) * (pm->Nproc[d] + 1));
        pm->Grid.MeshtoCart[d] = malloc(sizeof(int) * pm->Nmesh[d]);
        MPI_Cart_sub(pm->Comm2D, remain_dims, &projected);
        MPI_Allgather(&pm->IRegion.start[d], 1, MPI_PTRDIFF,
            pm->Grid.edges_int[d], 1, MPI_PTRDIFF, projected);
        int ntask;
        MPI_Comm_size(projected, &ntask);
        MPI_Comm_free(&projected);
        int j;
        for(j = 0; j < pm->Nproc[d]; j ++) {
            pm->Grid.edges_float[d][j] = 1.0 * pm->Grid.edges_int[d][j] / pm->Nmesh[d] * pm->BoxSize[d];
        }
        /* Last edge is at the edge of the box */
        pm->Grid.edges_float[d][j] = pm->BoxSize[d];
        pm->Grid.edges_int[d][j] = pm->Nmesh[d];
        /* fill in the look up table */
        for(j = 0; j < pm->Nproc[d]; j ++) {
            int i;
            for(i = pm->Grid.edges_int[d][j]; i < pm->Grid.edges_int[d][j+1]; i ++) {
                pm->Grid.MeshtoCart[d][i] = j;
            }
        }
    }
    /* temporary fields used only for FFT planning, freed below */
    FastPMFloat * canvas = pm_alloc(pm);
    FastPMFloat * workspace = pm_alloc(pm);
    if(pm->init.use_fftw) {
        pm->r2c = plan_dft_r2c_fftw(
            3, pm->Nmesh, (void*) workspace, (void*) canvas,
            pm->Comm2D,
            (pm->init.transposed?FFTW_MPI_TRANSPOSED_OUT:0)
            | FFTW_ESTIMATE
            | FFTW_DESTROY_INPUT
            );
        pm->c2r = plan_dft_c2r_fftw(
            3, pm->Nmesh, (void*) canvas, (void*) canvas,
            pm->Comm2D,
            (pm->init.transposed?FFTW_MPI_TRANSPOSED_IN:0)
            | FFTW_ESTIMATE
            | FFTW_DESTROY_INPUT
            );
    } else {
        pm->r2c = plan_dft_r2c(
            3, pm->Nmesh, (void*) workspace, (void*) canvas,
            pm->Comm2D, PFFT_FORWARD,
            (pm->init.transposed?PFFT_TRANSPOSED_OUT:0)
            | PFFT_PADDED_R2C
            | PFFT_ESTIMATE
            | PFFT_TUNE
            //| PFFT_MEASURE
            | PFFT_DESTROY_INPUT
            );
        pm->c2r = plan_dft_c2r(
            3, pm->Nmesh, (void*) workspace, (void*) workspace,
            pm->Comm2D, PFFT_BACKWARD,
            (pm->init.transposed?PFFT_TRANSPOSED_IN:0)
            | PFFT_PADDED_C2R
            | PFFT_ESTIMATE
            //| PFFT_MEASURE
            | PFFT_TUNE
            | PFFT_DESTROY_INPUT
            );
    }
    pm_free(pm, workspace);
    pm_free(pm, canvas);
    /* precompute wavenumbers: frequencies above Nmesh/2 wrap to negative */
    for(d = 0; d < 3; d++) {
        pm->MeshtoK[d] = malloc(pm->Nmesh[d] * sizeof(double));
        int i;
        for(i = 0; i < pm->Nmesh[d]; i++) {
            int ii = i;
            if(ii >= pm->Nmesh[d] / 2) {
                ii -= pm->Nmesh[d];
            }
            pm->MeshtoK[d][i] = ii * 2 * M_PI / pm->BoxSize[d];
        }
    }
}
/* Create an executable image from an ELF binary at `begin`: validate the
 * header, size and allocate backing pages, copy the PT_LOAD segments in
 * through a temporary kernel mapping, then map the image read-only into
 * the target page directory.  Returns NULL on a bad ELF magic or if the
 * object allocation fails. */
ld_exectuable_t *ld_exectuableCreate(vm_page_directory_t pdirectory, uint8_t *begin, size_t UNUSED(size))
{
    elf_header_t *header = (elf_header_t *)begin;
    if(strncmp((const char *)header->e_ident, ELF_MAGIC, strlen(ELF_MAGIC)) != 0)
        return NULL;

    ld_exectuable_t *executable = halloc(NULL, sizeof(ld_exectuable_t));
    if(executable)
    {
        // Initialize the executable
        executable->useCount = 1;
        executable->entry = header->e_entry;
        executable->pdirectory = pdirectory;

        elf_program_header_t *programHeader = (elf_program_header_t *)(begin + header->e_phoff);
        vm_address_t minAddress = -1;
        vm_address_t maxAddress = 0;
        size_t pages = 0;

        // Calculate the needed size
        for(int i=0; i<header->e_phnum; i++)
        {
            elf_program_header_t *program = &programHeader[i];
            if(program->p_type == PT_LOAD)
            {
                if(program->p_paddr < minAddress)
                    minAddress = program->p_paddr;

                if(program->p_paddr + program->p_memsz > maxAddress)
                    maxAddress = program->p_paddr + program->p_memsz;
            }
        }

        // Calculate the starting address and the number of pages we need to allocate
        minAddress = round4kDown(minAddress);
        pages = pageCount(maxAddress - minAddress);

        // Memory allocation
        // NOTE(review): pm_alloc()/vm_alloc() results are used without NULL
        // checks — confirm they cannot fail here, or add error paths.
        uint8_t *memory = (uint8_t *)pm_alloc(pages);
        uint8_t *target = (uint8_t *)vm_alloc(vm_getKernelDirectory(), (uintptr_t)memory, pages, VM_FLAGS_KERNEL);
        uint8_t *source = begin;

        memset(target, 0, pages * VM_PAGE_SIZE);

        // Copy the data from the image
        for(int i=0; i<header->e_phnum; i++)
        {
            elf_program_header_t *program = &programHeader[i];
            if(program->p_type == PT_LOAD)
            {
                memcpy(&target[program->p_vaddr - minAddress], &source[program->p_offset], program->p_filesz);
            }
        }

        // drop the temporary kernel mapping, then map the image for userland
        vm_free(vm_getKernelDirectory(), (vm_address_t)target, pages);
        vm_mapPageRange(pdirectory, (uintptr_t)memory, minAddress, pages, VM_FLAGS_USERLAND_R);

        executable->pimage = (uintptr_t)memory;
        executable->vimage = (vm_address_t)minAddress;
        executable->imagePages = pages;
    }

    return executable;
}
dma_t *dma_request(size_t pages, uint32_t flags) { dma_t *dma = halloc(NULL, sizeof(dma_t)); if(dma) { dma->vaddress = 0; dma->pfragmentCount = 0; dma->pages = pages; if((flags & kDMARequestFlagContiguous) || pages == 1) { dma->pfragmentCount = 1; dma->pfragmentPages[0] = pages; dma->pfragments[0] = pm_alloc(pages); if(!dma->pfragments[0]) { hfree(NULL, dma); return NULL; } } else { size_t pagesLeft = pages; size_t i = 0; while(pagesLeft > 0 && i < kDMAMaxPhysicalFragments) { size_t npages = pagesLeft; uintptr_t pmemory = pm_alloc(npages); if(!pmemory) { if(npages == 1) { dma_free(dma); return NULL; } npages /= 2; continue; } dma->pfragments[i] = pmemory; dma->pfragmentPages[i] = npages; dma->pfragmentCount ++; i ++; } } bool result = vm_fulfillDMARequest(dma); if(!result) { dma_free(dma); return NULL; } } return dma; }
/*
 * Opens a WinMM MIDI output device.
 *
 * latency == 0 selects the simple midiOut API; a non-zero latency selects the
 * schedulable midiStream API, for which tempo/timediv properties are set and
 * the stream is restarted. On any failure the partially-built state is torn
 * down via the goto cleanup chain and pmHostError or pmInsufficientMemory is
 * returned (pm_hosterror_text is filled for host errors).
 */
static PmError winmm_out_open(PmInternal *midi, void *driverInfo)
{
    DWORD dwDevice;
    int i = midi->device_id;
    midiwinmm_type m;
    MIDIPROPTEMPO propdata;
    MIDIPROPTIMEDIV divdata;
    int max_sysex_len = midi->buffer_len * 4;
    int output_buffer_len;
    int num_buffers;
    dwDevice = (DWORD) descriptors[i].descriptor;

    /* create system dependent device data */
    m = (midiwinmm_type) pm_alloc(sizeof(midiwinmm_node)); /* create */
    midi->descriptor = m;
    if (!m) goto no_memory;
    m->handle.out = NULL;
    m->buffers = NULL;
    m->num_buffers = 0;
    m->max_buffers = 0;
    m->buffers_expanded = FALSE;
    m->next_buffer = 0;
    m->last_time = 0;
    m->first_message = TRUE; /* we treat first message as special case */
    m->sysex_mode = FALSE;
    m->sysex_word = 0;
    m->sysex_byte_count = 0;
    m->hdr = NULL;
    m->sync_time = 0;
    m->delta = 0;
    m->error = MMSYSERR_NOERROR;

    /* create a signal */
    m->buffer_signal = CreateEvent(NULL, FALSE, FALSE, NULL);
    /* this should only fail when there are very serious problems */
    assert(m->buffer_signal);

    /* open device */
    if (midi->latency == 0) {
        /* use simple midi out calls */
        pm_hosterror = midiOutOpen(
                (LPHMIDIOUT) & m->handle.out, /* device Handle */
                dwDevice, /* device ID */
                /* note: same callback fn as for StreamOpen: */
                (DWORD_PTR) winmm_streamout_callback, /* callback fn */
                (DWORD_PTR) midi, /* callback instance data */
                CALLBACK_FUNCTION); /* callback type */
    } else {
        /* use stream-based midi output (schedulable in future) */
        pm_hosterror = midiStreamOpen(
                &m->handle.stream, /* device Handle */
                (LPUINT) & dwDevice, /* device ID pointer */
                1, /* reserved, must be 1 */
                (DWORD_PTR) winmm_streamout_callback,
                (DWORD_PTR) midi, /* callback instance data */
                CALLBACK_FUNCTION);
    }
    if (pm_hosterror != MMSYSERR_NOERROR) {
        goto free_descriptor;
    }

    if (midi->latency == 0) {
        num_buffers = NUM_SIMPLE_SYSEX_BUFFERS;
        output_buffer_len = max_sysex_len / num_buffers;
        if (output_buffer_len < MIN_SIMPLE_SYSEX_LEN)
            output_buffer_len = MIN_SIMPLE_SYSEX_LEN;
    } else {
        long dur = 0;
        num_buffers = max(midi->buffer_len, midi->latency / 2);
        if (num_buffers < MIN_STREAM_BUFFERS)
            num_buffers = MIN_STREAM_BUFFERS;
        output_buffer_len = STREAM_BUFFER_LEN;
        propdata.cbStruct = sizeof(MIDIPROPTEMPO);
        propdata.dwTempo = 480000; /* microseconds per quarter */
        pm_hosterror = midiStreamProperty(m->handle.stream,
                                          (LPBYTE) & propdata,
                                          MIDIPROP_SET | MIDIPROP_TEMPO);
        if (pm_hosterror) goto close_device;
        /* FIX: was sizeof(MIDIPROPTEMPO); the TIMEDIV property must report
           the size of MIDIPROPTIMEDIV (identical size today by coincidence
           only). */
        divdata.cbStruct = sizeof(MIDIPROPTIMEDIV);
        divdata.dwTimeDiv = 480; /* divisions per quarter */
        pm_hosterror = midiStreamProperty(m->handle.stream,
                                          (LPBYTE) & divdata,
                                          MIDIPROP_SET | MIDIPROP_TIMEDIV);
        if (pm_hosterror) goto close_device;
    }
    /* allocate buffers */
    if (allocate_buffers(m, output_buffer_len, num_buffers))
        goto free_buffers;
    /* start device */
    if (midi->latency != 0) {
        pm_hosterror = midiStreamRestart(m->handle.stream);
        if (pm_hosterror != MMSYSERR_NOERROR) goto free_buffers;
    }
    return pmNoError;

free_buffers:
    /* buffers are freed below by winmm_out_delete */
close_device:
    midiOutClose(m->handle.out);
free_descriptor:
    midi->descriptor = NULL;
    winmm_out_delete(midi); /* frees buffers and m */
no_memory:
    if (pm_hosterror) {
        int err = midiOutGetErrorText(pm_hosterror, (char *) pm_hosterror_text,
                                      PM_HOST_ERROR_MSG_LEN);
        assert(err == MMSYSERR_NOERROR);
        return pmHostError;
    }
    return pmInsufficientMemory;
}
/*
 * Opens a WinMM MIDI input device.
 *
 * Allocates the driver-private midiwinmm_node, opens the device with a
 * function callback, posts a pool of sysex input buffers sized from
 * midi->buffer_len, and starts input. Errors unwind through the goto chain
 * (reset -> close -> free descriptor) and return pmHostError (with
 * pm_hosterror_text filled) or pmInsufficientMemory.
 */
static PmError winmm_in_open(PmInternal *midi, void *driverInfo)
{
    DWORD dwDevice;
    int i = midi->device_id;
    int max_sysex_len = midi->buffer_len * 4;
    int num_input_buffers = max_sysex_len / INPUT_SYSEX_LEN;
    midiwinmm_type m;
    dwDevice = (DWORD) descriptors[i].descriptor;

    /* create system dependent device data */
    m = (midiwinmm_type) pm_alloc(sizeof(midiwinmm_node)); /* create */
    midi->descriptor = m;
    if (!m) goto no_memory;
    m->handle.in = NULL;
    m->buffers = NULL;          /* not used for input */
    m->num_buffers = 0;         /* not used for input */
    /* FIX: the initializers for max_buffers and buffers_expanded were
       swapped (0/FALSE — behaviorally identical, but misleading). */
    m->max_buffers = 0;         /* not used for input */
    m->buffers_expanded = FALSE; /* not used for input */
    m->next_buffer = 0;         /* not used for input */
    m->buffer_signal = 0;       /* not used for input */
    m->last_time = 0;
    m->first_message = TRUE;    /* not used for input */
    m->sysex_mode = FALSE;
    m->sysex_word = 0;
    m->sysex_byte_count = 0;
    m->hdr = NULL;              /* not used for input */
    m->sync_time = 0;
    m->delta = 0;
    m->error = MMSYSERR_NOERROR;
    /* 4000 is based on Windows documentation -- that's the value used
       in the memory manager. It's small enough that it should not hurt
       performance even if it's not optimal. */
    InitializeCriticalSectionAndSpinCount(&m->lock, 4000);
    /* open device */
    pm_hosterror = midiInOpen(
            &(m->handle.in),               /* input device handle */
            dwDevice,                      /* device ID */
            (DWORD_PTR) winmm_in_callback, /* callback address */
            (DWORD_PTR) midi,              /* callback instance data */
            CALLBACK_FUNCTION);            /* callback is a procedure */
    if (pm_hosterror) goto free_descriptor;

    if (num_input_buffers < MIN_INPUT_BUFFERS)
        num_input_buffers = MIN_INPUT_BUFFERS;
    for (i = 0; i < num_input_buffers; i++) {
        if (allocate_input_buffer(m->handle.in, INPUT_SYSEX_LEN)) {
            /* either pm_hosterror was set, or the proper return code is
               pmInsufficientMemory */
            goto close_device;
        }
    }
    /* start device */
    pm_hosterror = midiInStart(m->handle.in);
    if (pm_hosterror) goto reset_device;
    return pmNoError;

    /* undo steps leading up to the detected error */
reset_device:
    /* ignore return code (we already have an error to report) */
    midiInReset(m->handle.in);
close_device:
    midiInClose(m->handle.in); /* ignore return code */
free_descriptor:
    /* FIX: the critical section initialized above was leaked on every
       error path; release it before freeing m. (All gotos that reach this
       label occur after InitializeCriticalSectionAndSpinCount.) */
    DeleteCriticalSection(&m->lock);
    midi->descriptor = NULL;
    pm_free(m);
no_memory:
    if (pm_hosterror) {
        int err = midiInGetErrorText(pm_hosterror, (char *) pm_hosterror_text,
                                     PM_HOST_ERROR_MSG_LEN);
        assert(err == MMSYSERR_NOERROR);
        return pmHostError;
    }
    /* if !pm_hosterror, then the error must be pmInsufficientMemory */
    return pmInsufficientMemory;
    /* note: if we return an error code, the device will be
       closed and memory will be freed. It's up to the caller
       to free the parameter midi */
}