/*
 * UMA backend page allocator for the jumbo frame zones.
 *
 * Allocates kernel virtual memory that is backed by contiguous physical
 * pages.
 */
static void *
mbuf_jumbo_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
    void *p;                            /* Returned page */
    struct vm_page_list *page_list;
    vm_page_t pages;
    unsigned long size, page_num, pfn;
    int i;

    size = round_page(bytes);
    page_num = size >> PAGE_SHIFT;

    /* Inform UMA that this allocator uses kernel_map/object. */
    *flags = UMA_SLAB_FREEBSD_KERNEL;

    p = (void *)host_malloc(size, PAGE_SIZE);   // kmem_malloc(kmem_map, bytes, wait);
    pfn = ((unsigned long)p) >> PAGE_SHIFT;
    if (p != NULL) {
        pages = (vm_page_t)host_malloc(sizeof(struct vm_page) * page_num, -1);
        if (pages != NULL) {
            for (i = 0; i < page_num; i++, pfn++) {
                page_list = &page_slab_hash[pfn % MAX_UPTCP_PAGENUM];
                pages[i].page_addr = (uint8_t *)(pfn << PAGE_SHIFT);
                pages[i].flags = 0;
                pages[i].object = NULL;
                SLIST_INSERT_HEAD(page_list, &pages[i], page_link);
            }
        } else {
            host_free(p);
            return (NULL);
        }
    }
    return (p);
}
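/*
 * Example sketch: one way a jumbo cluster zone could be wired to the
 * backend allocator above.  The zone variable, zone name and 9k size are
 * illustrative assumptions, as is the availability of the stock FreeBSD
 * uma_zcreate()/uma_zone_set_allocf() interfaces in this port; only
 * mbuf_jumbo_alloc() comes from this file.
 */
static uma_zone_t zone_jumbo9_example;

static void
mbuf_jumbo_zone_example_init(void)
{
    zone_jumbo9_example = uma_zcreate("mbuf_jumbo_9k_example",
        9 * 1024,               /* 9k jumbo cluster payload */
        NULL, NULL,             /* no item ctor/dtor */
        NULL, NULL,             /* no zone init/fini */
        UMA_ALIGN_PTR, 0);

    /* Route slab page allocations through the contiguous-page backend. */
    uma_zone_set_allocf(zone_jumbo9_example, mbuf_jumbo_alloc);
}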
/* Allocate per Virtual Machine data area for Device Driver. */
GLOBAL IHP *NIDDB_Allocate_Instance_Data IFN3
    (
    int, size,                          /* Size of data area required */
    NIDDB_CR_CALLBACK, create_cb,       /* create callback */
    NIDDB_TM_CALLBACK, terminate_cb     /* terminate callback */
    )
{
    int i;

    if ( !allocation_allowed )
    {
        /*
         * We are still managing instances for Windows; we can't add
         * more data instances on the fly!
         */
        return (IHP *)0;
    }

    /* Find available instance slot */
    for (i = 0; i < MAX_INSTANCES; i++)
    {
        if ( master_ptrs[i] == (IHP)0 )
            break;      /* found empty slot */
    }

    if ( i == MAX_INSTANCES )
    {
        /* No free slot */
        always_trace0("NIDDB: Too many Data Instances being requested.");
        return (IHP *)0;
    }

    /* Allocate data area */
    if ( (master_ptrs[i] = (IHP)host_malloc(size)) == (IHP)0 )
    {
        return (IHP *)0;        /* No room at the inn */
    }

    /* Save details of this instance */
    snapshot_ptrs[i] = master_ptrs[i];
    instance_size[i] = size;

    /* Save callbacks */
    create_callback[i] = create_cb;
    terminate_callback[i] = terminate_cb;

    return &master_ptrs[i];     /* return handle */
}
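/*
 * Example sketch: how a virtual device might claim a per-VM data area
 * through the routine above.  MY_DEVICE_STATE, the handle variable and
 * the callback pointers are illustrative assumptions; only
 * NIDDB_Allocate_Instance_Data() comes from this module.
 */
typedef struct
{
    int io_base;        /* example per-VM device fields */
    int irq_line;
} MY_DEVICE_STATE;

LOCAL NIDDB_CR_CALLBACK my_device_create_cb;    /* assumed set up elsewhere */
LOCAL NIDDB_TM_CALLBACK my_device_terminate_cb; /* assumed set up elsewhere */
LOCAL IHP *my_device_handle;                    /* handle returned by NIDDB */

LOCAL void my_device_register IFN0()
{
    my_device_handle = NIDDB_Allocate_Instance_Data(sizeof(MY_DEVICE_STATE),
        my_device_create_cb, my_device_terminate_cb);

    if ( my_device_handle == (IHP *)0 )
        return;         /* no free slot or no memory */

    /* The current VM's copy is always reached through the handle. */
    ((MY_DEVICE_STATE *)*my_device_handle)->irq_line = 5;
}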
/* Allocate data structures required for new data instance. */
LOCAL IBOOL allocate_NIDDB IFN2
    (
    IU32, inst_handle,          /* (I ) Windows handle for Virtual Machine */
    int *, record_id            /* (O ) Record ID (ie virtualising byte value) */
    )
{
    int v;
    int i;
    IHP *p;
    IHP *instance_ptr;

    /* Search for empty virtual record */
    for (v = 0; v < MAX_VMS; v++)
    {
        if ( vrecs[v].vr_pinst_tbl == (IHP *)0 )
            break;      /* found empty slot */
    }

    /* Ensure we found empty slot */
    if ( v == MAX_VMS )
    {
        /* No free slot! */
        always_trace0("NIDDB: Too many Virtual Machines being requested.");
        return FALSE;
    }

    /* Allocate new instance table - ensure it is zero */
    if ( (instance_ptr = (IHP *)host_calloc(1, sizeof(master_ptrs))) == (IHP *)0 )
    {
        /* No room at the inn */
        return FALSE;
    }

    /* Allocate new data areas */
    for (i = 0, p = instance_ptr; i < MAX_INSTANCES; i++, p++)
    {
        /* Use master pointer as the 'creation template' */
        if ( master_ptrs[i] != (IHP)0 )
        {
            if ( (*p = (IHP)host_malloc(instance_size[i])) == (IHP)0 )
            {
                /* No room at the inn */
                /* Clean up any blocks which may have been allocated */
                for (i = 0, p = instance_ptr; i < MAX_INSTANCES; i++, p++)
                {
                    if ( *p != (IHP)0 )
                        host_free(*p);
                }
                host_free(instance_ptr);        /* and the table itself */
                return FALSE;
            }
        }
    }

    /* Finally fill in virtual record */
    vrecs[v].vr_inst_handle = inst_handle;
    vrecs[v].vr_pinst_tbl = instance_ptr;

    *record_id = v;
    return TRUE;
}
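/*
 * Example sketch: the expected calling pattern when a new Virtual Machine
 * is started.  new_vm_created() is an illustrative caller, not part of
 * this module; only allocate_NIDDB() is.
 */
LOCAL void new_vm_created IFN1
    (
    IU32, inst_handle           /* Windows handle for the new VM */
    )
{
    int record_id;

    if ( !allocate_NIDDB(inst_handle, &record_id) )
    {
        always_trace0("NIDDB: could not allocate instance data for new VM.");
        return;
    }

    /* record_id now selects this VM's entry in the vrecs[] table. */
}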