Code Example #1
File: cpu.c Project: argp/xnu
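Allocates the per-CPU IRQ and FIQ interrupt stacks: each stack is requested with two extra pages and KMA_GUARD_FIRST | KMA_GUARD_LAST so it is bracketed by guard pages, and the resulting stack tops are recorded in the cpu_data structure.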
void
cpu_stack_alloc(cpu_data_t *cpu_data_ptr)
{
	vm_offset_t		irq_stack = 0;
	vm_offset_t		fiq_stack = 0;

	kern_return_t kr = kernel_memory_allocate(kernel_map, &irq_stack,
				   INTSTACK_SIZE + (2 * PAGE_SIZE),
				   PAGE_MASK,
				   KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
				   VM_KERN_MEMORY_STACK);
	if (kr != KERN_SUCCESS)
		panic("Unable to allocate cpu interrupt stack\n");

	cpu_data_ptr->intstack_top = irq_stack + PAGE_SIZE + INTSTACK_SIZE;
	cpu_data_ptr->istackptr = cpu_data_ptr->intstack_top;

	kr = kernel_memory_allocate(kernel_map, &fiq_stack,
				   FIQSTACK_SIZE + (2 * PAGE_SIZE),
				   PAGE_MASK,
				   KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
				   VM_KERN_MEMORY_STACK);
	if (kr != KERN_SUCCESS)
		panic("Unable to allocate cpu exception stack\n");

	cpu_data_ptr->fiqstack_top = fiq_stack + PAGE_SIZE + FIQSTACK_SIZE;
	cpu_data_ptr->fiqstackptr = cpu_data_ptr->fiqstack_top;
}
Code Example #2
File: gzalloc.c Project: Apple-FOSS-Mirror/xnu
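Initializes the guard-mode allocator's per-zone state: the free-element cache is carved directly out of the early gzalloc_reserve while kmem is not yet ready, and allocated with kernel_memory_allocate (KMA_KOBJECT) once it is.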
void gzalloc_zone_init(zone_t z) {
	if (gzalloc_mode) {
		bzero(&z->gz, sizeof(z->gz));

		if (gzfc_size && (z->elem_size >= gzalloc_min) && (z->elem_size <= gzalloc_max) && (z->gzalloc_exempt == FALSE)) {
			vm_size_t gzfcsz = round_page(sizeof(*z->gz.gzfc) * gzfc_size);

			/* If the VM/kmem system aren't yet configured, carve
			 * out the free element cache structure directly from the
			 * gzalloc_reserve supplied by the pmap layer.
			*/
			if (!kmem_ready) {
				if (gzalloc_reserve_size < gzfcsz)
					panic("gzalloc reserve exhausted");

				z->gz.gzfc = (vm_offset_t *)gzalloc_reserve;
				gzalloc_reserve += gzfcsz;
				gzalloc_reserve_size -= gzfcsz;
			} else {
				kern_return_t kr;

				if ((kr = kernel_memory_allocate(kernel_map, (vm_offset_t *)&z->gz.gzfc, gzfcsz, 0, KMA_KOBJECT)) != KERN_SUCCESS) {
					panic("zinit/gzalloc: kernel_memory_allocate failed (%d) for 0x%lx bytes", kr, (unsigned long) gzfcsz);
				}
			}
			bzero((void *)z->gz.gzfc, gzfcsz);
		}
	}
}
Code Example #3
File: osif.c Project: Lezval/spl
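A thin malloc shim for the SPL: in kernel builds it forwards the request to kernel_memory_allocate, otherwise it falls back to user-space malloc.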
void* osif_malloc(sa_size_t size)
{
#ifdef IN_KERNEL
    
    void *tr;
    kern_return_t kr;

    kr = kernel_memory_allocate(
                                kernel_map,
                                (vm_offset_t *)&tr, /* tr is a void*; the API expects vm_offset_t* */
                                size,
                                0,
                                0);

    if (kr == KERN_SUCCESS) {
        return tr;
    } else {
        return NULL;
    }
    
#else

    return (void*)malloc(size);
    
#endif
}
Code Example #4
File: stack.c Project: MACasuba/MACasuba-Utils-git
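Kernel thread-stack allocation: a stack is reused from the free list when possible; otherwise KERNEL_STACK_SIZE plus two guard pages is allocated, and the returned address is advanced past the lower guard page before being attached to the thread.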
/*
 *	stack_alloc:
 *
 *	Allocate a stack for a thread, may
 *	block.
 */
void
stack_alloc(
	thread_t	thread)
{
	vm_offset_t		stack;
	spl_t			s;
	int			guard_flags;

	assert(thread->kernel_stack == 0);

	s = splsched();
	stack_lock();
	stack = stack_free_list;
	if (stack != 0) {
		stack_free_list = stack_next(stack);
		stack_free_count--;
	}
	else {
		if (++stack_total > stack_hiwat)
			stack_hiwat = stack_total;
		stack_new_count++;
	}
	stack_free_delta--;
	stack_unlock();
	splx(s);
		
	if (stack == 0) {

		/*
		 * Request guard pages on either side of the stack.  Ask
		 * kernel_memory_allocate() for two extra pages to account
		 * for these.
		 */

		guard_flags = KMA_GUARD_FIRST | KMA_GUARD_LAST;
		if (kernel_memory_allocate(kernel_map, &stack,
					   KERNEL_STACK_SIZE + (2*PAGE_SIZE),
					   stack_addr_mask,
					   KMA_KOBJECT | guard_flags)
		    != KERN_SUCCESS)
			panic("stack_alloc: kernel_memory_allocate");

		/*
		 * The stack address that comes back is the address of the lower
		 * guard page.  Skip past it to get the actual stack base address.
		 */

		stack += PAGE_SIZE;
	}

	machine_stack_attach(thread, stack);
}
Code Example #5
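The page-supply callback for an iopa_t small-allocation pool (the IOBMD prefix suggests IOBufferMemoryDescriptor): it allocates a single page tagged VM_KERN_MEMORY_IOKIT, zeroes it, and returns 0 on failure.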
static uintptr_t IOBMDPageProc(iopa_t * a)
{
    kern_return_t kr;
    vm_address_t  vmaddr  = 0;
    int           options = 0; // KMA_LOMEM;

    kr = kernel_memory_allocate(kernel_map, &vmaddr,
				page_size, 0, options, VM_KERN_MEMORY_IOKIT);

    if (KERN_SUCCESS != kr) vmaddr = 0;
    else 		    bzero((void *) vmaddr, page_size);

    return ((uintptr_t) vmaddr);
}
Code Example #6
File: wait_queue.c Project: Prajna/xnu
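Sizes the global wait-queue hash table from the processor count and memory size, allocates it with KMA_KOBJECT | KMA_NOPAGEWAIT, and initializes each entry as a FIFO wait queue.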
static void
wait_queues_init(void)
{
	uint32_t	i, whsize;
	kern_return_t	kret;

	whsize = compute_wait_hash_size(processor_avail_count, machine_info.max_mem);
	num_wait_queues = (whsize / ((uint32_t)sizeof(struct wait_queue))) - 1;

	kret = kernel_memory_allocate(kernel_map, (vm_offset_t *) &wait_queues, whsize, 0, KMA_KOBJECT|KMA_NOPAGEWAIT);

	if (kret != KERN_SUCCESS || wait_queues == NULL)
		panic("kernel_memory_allocate() failed to allocate wait queues, error: %d, whsize: 0x%x", kret, whsize);

	for (i = 0; i < num_wait_queues; i++) {
		wait_queue_init(&wait_queues[i], SYNC_POLICY_FIFO);
	}
}
Code Example #7
File: spl-bmalloc.c Project: lundman/osx-spl-crypto
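The same SPL malloc shim as Code Example #3, here also tracking the running total of allocated bytes with atomic_add_64.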
static void *
osif_malloc(sa_size_t size)
{
#ifdef IN_KERNEL
	void *tr;
	kern_return_t kr;
	
	kr = kernel_memory_allocate(kernel_map, (vm_offset_t *)&tr, size, 0, 0);
	
	if (kr == KERN_SUCCESS) {
		atomic_add_64(&bmalloc_allocated_total, size);
		return (tr);
	} else {
		return (NULL);
	}
#else
	return ((void*)malloc(size));
#endif /* IN_KERNEL */
}
Code Example #8
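Allocates and zeroes one page and places the io_pagealloc_t bookkeeping structure in the last kIOPageAllocChunkBytes of that page, initializing its signature and availability bitmap.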
static io_pagealloc_t * 
iopa_allocpage(void)
{
    kern_return_t    kr;
    io_pagealloc_t * pa;
    vm_address_t     vmaddr = 0;

    int options = 0; // KMA_LOMEM;
    kr = kernel_memory_allocate(kernel_map, &vmaddr,
				page_size, 0, options);
    if (KERN_SUCCESS != kr) return (0);

    bzero((void *) vmaddr, page_size);
    pa = (typeof(pa)) (vmaddr + page_size - kIOPageAllocChunkBytes);

    pa->signature = kIOPageAllocSignature;
    pa->avail     = -2ULL;

    return (pa);
}
Code Example #9
File: marching_cubes.c Project: AntonioCS/Corange
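Note that kernel_memory_allocate here is not the XNU VM routine: in Corange it takes only a size and returns a kernel_memory handle, evidently an OpenCL buffer wrapper. The function builds the OpenGL vertex buffers, the OpenCL volume memory, and the marching-cubes kernels.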
void marching_cubes_init() {
  
  const int full_size = width * height * depth;
  
  /* Point rendering data */
  
  vec4* point_data = malloc(sizeof(vec4) * full_size);
  
  if(point_data == NULL) {
    error("Not enough memory!");
  }

  int x, y, z;
  for(x = 0; x < width; x++)
  for(y = 0; y < height; y++)
  for(z = 0; z < depth; z++) {
    int id = x + y * width + z * width * height;
    vec4 position = vec4_new(x, y, z, 1);
    point_data[id] = position;
  }
  
  glGenBuffers(1, &point_positions);
  glBindBuffer(GL_ARRAY_BUFFER, point_positions);
  glBufferData(GL_ARRAY_BUFFER, sizeof(vec4) * full_size, point_data, GL_STATIC_DRAW);
  free(point_data);
  
  vec4* point_color_data = malloc(sizeof(vec4) * full_size);
  memset(point_color_data, 0, sizeof(vec4) * full_size);
  glGenBuffers(1, &point_colors);
  glBindBuffer(GL_ARRAY_BUFFER, point_colors);
  glBufferData(GL_ARRAY_BUFFER, sizeof(vec4) * full_size, point_color_data, GL_DYNAMIC_COPY);
  free(point_color_data);
  
  point_color_buffer = kernel_memory_from_glbuffer(point_colors);
  
  /* OpenCL volume */
  
  volume = kernel_memory_allocate(sizeof(float) * full_size);
  
  /* Vertex stuff */
  
  vec4* vertex_pos_data = malloc(sizeof(vec4) * MAX_VERTS);
  memset(vertex_pos_data, 0, sizeof(vec4) * MAX_VERTS);
  glGenBuffers(1, &vertex_positions);
  glBindBuffer(GL_ARRAY_BUFFER, vertex_positions);
  glBufferData(GL_ARRAY_BUFFER, sizeof(vec4) * MAX_VERTS, vertex_pos_data, GL_DYNAMIC_COPY);
  free(vertex_pos_data);
  
  vertex_positions_buffer = kernel_memory_from_glbuffer(vertex_positions);
  
  vec4* vertex_norm_data = malloc(sizeof(vec4) * MAX_VERTS);
  memset(vertex_norm_data, 0, sizeof(vec4) * MAX_VERTS);
  glGenBuffers(1, &vertex_normals);
  glBindBuffer(GL_ARRAY_BUFFER, vertex_normals);
  glBufferData(GL_ARRAY_BUFFER, sizeof(vec4) * MAX_VERTS, vertex_norm_data, GL_DYNAMIC_COPY);
  free(vertex_norm_data);
  
  vertex_normals_buffer = kernel_memory_from_glbuffer(vertex_normals);
  
  vertex_index = kernel_memory_allocate(sizeof(int));
  
  /* Kernels */
  
  kernel_program* marching_cubes = asset_get(P("./kernels/marching_cubes.cl"));
  
  write_point = kernel_program_get_kernel(marching_cubes, "write_point");
  kernel_set_argument(write_point, 0, sizeof(kernel_memory), &volume);
  kernel_set_argument(write_point, 4, sizeof(int), (void*)&width);
  kernel_set_argument(write_point, 5, sizeof(int), (void*)&height);
  kernel_set_argument(write_point, 6, sizeof(int), (void*)&depth);
  
  write_metaball = kernel_program_get_kernel(marching_cubes, "write_metaball");
  kernel_set_argument(write_metaball, 0, sizeof(kernel_memory), &volume);
  
  write_metaballs = kernel_program_get_kernel(marching_cubes, "write_metaballs");
  kernel_set_argument(write_metaballs, 0, sizeof(kernel_memory), &volume);
  
  write_clear = kernel_program_get_kernel(marching_cubes, "write_clear");
  kernel_set_argument(write_clear, 0, sizeof(kernel_memory), &volume);
  
  write_point_color_back = kernel_program_get_kernel(marching_cubes, "write_point_color_back");
  kernel_set_argument(write_point_color_back, 0, sizeof(kernel_memory), &volume);
  kernel_set_argument(write_point_color_back, 1, sizeof(kernel_memory), &point_color_buffer);
  
  construct_surface = kernel_program_get_kernel(marching_cubes, "construct_surface");
  kernel_set_argument(construct_surface, 0, sizeof(kernel_memory), &volume);
  
  generate_normals = kernel_program_get_kernel(marching_cubes, "generate_flat_normals");
  
  generate_normals_smooth = kernel_program_get_kernel(marching_cubes, "generate_smooth_normals");
  
}
Code Example #10
File: unix_startup.c Project: Prajna/xnu
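Early BSD startup: sizes the buffer-header and I/O-buffer arrays, creates the bufferhdr_map submap with kmem_suballoc, allocates the headers inside it with KMA_HERE | KMA_KOBJECT, then scales the TCP socket space and vnode limits.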
void
bsd_startupearly(void)
{
	vm_offset_t     firstaddr;
	vm_size_t       size;
	kern_return_t   ret;

	/* clip the number of buf headers up to 16k */
	if (max_nbuf_headers == 0)
		max_nbuf_headers = atop_kernel(sane_size / 50);	/* Get 2% of ram, but no more than we can map */
	if ((customnbuf == 0) && (max_nbuf_headers > 16384))
		max_nbuf_headers = 16384;
	if (max_nbuf_headers < CONFIG_MIN_NBUF)
		max_nbuf_headers = CONFIG_MIN_NBUF;

	/* clip the number of hash elements  to 200000 */
	if ( (customnbuf == 0 ) && nbuf_hashelements == 0) {
		nbuf_hashelements = atop_kernel(sane_size / 50);
		if (nbuf_hashelements > 200000)
			nbuf_hashelements = 200000;
	} else
		nbuf_hashelements = max_nbuf_headers;

	if (niobuf_headers == 0) {
		if (max_nbuf_headers < 4096)
			niobuf_headers = max_nbuf_headers;
		else
			niobuf_headers = (max_nbuf_headers / 2) + 2048;
	}
	if (niobuf_headers < CONFIG_MIN_NIOBUF)
		niobuf_headers = CONFIG_MIN_NIOBUF;

	size = (max_nbuf_headers + niobuf_headers) * sizeof(struct buf);
	size = round_page(size);

	ret = kmem_suballoc(kernel_map,
			    &firstaddr,
			    size,
			    FALSE,
			    VM_FLAGS_ANYWHERE,
			    &bufferhdr_map);

	if (ret != KERN_SUCCESS)
		panic("Failed to create bufferhdr_map");

	ret = kernel_memory_allocate(bufferhdr_map,
				     &firstaddr,
				     size,
				     0,
				     KMA_HERE | KMA_KOBJECT);

	if (ret != KERN_SUCCESS)
		panic("Failed to allocate bufferhdr_map");

	buf_headers = (struct buf *) firstaddr;
	bzero(buf_headers, size);

#if SOCKETS
	{
#if CONFIG_USESOCKTHRESHOLD
		static const unsigned int	maxspace = 64 * 1024;
#else
		static const unsigned int	maxspace = 128 * 1024;
#endif
		int             scale;

		nmbclusters = bsd_mbuf_cluster_reserve(NULL) / MCLBYTES;

#if INET || INET6
		if ((scale = nmbclusters / NMBCLUSTERS) > 1) {
			tcp_sendspace *= scale;
			tcp_recvspace *= scale;

			if (tcp_sendspace > maxspace)
				tcp_sendspace = maxspace;
			if (tcp_recvspace > maxspace)
				tcp_recvspace = maxspace;
		}
#endif /* INET || INET6 */
	}
#endif /* SOCKETS */

	if (vnodes_sized == 0) {
		if (!PE_get_default("kern.maxvnodes", &desiredvnodes, sizeof(desiredvnodes))) {
			/*
			 * Size vnodes based on memory 
			 * Number vnodes  is (memsize/64k) + 1024 
			 * This is the calculation that is used by launchd in tiger
			 * we are clipping the max based on 16G 
			 * ie ((16*1024*1024*1024)/(64 *1024)) + 1024 = 263168;
			 * CONFIG_VNODES is set to 263168 for "medium" configurations (the default)
			 * but can be smaller or larger. 
			 */
			desiredvnodes  = (sane_size/65536) + 1024;
#ifdef CONFIG_VNODES
				if (desiredvnodes > CONFIG_VNODES)
					desiredvnodes = CONFIG_VNODES;
#endif
		}
		vnodes_sized = 1;
	}
}
Code Example #11
File: IOLib.cpp Project: Apple-FOSS-Mirror/xnu
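Chooses an allocation strategy based on the physical-address and contiguity constraints: kmem_alloc_contig for contiguous or physically limited requests, kernel_memory_allocate (optionally with KMA_LOMEM) when only low memory is required, and kalloc with manual alignment bookkeeping otherwise.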
mach_vm_address_t
IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys, 
			                mach_vm_size_t alignment, bool contiguous)
{
    kern_return_t	kr;
    mach_vm_address_t	address;
    mach_vm_address_t	allocationAddress;
    mach_vm_size_t	adjustedSize;
    mach_vm_address_t	alignMask;

    if (size == 0)
	return (0);
    if (alignment == 0) 
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);

    contiguous = (contiguous && (adjustedSize > page_size))
                   || (alignment > page_size);

    if (contiguous || maxPhys)
    {
        int options = 0;
	vm_offset_t virt;

	adjustedSize = size;
        contiguous = (contiguous && (adjustedSize > page_size))
                           || (alignment > page_size);

	if (!contiguous)
	{
	    if (maxPhys <= 0xFFFFFFFF)
	    {
		maxPhys = 0;
		options |= KMA_LOMEM;
	    }
	    else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage))
	    {
		maxPhys = 0;
	    }
	}
	if (contiguous || maxPhys)
	{
	    kr = kmem_alloc_contig(kernel_map, &virt, size,
				   alignMask, atop(maxPhys), atop(alignMask), 0);
	}
	else
	{
	    kr = kernel_memory_allocate(kernel_map, &virt,
					size, alignMask, options);
	}
	if (KERN_SUCCESS == kr)
	    address = virt;
	else
	    address = 0;
    }
    else
    {
	adjustedSize += alignMask;
        allocationAddress = (mach_vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                    + (sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t)))
                    & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page(address);

            *((mach_vm_size_t *)(address - sizeof(mach_vm_size_t)
                            - sizeof(mach_vm_address_t))) = adjustedSize;
            *((mach_vm_address_t *)(address - sizeof(mach_vm_address_t)))
                            = allocationAddress;
	} else
	    address = 0;
    }

    if (address) {
    IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
#if IOALLOCDEBUG
	debug_iomalloc_size += size;
#endif
    }

    return (address);
}
Code Example #12
File: IOLib.cpp Project: Apple-FOSS-Mirror/xnu
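Aligned allocation: requests of a page or more go through kernel_memory_allocate with the alignment mask, smaller ones through kalloc; on the kalloc path the adjusted size and base address are stored just below the aligned pointer, presumably so the matching free routine can recover them.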
void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t	kr;
    vm_offset_t		address;
    vm_offset_t		allocationAddress;
    vm_size_t		adjustedSize;
    uintptr_t		alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0) 
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (size > adjustedSize) {
	    address = 0;    /* overflow detected */
    }
    else if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
					size, alignMask, 0);
	if (KERN_SUCCESS != kr)
	    address = 0;

    } else {

	adjustedSize += alignMask;

	if (adjustedSize >= page_size) {

	    kr = kernel_memory_allocate(kernel_map, &allocationAddress,
					    adjustedSize, 0, 0);
	    if (KERN_SUCCESS != kr)
		allocationAddress = 0;

	} else
	    allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t) - sizeof(vm_address_t))) 
			    = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
	} else
	    address = 0;
    }

    assert(0 == (address & alignMask));

    if( address) {
#if IOALLOCDEBUG
		debug_iomalloc_size += size;
#endif
    	IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
	}

    return (void *) address;
}
Code Example #13
File: particles.c Project: Bevilacqua/Corange
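As in Code Example #9, kernel_memory_allocate is Corange's OpenCL wrapper: the particle system allocates GPU-side buffers for positions, velocities, lifetimes, and randoms (or wraps the OpenGL buffers directly, depending on the build flags).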
void particles_init() {

  particle_positions = malloc(sizeof(vector4) * particle_count);
  particle_velocities = malloc(sizeof(vector4) * particle_count);
  particle_lifetimes = malloc(sizeof(float) * particle_count);
  particle_randoms = malloc(sizeof(vector4) * particle_count);
  
  srand(time(NULL));
  
  for(int i = 0; i < particle_count; i++) {
    particle_lifetimes[i] = 999;
    particle_positions[i] = v4(0,0,0,0);
    particle_velocities[i] = v4(0,0,0,0);
    
    float rx = ((float)rand() / RAND_MAX) * 2 - 1;
    float ry = ((float)rand() / RAND_MAX) * 2 + 0.5;
    float rz = ((float)rand() / RAND_MAX) * 2 - 1;
    float rm = (float)rand() / RAND_MAX;
    
    vector3 rand = v3_mul(v3_normalize(v3(rx, ry, rz)), rm * 2);
    
    particle_randoms[i] = v4(rand.x, rand.y, rand.z, 0);
  }
    
  glGenBuffers(1, &positions_buffer);
  glBindBuffer(GL_ARRAY_BUFFER, positions_buffer);
  glBufferData(GL_ARRAY_BUFFER, sizeof(vector4) * particle_count, particle_positions, GL_DYNAMIC_COPY);
  
  glGenBuffers(1, &velocities_buffer);
  glBindBuffer(GL_ARRAY_BUFFER, velocities_buffer);
  glBufferData(GL_ARRAY_BUFFER, sizeof(vector4) * particle_count, particle_velocities, GL_DYNAMIC_COPY);
  
  glGenBuffers(1, &lifetimes_buffer);
  glBindBuffer(GL_ARRAY_BUFFER, lifetimes_buffer);
  glBufferData(GL_ARRAY_BUFFER, sizeof(float) * particle_count, particle_lifetimes, GL_DYNAMIC_COPY);
  
  glGenBuffers(1, &randoms_buffer);
  glBindBuffer(GL_ARRAY_BUFFER, randoms_buffer);
  glBufferData(GL_ARRAY_BUFFER, sizeof(vector4) * particle_count, particle_randoms, GL_DYNAMIC_COPY);
 
#ifdef OPEN_GL_CPU
  #ifndef CPU_ONLY
  k_particle_positions = kernel_memory_allocate(sizeof(vector4) * particle_count);
  k_particle_velocities = kernel_memory_allocate(sizeof(vector4) * particle_count);
  k_particle_lifetimes = kernel_memory_allocate(sizeof(float) * particle_count);
  k_particle_randoms = kernel_memory_allocate(sizeof(vector4) * particle_count);
  
  kernel_memory_write(k_particle_positions, sizeof(vector4) * particle_count, particle_positions);
  kernel_memory_write(k_particle_velocities, sizeof(vector4) * particle_count, particle_velocities);
  kernel_memory_write(k_particle_lifetimes, sizeof(float) * particle_count, particle_lifetimes);
  kernel_memory_write(k_particle_randoms, sizeof(vector4) * particle_count, particle_randoms);
  #endif
#else
  k_particle_positions = kernel_memory_from_glbuffer(positions_buffer);
  k_particle_velocities = kernel_memory_from_glbuffer(velocities_buffer);
  k_particle_lifetimes = kernel_memory_from_glbuffer(lifetimes_buffer);
  k_particle_randoms = kernel_memory_from_glbuffer(randoms_buffer);
#endif
  
  kernel_program* program = asset_get("./kernels/particles.cl");
  
  float max_life = 60.0;
  float min_velocity = 0.5;
  
#ifndef CPU_ONLY
  k_update = kernel_program_get_kernel(program, "particle_update");
  kernel_set_argument(k_update, 0, sizeof(kernel_memory), &k_particle_positions);
  kernel_set_argument(k_update, 1, sizeof(kernel_memory), &k_particle_velocities);
  kernel_set_argument(k_update, 2, sizeof(kernel_memory), &k_particle_lifetimes);
  kernel_set_argument(k_update, 3, sizeof(kernel_memory), &k_particle_randoms);
  kernel_set_argument(k_update, 4, sizeof(cl_float), &max_life);
  kernel_set_argument(k_update, 5, sizeof(cl_float), &min_velocity);
  kernel_set_argument(k_update, 9, sizeof(cl_int), &particle_count);
#endif
  
}
Code Example #14
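An older revision of bsd_startupearly (compare Code Example #10): it sizes nbuf and niobuf, creates bufferhdr_map with kmem_suballoc, and wires the buffer headers into it with KMA_HERE | KMA_KOBJECT.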
void
bsd_startupearly()
{
	vm_offset_t		firstaddr;
	vm_size_t		size;
	kern_return_t	ret;

	if (nbuf == 0)
		nbuf = atop_64(sane_size / 100); /* Get 1% of ram, but no more than we can map */
	if (nbuf > 8192)
		nbuf = 8192;
	if (nbuf < 256)
		nbuf = 256;

	if (niobuf == 0)
		niobuf = nbuf;
	if (niobuf > 4096)
		niobuf = 4096;
	if (niobuf < 128)
		niobuf = 128;

	size = (nbuf + niobuf) * sizeof (struct buf);
	size = round_page_32(size);

	ret = kmem_suballoc(kernel_map,
			&firstaddr,
			size,
			FALSE,
			TRUE,
			&bufferhdr_map);

	if (ret != KERN_SUCCESS) 
		panic("Failed to create bufferhdr_map");
	
	ret = kernel_memory_allocate(bufferhdr_map,
			&firstaddr,
			size,
			0,
			KMA_HERE | KMA_KOBJECT);

	if (ret != KERN_SUCCESS)
		panic("Failed to allocate bufferhdr_map");

	buf = (struct buf * )firstaddr;
	bzero(buf,size);

	if ((sane_size > (64 * 1024 * 1024)) || ncl) {
		int scale;
		extern u_long tcp_sendspace;
		extern u_long tcp_recvspace;

		if ((nmbclusters = ncl) == 0) {
			if ((nmbclusters = ((sane_size / 16) / MCLBYTES)) > 16384)
				nmbclusters = 16384;
		}
		if ((scale = nmbclusters / NMBCLUSTERS) > 1) {
			tcp_sendspace *= scale;
			tcp_recvspace *= scale;

			if (tcp_sendspace > (32 * 1024))
				tcp_sendspace = 32 * 1024;
			if (tcp_recvspace > (32 * 1024))
				tcp_recvspace = 32 * 1024;
		}
	}
}
Code Example #15
File: gzalloc.c Project: Apple-FOSS-Mirror/xnu
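The guard-mode allocation path: the element size is rounded up to a page, an extra guard page is requested from gzalloc_map (or the memory is carved from the reserve before kmem is ready), the allocation is filled with a poison pattern, and a gzhdr_t signature header is placed adjacent to the returned element.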
vm_offset_t
gzalloc_alloc(zone_t zone, boolean_t canblock) {
	vm_offset_t addr = 0;

	if (__improbable(gzalloc_mode &&
		(((zone->elem_size >= gzalloc_min) &&
		    (zone->elem_size <= gzalloc_max))) &&
		(zone->gzalloc_exempt == 0))) {

		if (get_preemption_level() != 0) {
			if (canblock == TRUE) {
				pdzalloc_count++;
			}
			else
				return 0;
		}

		vm_offset_t rounded_size = round_page(zone->elem_size + GZHEADER_SIZE);
		vm_offset_t residue = rounded_size - zone->elem_size;
		vm_offset_t gzaddr = 0;
		gzhdr_t *gzh;

		if (!kmem_ready || (vm_page_zone == ZONE_NULL)) {
			/* Early allocations are supplied directly from the
			 * reserve.
			 */
			if (gzalloc_reserve_size < rounded_size)
				panic("gzalloc reserve exhausted");
			gzaddr = gzalloc_reserve;
			/* No guard page for these early allocations, just
			 * waste an additional page.
			 */
			gzalloc_reserve += rounded_size + PAGE_SIZE;
			gzalloc_reserve_size -= rounded_size + PAGE_SIZE;
			OSAddAtomic64((SInt32) (rounded_size), &gzalloc_early_alloc);
		}
		else {
			kern_return_t kr = kernel_memory_allocate(gzalloc_map,
			    &gzaddr, rounded_size + (1*PAGE_SIZE),
			    0, KMA_KOBJECT | gzalloc_guard);
			if (kr != KERN_SUCCESS)
				panic("gzalloc: kernel_memory_allocate for size 0x%llx failed with %d", (uint64_t)rounded_size, kr);

		}

		if (gzalloc_uf_mode) {
			gzaddr += PAGE_SIZE;
			/* The "header" becomes a "footer" in underflow
			 * mode.
			 */
			gzh = (gzhdr_t *) (gzaddr + zone->elem_size);
			addr = gzaddr;
		} else {
			gzh = (gzhdr_t *) (gzaddr + residue - GZHEADER_SIZE);
			addr = (gzaddr + residue);
		}

		/* Fill with a pattern on allocation to trap uninitialized
		 * data use. Since the element size may be "rounded up"
		 * by higher layers such as the kalloc layer, this may
		 * also identify overruns between the originally requested
		 * size and the rounded size via visual inspection.
		 * TBD: plumb through the originally requested size,
		 * prior to rounding by kalloc/IOMalloc etc.
		 * We also add a signature and the zone of origin in a header
		 * prefixed to the allocation.
		 */
		memset((void *)gzaddr, gzalloc_fill_pattern, rounded_size);

		gzh->gzone = (kmem_ready && vm_page_zone) ? zone : GZDEADZONE;
		gzh->gzsize = (uint32_t) zone->elem_size;
		gzh->gzsig = GZALLOC_SIGNATURE;

		lock_zone(zone);
		zone->count++;
		zone->sum_count++;
		zone->cur_size += rounded_size;
		unlock_zone(zone);

		OSAddAtomic64((SInt32) rounded_size, &gzalloc_allocated);
		OSAddAtomic64((SInt32) (rounded_size - zone->elem_size), &gzalloc_wasted);
	}
	return addr;
}