Example #1
/* Free the last page allocated to uvm */
void uvm_page_free()
{
	uint32_t addr = (uint32_t)cur_task->uheap_end - PAGE_SIZE;
	frame_free(get_phys(addr, cur_task->page_dir));
	page_map(addr, 0, 0, cur_task->page_dir);
	cur_task->uheap_end -= PAGE_SIZE;
}
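A plausible inverse of this routine, as a minimal sketch: it assumes a frame_alloc() that returns a free physical frame and reuses the page_map(virt, phys, flags, dir) signature seen above; the PAGE_USER_RW flag name is a placeholder, not taken from the original source.

/* Grow the uvm heap by one page: map a fresh frame at the current
 * heap end, then advance uheap_end. Hypothetical counterpart to
 * uvm_page_free() above. */
void uvm_page_alloc()
{
	uint32_t addr = (uint32_t)cur_task->uheap_end;
	page_map(addr, frame_alloc(), PAGE_USER_RW, cur_task->page_dir);
	cur_task->uheap_end += PAGE_SIZE;
}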
static mali_physical_memory_allocation_result block_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block)
{
	block_allocator * info;
	mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_INTERNAL_FAILURE;

	MALI_DEBUG_ASSERT_POINTER(ctx);
	MALI_DEBUG_ASSERT_POINTER(block);
	info = (block_allocator*)ctx;

	if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;

	if (NULL != info->first_free)
	{
		void * virt;
		u32 phys;
		u32 size;
		block_info * alloc;
		alloc = info->first_free;

		phys = get_phys(info, alloc); /* Does not modify info or alloc */
		size = MALI_BLOCK_SIZE; /* Must be multiple of MALI_MMU_PAGE_SIZE */
		virt = _mali_osk_mem_mapioregion( phys, size, "Mali block allocator page tables" );

		/* Failure of _mali_osk_mem_mapioregion will result in MALI_MEM_ALLOC_INTERNAL_FAILURE,
		 * because it is unlikely that another allocator would be able to map the region either. */

		if ( NULL != virt )
		{
			block->ctx = info; /* same as incoming ctx */
			block->handle = alloc;
			block->phys_base = phys;
			block->size = size;
			block->release = block_allocator_release_page_table_block;
			block->mapping = virt;

			info->first_free = alloc->next;

			alloc->next = NULL; /* Could potentially link many blocks together instead */

			result = MALI_MEM_ALLOC_FINISHED;
		}
	}
	else result = MALI_MEM_ALLOC_NONE;

	_mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);

	return result;
}
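The body above is a textbook pop from a singly linked free list, done under a lock. Stripped of the Mali types, the core step looks like this (generic sketch, not driver code):

#include <stddef.h>

struct node { struct node *next; };

/* Detach and return the head of a free list, or NULL if it is empty;
 * mirrors the alloc = info->first_free ... alloc->next = NULL sequence
 * in the function above. */
static struct node *freelist_pop(struct node **head)
{
	struct node *n = *head;
	if (n != NULL) {
		*head = n->next;
		n->next = NULL;
	}
	return n;
}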
static int block_allocator_allocate(void* ctx, ump_dd_mem * mem)
{
	block_allocator * allocator;
	u32 left;
	block_info * last_allocated = NULL;
	int i = 0;

	BUG_ON(!ctx);
	BUG_ON(!mem);

	allocator = (block_allocator*)ctx;
	left = mem->size_bytes;

	BUG_ON(!left);

	mem->nr_blocks = ((left + UMP_BLOCK_SIZE - 1) & ~(UMP_BLOCK_SIZE - 1)) / UMP_BLOCK_SIZE;
	mem->block_array = (ump_dd_physical_block*)vmalloc(sizeof(ump_dd_physical_block) * mem->nr_blocks);
	if (NULL == mem->block_array)
	{
		MSG_ERR(("Failed to allocate block array\n"));
		return 0;
	}

	if (down_interruptible(&allocator->mutex))
	{
		MSG_ERR(("Could not get mutex to do block_allocate\n"));
		vfree(mem->block_array);
		mem->block_array = NULL;
		return 0;
	}

	mem->size_bytes = 0;

	while ((left > 0) && (allocator->first_free))
	{
		block_info * block;

		block = allocator->first_free;
		allocator->first_free = allocator->first_free->next;
		block->next = last_allocated;
		last_allocated = block;
		allocator->num_free--;

		mem->block_array[i].addr = get_phys(allocator, block);
		mem->block_array[i].size = UMP_BLOCK_SIZE;
		mem->size_bytes += UMP_BLOCK_SIZE;

		i++;

		if (left < UMP_BLOCK_SIZE) left = 0;
		else left -= UMP_BLOCK_SIZE;
	}

	if (left)
	{
		block_info * block;
		/* release all memory back to the pool */
		while (last_allocated)
		{
			block = last_allocated->next;
			last_allocated->next = allocator->first_free;
			allocator->first_free = last_allocated;
			last_allocated = block;
			allocator->num_free++;
		}

		vfree(mem->block_array);
		mem->backend_info = NULL;
		mem->block_array = NULL;

		DBG_MSG(4, ("Could not find a mem-block for the allocation.\n"));
		up(&allocator->mutex);

		return 0;
	}

	mem->backend_info = last_allocated;

	up(&allocator->mutex);
	mem->is_cached = 0;

	return 1;
}
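The nr_blocks computation above rounds the request up to whole blocks with a mask, which is only valid when UMP_BLOCK_SIZE is a power of two; it then matches the plain round-up division. A standalone restatement (BS is a placeholder value, not the real UMP_BLOCK_SIZE):

#include <assert.h>

#define BS 4096u /* assumed power-of-two block size */

static unsigned int blocks_needed(unsigned int bytes)
{
	unsigned int masked  = ((bytes + BS - 1) & ~(BS - 1)) / BS; /* mask form used above */
	unsigned int divided = (bytes + BS - 1) / BS;               /* conventional form */
	assert(masked == divided); /* equal whenever BS is a power of two */
	return masked;
}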
Example #4
int
ar_rev(off_t sksz)
{
	off_t cpos;
	struct mtop mb;
	int phyblk;

	/*
	 * make sure we do not try to reverse on a flawed archive
	 */
	if (lstrval < 0)
		return(lstrval);

	switch (artyp) {
	case ISPIPE:
		if (sksz <= 0)
			break;
		/*
		 * cannot go backwards on these critters
		 */
		paxwarn(1, "Reverse positioning on pipes is not supported.");
		lstrval = -1;
		return(-1);
	case ISREG:
	case ISBLK:
	case ISCHR:
	default:
		if (sksz <= 0)
			break;

		/*
		 * For things other than files, backwards movement has a very
		 * high probability of failure as we really do not know the
		 * true attributes of the device we are talking to (the device
		 * may not even have the ability to lseek() in any direction).
		 * First we figure out where we are in the archive.
		 */
		if ((cpos = lseek(arfd, (off_t)0L, SEEK_CUR)) < 0) {
			syswarn(1, errno,
			   "Unable to obtain current archive byte offset");
			lstrval = -1;
			return(-1);
		}

		/*
		 * we may try to go backwards past the start when the archive
		 * is only a single record. If this happens and we are on a
		 * multi-volume archive, we need to go to the end of the
		 * previous volume and continue our movement backwards from
		 * there.
		 */
		if ((cpos -= sksz) < (off_t)0L) {
			if (arvol > 1) {
				/*
				 * this should never happen
				 */
				paxwarn(1,"Reverse position on previous volume.");
				lstrval = -1;
				return(-1);
			}
			cpos = (off_t)0L;
		}
		if (lseek(arfd, cpos, SEEK_SET) < 0) {
			syswarn(1, errno, "Unable to seek archive backwards");
			lstrval = -1;
			return(-1);
		}
		break;
	case ISTAPE:
		/*
		 * Calculate and move the proper number of PHYSICAL tape
		 * blocks. If the sksz is not an even multiple of the physical
		 * tape size, we cannot do the move (this should never happen).
		 * (We also cannot handle trailers spread over two vols.)
		 * get_phys() also makes sure we are in front of the filemark.
		 */
		if ((phyblk = get_phys()) <= 0) {
			lstrval = -1;
			return(-1);
		}

		/*
		 * make sure future tape reads only go by physical tape block
		 * size (set rdblksz to the real size).
		 */
		rdblksz = phyblk;

		/*
		 * if no movement is required, just return (we must be after
		 * get_phys() so the physical blocksize is properly set)
		 */
		if (sksz <= 0)
			break;

		/*
		 * ok we have to move. Make sure the tape drive can do it.
		 */
		if (sksz % phyblk) {
			paxwarn(1,
			    "Tape drive unable to backspace requested amount");
			lstrval = -1;
			return(-1);
		}

		/*
		 * move backwards the requested number of bytes
		 */
		mb.mt_op = MTBSR;
		mb.mt_count = sksz/phyblk;
		if (ioctl(arfd, MTIOCTOP, &mb) < 0) {
			syswarn(1,errno, "Unable to backspace tape %d blocks.",
			    mb.mt_count);
			lstrval = -1;
			return(-1);
		}
		break;
	}
	lstrval = 1;
	return(0);
}
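The ISTAPE branch can only backspace in whole physical tape blocks, so it rejects any request that is not an exact multiple of the block size before issuing MTBSR with a count in blocks, not bytes. The same guard as an isolated helper (illustrative only):

#include <sys/types.h>

/* Store the whole-block count for a byte offset, or fail if the drive
 * would have to stop mid-block, as the sksz % phyblk test above does. */
static int bytes_to_blocks(off_t bytes, int blksz, off_t *blocks)
{
	if (blksz <= 0 || bytes % blksz)
		return -1;
	*blocks = bytes / blksz;
	return 0;
}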
static mali_physical_memory_allocation_result block_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
{
	block_allocator * info;
	u32 left;
	block_info * last_allocated = NULL;
	mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_NONE;
	block_allocator_allocation *ret_allocation;

	MALI_DEBUG_ASSERT_POINTER(ctx);
	MALI_DEBUG_ASSERT_POINTER(descriptor);
	MALI_DEBUG_ASSERT_POINTER(offset);
	MALI_DEBUG_ASSERT_POINTER(alloc_info);

	info = (block_allocator*)ctx;
	left = descriptor->size - *offset;
	MALI_DEBUG_ASSERT(0 != left);

	if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;

	ret_allocation = _mali_osk_malloc( sizeof(block_allocator_allocation) );

	if ( NULL == ret_allocation )
	{
		/* Failure; try another allocator by returning MALI_MEM_ALLOC_NONE */
		_mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
		return result;
	}

	ret_allocation->start_offset = *offset;
	ret_allocation->mapping_length = 0;

	while ((left > 0) && (info->first_free))
	{
		block_info * block;
		u32 phys_addr;
		u32 padding;
		u32 current_mapping_size;

		block = info->first_free;
		info->first_free = info->first_free->next;
		block->next = last_allocated;
		last_allocated = block;

		phys_addr = get_phys(info, block);

		padding = *offset & (MALI_BLOCK_SIZE-1);

		if (MALI_BLOCK_SIZE - padding < left)
		{
			current_mapping_size = MALI_BLOCK_SIZE - padding;
		}
		else
		{
			current_mapping_size = left;
		}

		if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, phys_addr + padding, info->cpu_usage_adjust, current_mapping_size))
		{
			MALI_DEBUG_PRINT(1, ("Mapping of physical memory failed\n"));
			result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
			mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->start_offset, ret_allocation->mapping_length, (_mali_osk_mem_mapregion_flags_t)0);

			/* release all memory back to the pool */
			while (last_allocated)
			{
				/* This relinks every block we've just allocated back into the free-list */
				block = last_allocated->next;
				last_allocated->next = info->first_free;
				info->first_free = last_allocated;
				last_allocated = block;
			}

			break;
		}

		*offset += current_mapping_size;
		left -= current_mapping_size;
		ret_allocation->mapping_length += current_mapping_size;
	}

	_mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);

	if (last_allocated)
	{
		if (left) result = MALI_MEM_ALLOC_PARTIAL;
		else result = MALI_MEM_ALLOC_FINISHED;

		/* Record all the information about this allocation */
		ret_allocation->last_allocated = last_allocated;
		ret_allocation->engine = engine;
		ret_allocation->descriptor = descriptor;

		alloc_info->ctx = info;
		alloc_info->handle = ret_allocation;
		alloc_info->release = block_allocator_release;
	}
	else
	{
		/* Free the allocation information - nothing to be passed back */
		_mali_osk_free( ret_allocation );
	}

	return result;
}
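The padding arithmetic above handles a *offset that is not block aligned: with a power-of-two MALI_BLOCK_SIZE, the low bits of the offset give the position inside the current block, and the mapping size is clamped to the smaller of the remaining block and the remaining request. In isolation (BLK stands in for MALI_BLOCK_SIZE):

#define BLK 0x1000u /* placeholder power-of-two block size */

/* Bytes that can be mapped from the current block, given the running
 * offset and the bytes still left to map. */
static unsigned int usable_in_block(unsigned int offset, unsigned int left)
{
	unsigned int padding = offset & (BLK - 1); /* intra-block offset */
	unsigned int room    = BLK - padding;      /* bytes to end of block */
	return room < left ? room : left;
}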
mali_mem_allocation *mali_mem_block_alloc(u32 mali_addr, u32 size, struct vm_area_struct *vma, struct mali_session_data *session)
{
	_mali_osk_errcode_t err;
	mali_mem_allocation *descriptor;
	block_allocator *info;
	u32 left;
	block_info *last_allocated = NULL;
	block_allocator_allocation *ret_allocation;
	u32 offset = 0;

	size = ALIGN(size, MALI_BLOCK_SIZE);

	info = mali_mem_block_gobal_allocator;
	if (NULL == info) return NULL;

	left = size;
	MALI_DEBUG_ASSERT(0 != left);

	descriptor = mali_mem_descriptor_create(session, MALI_MEM_BLOCK);
	if (NULL == descriptor) {
		return NULL;
	}

	descriptor->mali_mapping.addr = mali_addr;
	descriptor->size = size;
	descriptor->cpu_mapping.addr = (void __user *)vma->vm_start;
	descriptor->cpu_mapping.ref = 1;

	if (VM_SHARED == (VM_SHARED & vma->vm_flags)) {
		descriptor->mali_mapping.properties = MALI_MMU_FLAGS_DEFAULT;
	} else {
		/* Cached Mali memory mapping */
		descriptor->mali_mapping.properties = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
		vma->vm_flags |= VM_SHARED;
	}

	ret_allocation = &descriptor->block_mem.mem;

	ret_allocation->mapping_length = 0;

	_mali_osk_mutex_wait(session->memory_lock);
	mutex_lock(&info->mutex);

	if (left > (info->free_blocks * MALI_BLOCK_SIZE)) {
		MALI_DEBUG_PRINT(2, ("Mali block allocator: not enough free blocks to service allocation (%u)\n", left));
		mutex_unlock(&info->mutex);
		_mali_osk_mutex_signal(session->memory_lock);
		mali_mem_descriptor_destroy(descriptor);
		return NULL;
	}

	err = mali_mem_mali_map_prepare(descriptor);
	if (_MALI_OSK_ERR_OK != err) {
		mutex_unlock(&info->mutex);
		_mali_osk_mutex_signal(session->memory_lock);
		mali_mem_descriptor_destroy(descriptor);
		return NULL;
	}

	while ((left > 0) && (info->first_free)) {
		block_info *block;
		u32 phys_addr;
		u32 current_mapping_size;

		block = info->first_free;
		info->first_free = info->first_free->next;
		block->next = last_allocated;
		last_allocated = block;

		phys_addr = get_phys(info, block);

		if (MALI_BLOCK_SIZE < left) {
			current_mapping_size = MALI_BLOCK_SIZE;
		} else {
			current_mapping_size = left;
		}

		mali_mem_block_mali_map(descriptor, phys_addr, mali_addr + offset, current_mapping_size);
		if (mali_mem_block_cpu_map(descriptor, vma, phys_addr, offset, current_mapping_size, info->cpu_usage_adjust)) {
			/* release all memory back to the pool */
			while (last_allocated) {
				/* This relinks every block we've just allocated back into the free-list */
				block = last_allocated->next;
				last_allocated->next = info->first_free;
				info->first_free = last_allocated;
				last_allocated = block;
			}

			mutex_unlock(&info->mutex);
			_mali_osk_mutex_signal(session->memory_lock);

			mali_mem_mali_map_free(descriptor);
			mali_mem_descriptor_destroy(descriptor);

			return NULL;
		}

		left -= current_mapping_size;
		offset += current_mapping_size;
		ret_allocation->mapping_length += current_mapping_size;

		--info->free_blocks;
	}

	mutex_unlock(&info->mutex);
	_mali_osk_mutex_signal(session->memory_lock);

	MALI_DEBUG_ASSERT(0 == left);

	/* Record all the information about this allocation */
	ret_allocation->last_allocated = last_allocated;
	ret_allocation->info = info;

	return descriptor;
}
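Both Mali allocators unwind a failed allocation the same way: walk the chain of just-claimed blocks and push each one back onto the free list. The generic shape of that loop (illustrative, with a local node type):

struct node { struct node *next; };

/* Return every node on 'chain' to the free list headed by *head,
 * exactly as the release-all loops above relink last_allocated back
 * onto first_free. */
static void freelist_push_all(struct node **head, struct node *chain)
{
	while (chain != NULL) {
		struct node *next = chain->next;
		chain->next = *head;
		*head = chain;
		chain = next;
	}
}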
Example #7
static int ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long args)
{
	int                    ret;    /* used only by the disabled PMU paths below */
	unsigned int __user   *argp = (unsigned int __user *) args;
	unsigned long          physp;
	unsigned long          virtp;
	unsigned int           type;   /* used only by the disabled PMU paths below */

	//__D("ioctl %d received. \n", cmd);

	switch (cmd) {
	case IPERA_INIT_PMU_STATICS:
		init_pmu_asm();
		__D("IPERA_INIT_PMU_STATICS : returning\n");
		break;

	case IPERA_START_PMU_CACHES_STATICS:
		//memset(&type, 0, sizeof(type));
		//ret = copy_from_user(&type, argp, sizeof(type));

		//set_pmu_event_asm(PMU_ICACHE_EXEC, EVENT_COUNTER0);
		//set_pmu_event_asm(PMU_ICACHE_MISS, EVENT_COUNTER1);
		//set_pmu_event_asm(PMU_DCACHE_ACCESS, EVENT_COUNTER2);
		//set_pmu_event_asm(PMU_DCACHE_MISS, EVENT_COUNTER3);
		//start_pmu_asm();
		//__D("IPERA_START_PMU_CACHES_STATICS : returning\n");
		break;

	case IPERA_END_PMU_CACHES_STATICS:
		//memset(&type, 0, sizeof(type));
		//ret = copy_from_user(&type, argp, sizeof(type));
		
		//stop_pmu_asm();
		//pmu_statics[type].pmu_count		+= 1; 
		//pmu_statics[type].pmu_cycles    += get_clock_counter_asm();
		//pmu_statics[type].pmu_instr_exec    += get_pmnx_counter_asm(EVENT_COUNTER0);
		//pmu_statics[type].pmu_icache_miss    += get_pmnx_counter_asm(EVENT_COUNTER1);
		//pmu_statics[type].pmu_dcache_access    += get_pmnx_counter_asm(EVENT_COUNTER2);
		//pmu_statics[type].pmu_dcache_miss    += get_pmnx_counter_asm(EVENT_COUNTER3);
		//__D("IPERA_END_PMU_CACHES_STATICS : returning\n");
		break;
		
	case IPERA_GET_STATICS:
		//memset(&type, 0, sizeof(type));
		//ret = copy_from_user(&type, argp, sizeof(type));
		
		//ret_get_cycles = pmu_statics[type].pmu_cycles;
		//__D("IPERA_GET_CYCLES : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_count);
		//__D("IPERA_GET_CYCLES : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_cycles);
		//__D("IPERA_GET_ICACHE_EXEC : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_instr_exec);
		//__D("IPERA_GET_ICACHE_MISS : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_icache_miss);
		//__D("IPERA_GET_DCACHE_ACCESS : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_dcache_access);
		//__D("IPERA_GET_DCACHE_MISS : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_dcache_miss);
		//ret = copy_to_user(argp, &pmu_statics[type], sizeof(pmu_statics[type]));
		break;

	case IPERA_GET_PHYS:
		if (get_user(virtp, argp))
			return -EFAULT;
		physp = ipera_get_phys(virtp);
		if (put_user(physp, argp))
			return -EFAULT;
		//__D("IPERA_GET_PHYS : returning %#lx\n", physp);
		break;

		
#if 0
	case IPERA_GET_CYCLES:
		__D("IPERA_GET_CYCLES : received.\n");
		cur_cycles = get_cycles();
		copy_to_user(argp, &cur_cycles, sizeof(cur_cycles));
		__D("IPERA_GET_CYCLES : returning %#lx\n", cur_cycles);
		break;

	case IPERA_GET_PHYS:
		__D("IPERA_GET_PHYS : received.\n");
		get_user(virtp, argp);
		physp = get_phys(virtp);
		put_user(physp, argp);
		__D("IPERA_GET_PHYS : returning %#lx\n", physp);
		break;

    case IPERA_DMACPY:
        __D("IPERA_DMACPY : received.\n");
        if (copy_from_user(&dma, argp, sizeof(dma))) {
            return -EFAULT;
        }
        err = davinci_request_dma(DM350_DMA_CHANNEL_ANY, "EDMA memcpy", memcpy_dma_irq_handler, NULL, &master_ch, &tcc, EVENTQ_1);
        if (err < 0) {
            __E("Error in requesting Master channel %d = 0x%x\n", master_ch, err);
            return err;
        } else if(master_ch != 25)  __E("get channel %d \n", master_ch);
        davinci_stop_dma(master_ch);

        init_completion(&edmacompletion);
        davinci_set_dma_src_params(master_ch, (unsigned long) edmaparams.src, edmaparams.srcmode, edmaparams.srcfifowidth);
        davinci_set_dma_dest_params(master_ch, (unsigned long) edmaparams.dst, edmaparams.dstmode, edmaparams.dstfifowidth);
        davinci_set_dma_src_index(master_ch, edmaparams.srcbidx, edmaparams.srccidx);
        davinci_set_dma_dest_index(master_ch, edmaparams.dstbidx, edmaparams.dstcidx);
        davinci_set_dma_transfer_params(master_ch, edmaparams.acnt, edmaparams.bcnt, edmaparams.ccnt, edmaparams.bcntrld, edmaparams.syncmode);
        davinci_get_dma_params(master_ch, &paramentry);
        davinci_set_dma_params(master_ch, &paramentry);
        davinci_start_dma(master_ch);
        wait_for_completion(&edmacompletion);
        //printk("Dma completed... \n");
        davinci_stop_dma(master_ch);
        davinci_free_dma(master_ch);
        break;
#endif

    default:
        __E("Unknown ioctl received = %d.\n", cmd);
        return -EINVAL;
    }
    return 0;
}
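From user space, IPERA_GET_PHYS is the usual in-place ioctl round trip: the caller passes a pointer holding a virtual address and the driver overwrites it with the physical address. A hypothetical invocation; the device node path and the header providing IPERA_GET_PHYS are assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "ipera_ioctl.h" /* hypothetical header defining IPERA_GET_PHYS */

int main(void)
{
	unsigned int addr = 0x12345678u; /* virtual address to translate */
	int fd = open("/dev/ipera", O_RDWR); /* hypothetical device node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, IPERA_GET_PHYS, &addr) == 0) /* kernel rewrites addr in place */
		printf("phys = %#x\n", addr);
	close(fd);
	return 0;
}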