ASCIIHexDecodeFilter::ASCIIHexDecodeFilter(InputStream *pSource) : CompoundFilter(pSource)
{
	unsigned char *ptr;
	int c, n;

	// Decoded output is at most half the encoded length; one extra byte
	// covers a trailing odd digit.
	m_pBuffer = new unsigned char[pSource->Available() / 2 + 1];
	ptr = m_pBuffer;
	// Decode pairs of hex digits until EOF or the '>' end-of-data marker.
	while (true)
	{
		c = pSource->Read();
		if (c < 0 || c == '>')
			break;
		if (c <= ' ')		// skip whitespace between digit pairs
			continue;
		n = HtoD(c) << 4;
		c = pSource->Read();
		if (c < 0 || c == '>')
		{
			*ptr++ = n;	// odd digit count: the low nibble is zero
			break;
		}
		n |= HtoD(c);
		*ptr++ = n;
	}
	m_pStream = new ByteArrayInputStream(m_pBuffer, ptr - m_pBuffer);
}
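In this first example HtoD maps a single ASCII hex digit to its 4-bit value (unlike the NVMeDirect examples below, where HtoD resolves a handle to its device). A minimal sketch of such a digit helper, assuming the usual ASCII ranges; the actual definition is not part of the excerpt:

// Hedged sketch of a hex-digit-to-value helper; not the original definition.
static int HtoD(int c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	if (c >= 'A' && c <= 'F')
		return c - 'A' + 10;
	return 0;	// invalid digits decode as zero (assumption)
}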
Example #2
/*
 * Verify that an I/O range lies entirely within the target partition
 */
bool nvmed_rw_verify_area(NVMED_HANDLE* nvmed_handle, 
		unsigned long __start_lba, unsigned int len) {
	NVMED *nvmed = HtoD(nvmed_handle);
	NVMED_DEVICE_INFO *dev_info = nvmed->dev_info;
	unsigned long start_lba = nvmed->dev_info->start_sect + __start_lba;

	if(start_lba < dev_info->start_sect)
		return false;

	if((dev_info->start_sect + dev_info->nr_sects) < start_lba)
		return false;

	if((dev_info->start_sect + dev_info->nr_sects)
			< (start_lba + len))
		return false;

	return true;
}
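A hedged sketch of how such a bounds check might guard an I/O path; the wrapper name and error convention are illustrative, not taken from the original:

/* Hypothetical guard in front of a command-submission path. */
static int submit_checked_io(NVMED_HANDLE* nvmed_handle,
		unsigned long start_lba, unsigned int len) {
	if(!nvmed_rw_verify_area(nvmed_handle, start_lba, len))
		return -NVMED_FAULT;	/* range falls outside this partition */

	/* build and submit the actual command here */
	return 0;
}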
Example #3
/*
 * Destroy MQ Handle
 */
int nvmed_handle_destroy_mq(NVMED_HANDLE* nvmed_handle) {
	int i;

	if(nvmed_handle == NULL) return -NVMED_NOENTRY;
	if(!FLAG_ISSET(nvmed_handle, HANDLE_MQ)) {
		nvmed_printf("%s: failed to destroy MQ - not MQ\n",
				HtoD(nvmed_handle)->ns_path);

		return -NVMED_FAULT;
	}
	for(i=1; i<nvmed_handle->num_mq; i++) {
		pthread_spin_lock(&nvmed_handle->queue_mq[i]->mngt_lock);
		nvmed_handle->queue_mq[i]->numHandle--;
		pthread_spin_unlock(&nvmed_handle->queue_mq[i]->mngt_lock);
	}

	free(nvmed_handle->queue_mq);

	return nvmed_handle_destroy(nvmed_handle);
}
Example #4
/*
 * I/O Completion of specific I/O
 * target_id : submission id
 */
void nvmed_io_polling(NVMED_HANDLE* nvmed_handle, u16 target_id) {
	NVMED* nvmed;
	NVMED_QUEUE* nvmed_queue;
	NVMED_IOD* iod;
	volatile struct nvme_completion *cqe;
	u16 head, phase;
	nvmed_queue = HtoQ(nvmed_handle);
	nvmed = HtoD(nvmed_handle);

	pthread_spin_lock(&nvmed_queue->cq_lock);
	while(1) {
		head = nvmed_queue->cq_head;
		phase = nvmed_queue->cq_phase;
		iod = nvmed_queue->iod_arr + target_id;
		if(iod->status == IO_COMPLETE) {
			break;
		}
		cqe = (volatile struct nvme_completion *)&nvmed_queue->cqes[head];
		/* Spin until a completion entry with the expected phase bit arrives */
		for (;;) {
			if((cqe->status & 1) == nvmed_queue->cq_phase)
				break;
		}

		if(++head == nvmed->dev_info->q_depth) {
			head = 0;
			phase = !phase;
		}

		iod = nvmed_queue->iod_arr + cqe->command_id;
		nvmed_complete_iod(iod);

		COMPILER_BARRIER();
		/* Ring the CQ doorbell and publish the new head/phase */
		*(volatile u32 *)nvmed_queue->cq_db = head;
		nvmed_queue->cq_head = head;
		nvmed_queue->cq_phase = phase;
	}
	pthread_spin_unlock(&nvmed_queue->cq_lock);
}
Example #5
/* Get CACHE from free list or evict */
NVMED_CACHE* nvmed_get_cache(NVMED_HANDLE* nvmed_handle) {
	NVMED* nvmed = HtoD(nvmed_handle);
	NVMED_CACHE *cache = nvmed->free_head.tqh_first;
	NVMED_CACHE *__cache;
	NVMED_CACHE *ret_cache;
	int i;
	unsigned int start_lpaddr, end_lpaddr;
	TAILQ_HEAD(cache_list, nvmed_cache) temp_head;

	pthread_rwlock_wrlock(&nvmed->cache_radix_lock);
	pthread_spin_lock(&nvmed->cache_list_lock);

	if(cache==NULL)  {
		//HEAD -> LRU, //TAIL -> MRU
		//EVICT - LRU
		cache = nvmed->lru_head.tqh_first;
		if(!FLAG_ISSET(cache, CACHE_DIRTY)) {
			TAILQ_REMOVE(&nvmed->lru_head, cache, cache_list);
			LIST_REMOVE(cache, handle_cache_list);
			radix_tree_delete(&nvmed->cache_root, cache->lpaddr);
			FLAG_SET_FORCE(cache, 0);
			ret_cache = cache;
		}
		else {
			TAILQ_INIT(&temp_head);
			
			while(FLAG_ISSET_SYNC(cache, CACHE_LOCKED) || cache->ref != 0) {
				usleep(1);
			}
			
			start_lpaddr = cache->lpaddr;
			end_lpaddr = cache->lpaddr;
			
			__cache = cache->cache_list.tqe_next;

			TAILQ_REMOVE(&nvmed->lru_head, cache, cache_list);
			radix_tree_delete(&nvmed->cache_root, cache->lpaddr);
			TAILQ_INSERT_HEAD(&temp_head, cache, cache_list);

			for(i=1; i<NVMED_CACHE_FORCE_EVICT_MAX; i++) {
				cache = __cache;
				if(FLAG_ISSET_SYNC(cache, CACHE_LOCKED)) break;
				if(!FLAG_ISSET(cache, CACHE_DIRTY)) break;
				if(start_lpaddr != 0 && cache->lpaddr == start_lpaddr-1 ) {
					//front_merge
					start_lpaddr--;
					__cache = cache->cache_list.tqe_next;
					TAILQ_REMOVE(&nvmed->lru_head, cache, cache_list);
					radix_tree_delete(&nvmed->cache_root, cache->lpaddr);
					TAILQ_INSERT_HEAD(&temp_head, cache, cache_list);

					continue;
				}
				else if(cache->lpaddr == end_lpaddr+1) {
					//back_merge
					end_lpaddr++;
					__cache = cache->cache_list.tqe_next;
					TAILQ_REMOVE(&nvmed->lru_head, cache, cache_list);
					radix_tree_delete(&nvmed->cache_root, cache->lpaddr);
					TAILQ_INSERT_TAIL(&temp_head, cache, cache_list);

					continue;
				}
				else {
					break;
				}
			}
			
			/* Write back the contiguous dirty run before reusing its pages */
			if(FLAG_ISSET(cache, CACHE_DIRTY))
				nvmed_cache_io_rw(nvmed_handle, nvme_cmd_write, temp_head.tqh_first,
						start_lpaddr * PAGE_SIZE,
						(end_lpaddr - start_lpaddr + 1) * PAGE_SIZE,
						HANDLE_SYNC_IO);

			cache = temp_head.tqh_first;
			FLAG_SET_FORCE(cache, 0);
			ret_cache = cache;

			TAILQ_REMOVE(&temp_head, cache, cache_list);
			LIST_REMOVE(cache, handle_cache_list);

			while(temp_head.tqh_first != NULL) {
				TAILQ_REMOVE(&temp_head, temp_head.tqh_first, cache_list);
				TAILQ_INSERT_HEAD(&nvmed->free_head, temp_head.tqh_first, cache_list);
				nvmed->num_cache_usage--;
			}
		}
	}
	else {
		// Remove From Free Queue
		TAILQ_REMOVE(&nvmed->free_head, cache, cache_list);
		FLAG_UNSET_SYNC(cache, CACHE_FREE);
		if(FLAG_ISSET(cache, CACHE_UNINIT)) {
			memset(cache->ptr, 0, PAGE_SIZE);
			virt_to_phys(nvmed, cache->ptr, &cache->paddr, 4096);
			FLAG_UNSET_SYNC(cache, CACHE_UNINIT);
		}
		ret_cache = cache;
	}

	INIT_SYNC(ret_cache->ref);
	pthread_spin_unlock(&nvmed->cache_list_lock);
	pthread_rwlock_unlock(&nvmed->cache_radix_lock);

	return ret_cache;
}
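A hedged sketch of a caller that uses the page returned by Example #5 for a buffered write; the dirty-flag setter is an assumed counterpart of the helpers visible above, and the radix-tree/LRU insertion with its locking is omitted:

/* Hypothetical buffered-write step: claim a page, fill it, mark it dirty. */
static void cache_one_page(NVMED_HANDLE* nvmed_handle,
		unsigned int lpaddr, const void* user_buf) {
	NVMED_CACHE* cache = nvmed_get_cache(nvmed_handle);

	cache->lpaddr = lpaddr;				/* logical page this entry caches */
	memcpy(cache->ptr, user_buf, PAGE_SIZE);	/* copy the caller's data in */
	FLAG_SET_SYNC(cache, CACHE_DIRTY);		/* assumed counterpart of FLAG_UNSET_SYNC */
}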
Example #6
/*
 * Send I/O to submission queue and ring SQ Doorbell
 */
ssize_t nvmed_io(NVMED_HANDLE* nvmed_handle, u8 opcode, 
		u64 prp1, u64 prp2, void* prp2_addr, NVMED_CACHE *__cache, 
		unsigned long start_lba, unsigned int len, int flags, NVMED_AIO_CTX* context) {
	NVMED_QUEUE* nvmed_queue;
	NVMED* nvmed;
	struct nvme_command *cmnd;
	NVMED_IOD* iod;
	u16	target_id;
	NVMED_CACHE *cache = NULL;
	int i, num_cache;

	nvmed_queue = HtoQ(nvmed_handle);
	nvmed = HtoD(nvmed_handle);

	pthread_spin_lock(&nvmed_queue->sq_lock);

	/* Claim the next IOD slot that is not currently in flight
	 * (slots in use are marked IO_INIT below). */
	while(1) {
		target_id = nvmed_queue->iod_pos++;
		iod = nvmed_queue->iod_arr + target_id;
		if(nvmed_queue->iod_pos == nvmed->dev_info->q_depth)
			nvmed_queue->iod_pos = 0;
		if(iod->status != IO_INIT)
			break;
	}

	iod->sq_id = nvmed_queue->sq_tail;
	iod->prp_addr = prp2_addr;
	iod->prp_pa = prp2;
	iod->status = IO_INIT;
	iod->num_cache = 0;
	iod->cache = NULL;
	iod->nvmed_handle = nvmed_handle;
	iod->context = context;
	if(iod->context!=NULL) {
		iod->context->num_init_io++;
		iod->context->status = AIO_PROCESS;
	}

	if(__cache != NULL) {
		num_cache = len / PAGE_SIZE;
		cache = __cache;
		iod->cache = calloc(len / PAGE_SIZE, sizeof(NVMED_CACHE*));
		for(i=0; i<num_cache; i++) {
			iod->cache[i] = cache;
			cache = cache->cache_list.tqe_next;
		}
		iod->num_cache = num_cache;
	}

	cmnd = &nvmed_queue->sq_cmds[nvmed_queue->sq_tail];
	memset(cmnd, 0, sizeof(*cmnd));

	//remap start_lba
	start_lba += nvmed->dev_info->start_sect;

	switch(opcode) {
		case nvme_cmd_flush:
			cmnd->rw.opcode = nvme_cmd_flush;
			cmnd->rw.command_id = target_id;
			cmnd->rw.nsid = nvmed->dev_info->ns_id;
			
			break;

		case nvme_cmd_write:
		case nvme_cmd_read:
			cmnd->rw.opcode = opcode;
			cmnd->rw.command_id = target_id;
			cmnd->rw.nsid = nvmed->dev_info->ns_id;
			cmnd->rw.prp1 = prp1;
			cmnd->rw.prp2 = prp2;
			cmnd->rw.slba = start_lba >> nvmed->dev_info->lba_shift;
			cmnd->rw.length = (len >> nvmed->dev_info->lba_shift) - 1;
			cmnd->rw.control = 0;
			cmnd->rw.dsmgmt = 0;
			
			break;
		
		case nvme_cmd_dsm:
			cmnd->dsm.opcode = nvme_cmd_dsm;
			cmnd->dsm.command_id = target_id;
			cmnd->dsm.nsid = nvmed->dev_info->ns_id;
			cmnd->dsm.prp1 = prp1;
			cmnd->dsm.prp2 = 0;
			cmnd->dsm.nr = 0;
			cmnd->dsm.attributes = NVME_DSMGMT_AD;
			
			break;
	}

	if(++nvmed_queue->sq_tail == nvmed->dev_info->q_depth) 
		nvmed_queue->sq_tail = 0;

	COMPILER_BARRIER();
	*(volatile u32 *)nvmed_queue->sq_db = nvmed_queue->sq_tail;

	pthread_spin_unlock(&nvmed_queue->sq_lock);
	
	/* If Sync I/O => Polling */
	if(__FLAG_ISSET(flags, HANDLE_SYNC_IO)) {
		nvmed_io_polling(nvmed_handle, target_id);
	}

	return len;
}
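Since a flush carries no data, it needs neither PRPs nor a range; a hedged sketch of how Example #6 might be invoked for a synchronous flush (the zero/NULL arguments are inferred from the flush case above, not taken from the original callers):

/* Hypothetical synchronous flush via the nvme_cmd_flush case above. */
nvmed_io(nvmed_handle, nvme_cmd_flush, 0, 0, NULL, NULL, 0, 0,
		HANDLE_SYNC_IO, NULL);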
Example #7
/*
 * Make PRP List for Multiple page I/O from user buffer
 */
int make_prp_list(NVMED_HANDLE* nvmed_handle, void* buf, 
		unsigned long lba_offs, unsigned int io_size, u64* __paBase, 
		u64* prp1, u64* prp2, void** prp2_addr) {
	unsigned int startBufPos = lba_offs / PAGE_SIZE;
	unsigned int numBuf = io_size / PAGE_SIZE;
	unsigned int i;
	u64 *prpTmp;
	u64 *prpBuf;

	u64 *paBase = __paBase;
	u64 __prp1, __prp2;

	u64 paList[64];		/* translation scratch: supports up to 64 pages per I/O */
	unsigned int bufOffs;

	*prp2_addr = NULL;

	if(io_size % PAGE_SIZE > 0) numBuf ++;

	if(paBase == NULL) {
		numBuf = virt_to_phys(HtoD(nvmed_handle), buf, paList, numBuf * PAGE_SIZE);
		bufOffs = (unsigned long)buf % PAGE_SIZE;
		__prp1 = paList[0] + bufOffs;
		if(numBuf == 1) {
			__prp2 = 0;
		}
		else if(numBuf == 2) {
			__prp2 = paList[1];
		}
		else {
			prpBuf = nvmed_handle_get_prp(nvmed_handle, &__prp2);
			*prp2_addr = prpBuf;
			for(i = 1; i < numBuf; i++) {
				prpBuf[i-1] = paList[i];
			}
		}
	}
	else {
		paBase += startBufPos;
		prpTmp = paBase;
		__prp1 = *prpTmp;
		if(numBuf == 1) {
			__prp2 = 0;
		}
		else if(numBuf == 2) {
			__prp2 = *(prpTmp+1);
		}
		else {
			prpBuf = nvmed_handle_get_prp(nvmed_handle, &__prp2);
			*prp2_addr = prpBuf;
			for(i = 1; i < numBuf; i++) {
				prpBuf[i-1] = paBase[i];
			}
		}

	}
	*prp1 = __prp1;
	*prp2 = __prp2;

	return 0;
}
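Putting Examples #6 and #7 together, a hedged sketch of a synchronous read from a user buffer; the wrapper is illustrative, and error handling is omitted:

/* Hypothetical synchronous read: translate the buffer to PRPs, submit,
 * and poll for completion via HANDLE_SYNC_IO. */
static ssize_t sync_read(NVMED_HANDLE* nvmed_handle, void* buf,
		unsigned long start_lba, unsigned int io_size) {
	u64 prp1, prp2;
	void* prp2_addr;

	/* Passing NULL for __paBase lets make_prp_list translate the user
	 * buffer itself (first branch of Example #7). */
	make_prp_list(nvmed_handle, buf, 0, io_size, NULL,
			&prp1, &prp2, &prp2_addr);

	return nvmed_io(nvmed_handle, nvme_cmd_read, prp1, prp2, prp2_addr,
			NULL, start_lba, io_size, HANDLE_SYNC_IO, NULL);
}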