static void active_worker(struct work_struct *work)
{
	OMAPLFB_DEVINFO *psDevInfo =
		container_of(work, OMAPLFB_DEVINFO, active_work);
	OMAPLFB_SWAPCHAIN *psSwapChain = psDevInfo->psSwapChain;
	OMAPLFB_BUFFER *psBuffer;

	OMAPLFBSync();

	mutex_lock(&psDevInfo->active_list_lock);
	if (list_empty(&psDevInfo->active_list)) {
		pr_warning("omaplfb: syncing with no active buffer\n");
		mutex_unlock(&psDevInfo->active_list_lock);
		return;
	}

	psBuffer = list_first_entry(&psDevInfo->active_list,
				    OMAPLFB_BUFFER, list);

	list_del_init(&psBuffer->list);

	if (!list_empty(&psDevInfo->active_list)) {
		psBuffer = list_first_entry(&psDevInfo->active_list,
					    OMAPLFB_BUFFER, list);
		OMAPLFBFlip(psSwapChain,
			    (unsigned long)psBuffer->sSysAddr.uiAddr);

		psSwapChain->psPVRJTable->
			pfnPVRSRVCmdComplete(psBuffer->hCmdCookie, IMG_TRUE);

		queue_work(psDevInfo->workq, &psDevInfo->active_work);
	}
	mutex_unlock(&psDevInfo->active_list_lock);
}
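The function above, like several of the examples that follow, uses the standard "drain" idiom: take the first entry under a lock, drop the lock around the expensive work, then re-take it. A minimal sketch of the idiom, assuming only <linux/list.h> and <linux/spinlock.h>; every identifier here (my_item, drain_list, payload) is hypothetical:

struct my_item {
	struct list_head link;
	int payload;
};

static void drain_list(struct list_head *head, spinlock_t *lock)
{
	struct my_item *it;

	spin_lock(lock);
	while (!list_empty(head)) {
		/* list_first_entry() is only valid on a non-empty list */
		it = list_first_entry(head, struct my_item, link);
		list_del_init(&it->link);
		spin_unlock(lock);

		/* process it->payload without holding the lock */

		spin_lock(lock);
	}
	spin_unlock(lock);
}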
Example #2
static void dt3155_stop_streaming(struct vb2_queue *q)
{
	struct dt3155_priv *pd = vb2_get_drv_priv(q);
	struct vb2_buffer *vb;

	spin_lock_irq(&pd->lock);
	/* stop the board */
	write_i2c_reg_nowait(pd->regs, CSR2, pd->csr2);
	iowrite32(FIFO_EN | SRST | FLD_CRPT_ODD | FLD_CRPT_EVEN |
		  FLD_DN_ODD | FLD_DN_EVEN, pd->regs + CSR1);
	/* disable interrupts, clear all irq flags */
	iowrite32(FLD_START | FLD_END_EVEN | FLD_END_ODD, pd->regs + INT_CSR);
	spin_unlock_irq(&pd->lock);

	/*
	 * It is not clear whether the DMA stops at once or whether it
	 * will finish the current frame or field first. To be on the
	 * safe side we wait a bit.
	 */
	msleep(45);

	spin_lock_irq(&pd->lock);
	if (pd->curr_buf) {
		vb2_buffer_done(&pd->curr_buf->vb2_buf, VB2_BUF_STATE_ERROR);
		pd->curr_buf = NULL;
	}

	while (!list_empty(&pd->dmaq)) {
		vb = list_first_entry(&pd->dmaq, typeof(*vb), done_entry);
		list_del(&vb->done_entry);
		vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_irq(&pd->lock);
}
Example #3
void spy_rw_buffer_reset_read(spy_rw_buffer_t *buffer, size_t pos)
{
    spy_mem_block_t *mem_block;

    assert(pos <= buffer->write_pos);

    buffer->read_block = NULL;
    buffer->read_base  = 0;
    buffer->read_pos   = 0;

    if (list_empty(&buffer->mem_blocks)) {
        assert(pos == 0);
        return;
    }

    mem_block = list_first_entry(&buffer->mem_blocks, spy_mem_block_t, list);

    while (buffer->read_base + mem_block->size < pos) {
        buffer->read_base += mem_block->size;

        mem_block = list_next_entry(mem_block, list);
    }

    buffer->read_block = mem_block;
    buffer->read_pos   = pos;
}
Example #4
/**
 * Get a fuse receive buffer from the distributor.
 *
 * @retval <> NULL on success
 * @retval NULL if no buffer is available
 */
rozofs_fuse_rcv_buf_t *rozofs_fuse_alloc_rcv_buffer_pool()
{
    rozofs_fuse_rcv_buf_t *p_rcv_buf = NULL;
    if (list_empty(&rozofs_fuse_rcv_buf_head)) return NULL;
    rozofs_fuse_rcv_buf_count--;
    p_rcv_buf = list_first_entry(&rozofs_fuse_rcv_buf_head,rozofs_fuse_rcv_buf_t,list);
    list_remove(&p_rcv_buf->list);
    return p_rcv_buf;
}
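A buffer taken from the pool this way would normally be handed back by an inverse operation. A hypothetical counterpart is sketched below; list_push_front() is assumed to exist alongside the list_remove() used above, and the function name is invented here:

/* Hypothetical release path: return a receive buffer to the pool. */
void rozofs_fuse_release_rcv_buffer_pool(rozofs_fuse_rcv_buf_t *p_rcv_buf)
{
    list_push_front(&rozofs_fuse_rcv_buf_head, &p_rcv_buf->list);
    rozofs_fuse_rcv_buf_count++;
}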
Example #5
static void migration_thread(void *__data)
{
	int cpu = (long) __data;
	edf_wm_task_t *et;
	struct timespec ts;

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		spin_lock_irq(&kthread[cpu].lock);
		if (list_empty(&kthread[cpu].list)) {
			spin_unlock_irq(&kthread[cpu].lock);
			schedule();
			set_current_state(TASK_INTERRUPTIBLE);
			continue;
		}

		/* take the first task from the list (FIFO order). */
		et = list_first_entry(&kthread[cpu].list, 
							  edf_wm_task_t,
							  migration_list);
		list_del_init(&et->migration_list);
		spin_unlock_irq(&kthread[cpu].lock);

		/* account runtime. */
		jiffies_to_timespec(et->runtime[cpu], &ts);
		et->rt->task->dl.sched_runtime = timespec_to_ns(&ts);

		/* trace precise deadlines. */
		et->rt->deadline_time += et->deadline;
		et->rt->task->dl.sched_deadline = et->sched_split_deadline;
		et->rt->task->dl.deadline = et->next_release;
		et->next_release += et->sched_split_deadline;

		/* now let's migrate the task! */
		et->rt->task->dl.flags |= DL_NEW;
		migrate_task(et->rt, cpu);
		wake_up_process(et->rt->task);

		/* when the budget is exhausted, the deadline should be advanced
		   by et->sched_deadline, not by et->sched_split_deadline. */
		et->rt->task->dl.sched_deadline = et->sched_deadline;

		/* account runtime. */
		jiffies_to_timespec(et->runtime[cpu], &ts);
		et->rt->task->dl.runtime = timespec_to_ns(&ts);

		/* activate the timer for the next migration of this task. */
		if (et->last_cpu != cpu) {
			et->rt->task->dl.flags &= ~SCHED_EXHAUSTIVE;
			start_window_timer(et);
		}
		else {
			et->rt->task->dl.flags |= SCHED_EXHAUSTIVE;
		}
	}
}
Example #6
static int
do_action_off (const fence_kdump_opts_t *opts)
{
    int error;
    fd_set rfds;
    fence_kdump_msg_t msg;
    fence_kdump_node_t *node;
    struct timeval timeout;

    if (list_empty (&opts->nodes)) {
        return (1);
    } else {
        node = list_first_entry (&opts->nodes, fence_kdump_node_t, list);
    }

    timeout.tv_sec = opts->timeout;
    timeout.tv_usec = 0;

    FD_ZERO (&rfds);
    FD_SET (node->socket, &rfds);

    log_debug (0, "waiting for message from '%s'\n", node->addr);

    for (;;) {
        error = select (node->socket + 1, &rfds, NULL, NULL, &timeout);
        if (error < 0) {
            log_error (2, "select (%s)\n", strerror (errno));
            break;
        }
        if (error == 0) {
            log_debug (0, "timeout after %d seconds\n", opts->timeout);
            break;
        }

        if (read_message (node, &msg, sizeof (msg)) != 0) {
            continue;
        }

        if (msg.magic != FENCE_KDUMP_MAGIC) {
            log_debug (1, "invalid magic number '0x%X'\n", msg.magic);
            continue;
        }

        switch (msg.version) {
        case FENCE_KDUMP_MSGV1:
            log_debug (0, "received valid message from '%s'\n", node->addr);
            return (0);
        default:
            log_debug (1, "invalid message version '0x%X'\n", msg.version);
            continue;
        }
    }

    return (1);
}
Example #7
struct ntfs_mp *idx_blocks2mpl(const struct nhr_idx *idx)
{
	const struct nhr_idx_node *idxn;
	struct ntfs_mp *mp_buf, *mp;
	unsigned mp_buf_sz = 2;	/* Optimized for compact indexes */

	idxn = list_first_entry(&idx->nodes, typeof(*idxn), list);
	while (&idxn->list != &idx->nodes && idxn->vcn < 0)
		idxn = list_next_entry(idxn, list);

	if (&idxn->list == &idx->nodes)	/* No blocks */
		return NULL;
	if (idxn->vcn != 0)		/* Wrong start block */
		return NULL;

	mp_buf = malloc(mp_buf_sz * sizeof(*mp));
	if (!mp_buf)
		return NULL;
	mp = mp_buf;

	mp->vcn = 0;
	mp->lcn = idxn->lcn;
	mp->clen = 1;

	for (idxn = list_next_entry(idxn, list);
	     &idxn->list != &idx->nodes;
	     idxn = list_next_entry(idxn, list)) {
		if (mp->vcn + mp->clen != idxn->vcn) {
			free(mp_buf);
			return NULL;
		}

		if (mp->lcn + mp->clen == idxn->lcn) {
			mp->clen++;
		} else {
			mp++;
			mp->vcn = idxn->vcn;
			mp->lcn = idxn->lcn;
			mp->clen = 1;

			if ((mp - mp_buf) + 1 == mp_buf_sz) {
				mp_buf_sz += 4;
				struct ntfs_mp *new_buf = realloc(mp_buf, mp_buf_sz * sizeof(*mp));

				if (!new_buf) {
					free(mp_buf);
					return NULL;
				}
				mp_buf = new_buf;
				mp = mp_buf + mp_buf_sz - 4 - 1;
			}
		}
	}

	/* Set end marker */
	mp++;
	mp->vcn = 0;
	mp->lcn = 0;
	mp->clen = 0;

	return mp_buf;
}
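The open-coded cursor test (&idxn->list != &idx->nodes) is the manual form of stopping at the list head. The initial scan for the first mapped block could equally be written with list_for_each_entry, as in this sketch (cur and found are locals introduced here, not part of the original):

	const struct nhr_idx_node *cur, *found = NULL;

	/* locate the first node with a non-negative VCN */
	list_for_each_entry(cur, &idx->nodes, list) {
		if (cur->vcn >= 0) {
			found = cur;
			break;
		}
	}
	if (!found || found->vcn != 0)	/* no blocks, or wrong start block */
		return NULL;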
Example #8
gevent* gui_event_recv(gwindow *window)
{
	struct list_head *equeue;
	gevent *event = NULL;

	equeue = (window == NULL) ? &gui_equeue_public : &window->equeue;
	if(!list_empty(equeue)) {
		event = list_first_entry(equeue, gui_event_s, list);
		list_del(&event->list);
	}
	return event;
}
Example #9
static void
dt3155_stop_streaming(struct vb2_queue *q)
{
	struct dt3155_priv *pd = vb2_get_drv_priv(q);
	struct vb2_buffer *vb;

	spin_lock_irq(&pd->lock);
	while (!list_empty(&pd->dmaq)) {
		vb = list_first_entry(&pd->dmaq, typeof(*vb), done_entry);
		list_del(&vb->done_entry);
		vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_irq(&pd->lock);
	msleep(45); /* the irq handler will stop the hardware */
}
Example #10
static irqreturn_t dt3155_irq_handler_even(int irq, void *dev_id)
{
	struct dt3155_priv *ipd = dev_id;
	struct vb2_buffer *ivb;
	dma_addr_t dma_addr;
	u32 tmp;

	tmp = ioread32(ipd->regs + INT_CSR) & (FLD_START | FLD_END_ODD);
	if (!tmp)
		return IRQ_NONE;  /* not our irq */
	if ((tmp & FLD_START) && !(tmp & FLD_END_ODD)) {
		iowrite32(FLD_START_EN | FLD_END_ODD_EN | FLD_START,
							ipd->regs + INT_CSR);
		return IRQ_HANDLED; /* start of field irq */
	}
	tmp = ioread32(ipd->regs + CSR1) & (FLD_CRPT_EVEN | FLD_CRPT_ODD);
	if (tmp) {
		iowrite32(FIFO_EN | SRST | FLD_CRPT_ODD | FLD_CRPT_EVEN |
						FLD_DN_ODD | FLD_DN_EVEN |
						CAP_CONT_EVEN | CAP_CONT_ODD,
							ipd->regs + CSR1);
		mmiowb();
	}

	spin_lock(&ipd->lock);
	if (ipd->curr_buf && !list_empty(&ipd->dmaq)) {
		ipd->curr_buf->vb2_buf.timestamp = ktime_get_ns();
		ipd->curr_buf->sequence = ipd->sequence++;
		ipd->curr_buf->field = V4L2_FIELD_NONE;
		vb2_buffer_done(&ipd->curr_buf->vb2_buf, VB2_BUF_STATE_DONE);

		ivb = list_first_entry(&ipd->dmaq, typeof(*ivb), done_entry);
		list_del(&ivb->done_entry);
		ipd->curr_buf = to_vb2_v4l2_buffer(ivb);
		dma_addr = vb2_dma_contig_plane_dma_addr(ivb, 0);
		iowrite32(dma_addr, ipd->regs + EVEN_DMA_START);
		iowrite32(dma_addr + ipd->width, ipd->regs + ODD_DMA_START);
		iowrite32(ipd->width, ipd->regs + EVEN_DMA_STRIDE);
		iowrite32(ipd->width, ipd->regs + ODD_DMA_STRIDE);
		mmiowb();
	}

	/* enable interrupts, clear all irq flags */
	iowrite32(FLD_START_EN | FLD_END_ODD_EN | FLD_START |
			FLD_END_EVEN | FLD_END_ODD, ipd->regs + INT_CSR);
	spin_unlock(&ipd->lock);
	return IRQ_HANDLED;
}
Example #11
/*
 * deferred_probe_work_func() - Retry probing devices in the active list.
 */
static void deferred_probe_work_func(struct work_struct *work)
{
	struct device *dev;
	struct device_private *private;
	/*
	 * This block processes every device in the deferred 'active' list.
	 * Each device is removed from the active list and passed to
	 * bus_probe_device() to re-attempt the probe.  The loop continues
	 * until every device in the active list is removed and retried.
	 *
	 * Note: Once the device is removed from the list and the mutex is
	 * released, it is possible for the device to be freed by another
	 * thread, causing an invalid pointer dereference.  This code uses
	 * get/put_device() to ensure the device structure cannot disappear
	 * from under our feet.
	 */
	mutex_lock(&deferred_probe_mutex);
	while (!list_empty(&deferred_probe_active_list)) {
		private = list_first_entry(&deferred_probe_active_list,
					typeof(*dev->p), deferred_probe);
		dev = private->device;
		list_del_init(&private->deferred_probe);

		get_device(dev);

		/*
		 * Drop the mutex while probing each device; the probe path may
		 * manipulate the deferred list
		 */
		mutex_unlock(&deferred_probe_mutex);

		/*
		 * Force the device to the end of the dpm_list since
		 * the PM code assumes that the order we add things to
		 * the list is a good order for suspend but deferred
		 * probe makes that very unsafe.
		 */
		device_pm_lock();
		device_pm_move_last(dev);
		device_pm_unlock();

		dev_dbg(dev, "Retrying from deferred list\n");
		bus_probe_device(dev);

		mutex_lock(&deferred_probe_mutex);

		put_device(dev);
	}
	mutex_unlock(&deferred_probe_mutex);
}
Example #12
// Dequeue a TS packet from the TS data queue.
MTV250_TS_PKT_INFO * mtv250_get_tsp(void)
{
	MTV250_TS_PKT_INFO *tsp = NULL;
	struct list_head *head_ptr = &mtv250_tsp_queue.head;
		
	if(mtv250_tsp_queue.cnt != 0) /* unlocked fast-path check, equivalent to !list_empty(head_ptr) */
	{
		spin_lock(&mtv250_tsp_queue.lock);
		
		tsp = list_first_entry(head_ptr, MTV250_TS_PKT_INFO, link);
		list_del(&tsp->link);
		mtv250_tsp_queue.cnt--;

		spin_unlock(&mtv250_tsp_queue.lock);
	}
	
	return tsp;
}
Example #13
int spy_rw_buffer_expand(spy_rw_buffer_t *buffer)
{
    spy_mem_block_t *mem_block = NULL;

    if (list_empty(&server.free_mem_blocks)) {
        if (server.mem_blocks_alloc >= server.mem_blocks_alloc_limit) {
            spy_log(ERROR, "mem block reach limit");
            return -1;
        }

        mem_block = calloc(1, MEM_BLOCK_SIZE);
        if (!mem_block) {
            spy_log(ERROR, "calloc mem block failed");
            return -1;
        }

        INIT_LIST_HEAD(&mem_block->list);
        mem_block->size = MEM_BLOCK_SIZE - sizeof(spy_mem_block_t);

        server.mem_blocks_alloc++;
    } else {
        mem_block = list_first_entry(&server.free_mem_blocks,
                                     spy_mem_block_t, list);

        list_del_init(&mem_block->list);
    }

    assert(mem_block);

    // first mem block
    if (list_empty(&buffer->mem_blocks)) {
        assert(!buffer->read_block && !buffer->write_block);

        buffer->read_block = mem_block;
        buffer->write_block = mem_block;
    }

    list_add_tail(&mem_block->list, &buffer->mem_blocks);
    buffer->cap += mem_block->size;

    server.mem_blocks_used++;

    return 0;
}
Example #14
static int cln_write(struct triton_md_handler_t *h)
{
	struct tcp_client_t *cln = container_of(h, typeof(*cln), hnd);
	int k;

	while (cln->xmit_buf) {
		for (; cln->xmit_pos < cln->xmit_buf->size; cln->xmit_pos += k) {
			k = write(cln->hnd.fd, cln->xmit_buf->buf + cln->xmit_pos, cln->xmit_buf->size - cln->xmit_pos);
			if (k < 0) {
				if (errno == EAGAIN)
					return 0;
				if (errno != EPIPE)
					log_error("cli: tcp: write: %s\n", strerror(errno));
				goto disconn;
			}
		}

		_free(cln->xmit_buf);
		cln->xmit_pos = 0;

		if (list_empty(&cln->xmit_queue)) {
			cln->xmit_buf = NULL;
		} else {
			cln->xmit_buf = list_first_entry(&cln->xmit_queue,
							 typeof(*cln->xmit_buf),
							 entry);
			list_del(&cln->xmit_buf->entry);
		}
	}

	if (cln->disconnect)
		goto disconn;

	triton_md_disable_handler(&cln->hnd, MD_MODE_WRITE);

	return 0;

disconn:
	disconnect(cln);

	return -1;
}
Example #15
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;

	while (!list_empty_careful(&vcpu->async_pf.done) &&
	      kvm_arch_can_inject_async_page_present(vcpu)) {
		spin_lock(&vcpu->async_pf.lock);
		work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
					      link);
		list_del(&work->link);
		spin_unlock(&vcpu->async_pf.lock);

		kvm_arch_async_page_ready(vcpu, work);
		kvm_arch_async_page_present(vcpu, work);

		list_del(&work->queue);
		vcpu->async_pf.queued--;
		kmem_cache_free(async_pf_cache, work);
	}
}
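list_empty_careful() lets the loop poll for completed work without taking the lock; the locked list_first_entry()/list_del() pair that follows is safe because only this vCPU thread dequeues from async_pf.done. If several consumers could race, the emptiness test would have to be repeated under the lock, roughly as in this sketch (struct pf_queue and pf_dequeue are invented for illustration):

struct pf_queue {
	spinlock_t lock;
	struct list_head done;
};

static struct kvm_async_pf *pf_dequeue(struct pf_queue *q)
{
	struct kvm_async_pf *work = NULL;

	spin_lock(&q->lock);
	/* re-check under the lock: another consumer may have emptied the list */
	if (!list_empty(&q->done)) {
		work = list_first_entry(&q->done, typeof(*work), link);
		list_del(&work->link);
	}
	spin_unlock(&q->lock);
	return work;
}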
Example #16
static void deferred_probe_work_func(struct work_struct *work)
{
	struct device *dev;
	struct device_private *private;
	mutex_lock(&deferred_probe_mutex);
	while (!list_empty(&deferred_probe_active_list)) {
		private = list_first_entry(&deferred_probe_active_list,
					typeof(*dev->p), deferred_probe);
		dev = private->device;
		list_del_init(&private->deferred_probe);

		get_device(dev);

		mutex_unlock(&deferred_probe_mutex);
		dev_dbg(dev, "Retrying from deferred list\n");
		bus_probe_device(dev);
		mutex_lock(&deferred_probe_mutex);

		put_device(dev);
	}
	mutex_unlock(&deferred_probe_mutex);
}
Example #17
MTV250_TS_PKT_INFO *mtv250_alloc_tsp(void)
{	
	MTV250_TS_PKT_INFO *tsp = NULL;
	struct list_head *head_ptr = &mtv250_tsp_pool.head;
		
	if(mtv250_tsp_pool.cnt != 0) /* unlocked fast-path check, equivalent to !list_empty(head_ptr) */
	{
		spin_lock(&mtv250_tsp_pool.lock);
		
		tsp = list_first_entry(head_ptr, MTV250_TS_PKT_INFO, link);
		list_del(&tsp->link);
		mtv250_tsp_pool.cnt--;
#ifdef _DEBUG_TSP_POOL
		max_used_tsp_cnt = MAX(max_used_tsp_cnt, MAX_NUM_TS_PKT_BUF-mtv250_tsp_pool.cnt);
#endif	
		spin_unlock(&mtv250_tsp_pool.lock);
	}
	
	return tsp;
	
}
Example #18
/* iterate through all the connectors, returning those that are attached
 * to the same fb.
 */
struct drm_connector *omap_framebuffer_get_next_connector(
		struct drm_framebuffer *fb, struct drm_connector *from)
{
	struct drm_device *dev = fb->dev;
	struct list_head *connector_list = &dev->mode_config.connector_list;
	struct drm_connector *connector = from;

	if (!from)
		return list_first_entry(connector_list, typeof(*from), head);

	list_for_each_entry_from(connector, connector_list, head) {
		if (connector != from) {
			struct drm_encoder *encoder = connector->encoder;
			struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
			if (crtc && crtc->fb == fb)
				return connector;

		}
	}

	return NULL;
}
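list_for_each_entry_from() starts at `connector` itself, which is why the body has to skip `from` explicitly. A variant using list_for_each_entry_continue(), which begins with the entry after the cursor, drops that test; this is a sketch, and next_connector_for_fb is a name invented here:

static struct drm_connector *next_connector_for_fb(
		struct list_head *connector_list,
		struct drm_connector *from,
		struct drm_framebuffer *fb)
{
	struct drm_connector *connector = from;

	/* begins at the entry after `from`, so no self-test is needed */
	list_for_each_entry_continue(connector, connector_list, head) {
		struct drm_encoder *encoder = connector->encoder;
		struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;

		if (crtc && crtc->fb == fb)
			return connector;
	}
	return NULL;
}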
Example #19
void spy_rw_buffer_reset(spy_rw_buffer_t *buffer)
{
    spy_mem_block_t *mem_block;

    assert(buffer);

    while (!list_empty(&buffer->mem_blocks)) {
        mem_block = list_first_entry(&buffer->mem_blocks,
                                     spy_mem_block_t, list);
        list_move(&mem_block->list, &server.free_mem_blocks);

        server.mem_blocks_used--;
    }

    buffer->read_pos    = 0;
    buffer->read_base   = 0;
    buffer->write_pos   = 0;
    buffer->write_base  = 0;
    buffer->cap         = 0;

    buffer->read_block  = NULL;
    buffer->write_block = NULL;
}
Example #20
static void active_worker(struct work_struct *work)
{
	OMAPLFB_DEVINFO *psDevInfo =
		container_of(work, OMAPLFB_DEVINFO, active_work);
	OMAPLFB_SWAPCHAIN *psSwapChain = psDevInfo->psSwapChain;
	OMAPLFB_BUFFER *psBuffer;

	mutex_lock(&psDevInfo->active_list_lock);

	while (!list_empty(&psDevInfo->active_list)) {
		psBuffer = list_first_entry(&psDevInfo->active_list,
					    OMAPLFB_BUFFER, list);
		mutex_unlock(&psDevInfo->active_list_lock);
		OMAPLFBFlip(psSwapChain,
			    (unsigned long)psBuffer->sSysAddr.uiAddr);

		psSwapChain->psPVRJTable->
			pfnPVRSRVCmdComplete(psBuffer->hCmdCookie, IMG_TRUE);

		list_del_init(&psBuffer->list);
		mutex_lock(&psDevInfo->active_list_lock);
	}
	mutex_unlock(&psDevInfo->active_list_lock);
}
Example #21
static void for_each_acrd_file(struct acrd_handle *ah, const char *parent,
			       void (*func)(struct acrd_handle *ah,
					    const char *path, void *arg),
			       void *arg)
{
	LIST_HEAD(path_list);
	struct acrd_path_list_entry *entry;
	struct acrd_listcb listcb = {
		.cb = acrd_list_cb,
		.arg = &path_list,
	};

	acrd_list(ah, parent, 0, &listcb);

	while (!list_empty(&path_list)) {
		entry = list_first_entry(&path_list, typeof(*entry), list);

		func(ah, entry->path, arg);

		list_del(&entry->list);
		free(entry->path);
		free(entry);
	}
}
Example #22
/**
 * dwc_otg_device_start
 *
 * Start device mode on the chip.
 */
int dwc_otg_device_start(dwc_otg_device_t *_dev)
{
	int ret = 0, i;
	diepmsk_data_t diepmsk = { .d32 = 0 };
	doepmsk_data_t doepmsk = { .d32 = 0 };

	if(!_dev)
	{
		DWC_ERROR("%s passed a null device structure pointer!\n", __func__);
		return -EINVAL;
	}

	DWC_VERBOSE("%s(%p)\n", __func__, _dev);

	// Enable Interrupts
	if(dwc_otg_device_enable_interrupts(_dev))
		DWC_WARNING("Failed to enable device interrupts.\n");

	// Clear D(I|O)PCTLs.
	for(i = 0; i < _dev->core->num_eps; i++)
	{
		dwc_otg_write_reg32(&_dev->core->in_ep_registers[i]->diepctl, 0);
		dwc_otg_write_reg32(&_dev->core->out_ep_registers[i]->doepctl, 0);
	}

	// Enable EP Interrupts
	diepmsk.b.xfercompl = 1;
	diepmsk.b.ahberr = 1;
	diepmsk.b.timeout = 1;
	diepmsk.b.epdisabled = 1;
	diepmsk.b.inepnakeff = 1;
	dwc_otg_write_reg32(&_dev->core->device_registers->diepmsk, diepmsk.d32);

	doepmsk.b.xfercompl = 1;
	doepmsk.b.setup = 1;
	doepmsk.b.back2backsetup = 1;
	doepmsk.b.epdisabled = 1;
	dwc_otg_write_reg32(&_dev->core->device_registers->doepmsk, doepmsk.d32);
	
	// Reset the device.
	dwc_otg_device_usb_reset(_dev);

	// We're all set up, tell the usb_gadget framework
	// that we exist.
	ret = usb_gadget_register_controller(_dev);
	if(ret)
	{
		DWC_ERROR("Failed to register controller.\n");
		return ret;
	}
	

	return 0;
}

/**
 * dwc_otg_device_stop
 *
 * Stop device mode on the chip.
 */
void dwc_otg_device_stop(dwc_otg_device_t *_dev)
{
	if(!_dev)
	{
		DWC_WARNING("%s passed a null device structure pointer!\n", __func__);
		return;
	}

	if(dwc_otg_device_disable_interrupts(_dev))
		DWC_WARNING("Failed to disable device interrupts.\n");
}

/**
 * dwc_otg_device_enable_interrupts
 *
 * enables interrupts.
 */
int dwc_otg_device_enable_interrupts(dwc_otg_device_t *_dev)
{
	gintmsk_data_t gintmsk = { .d32 = 0 };

	DWC_VERBOSE("%s(%p)\n", __func__, _dev);

	gintmsk.b.usbreset = 1;
	gintmsk.b.usbsuspend = 1;
	gintmsk.b.disconnect = 1;
	gintmsk.b.inepintr = 1;
	gintmsk.b.outepintr = 1;
	gintmsk.b.enumdone = 1;
	gintmsk.b.otgintr = 1;
	gintmsk.b.epmismatch = 1;
	dwc_otg_modify_reg32(&_dev->core->registers->gintmsk, 0, gintmsk.d32);

	return 0;
}

/**
 * dwc_otg_device_disable_interrupts
 *
 * disables interrupts.
 */
int dwc_otg_device_disable_interrupts(dwc_otg_device_t *_dev)
{
	gintmsk_data_t gintmsk = { .d32 = 0 };

	DWC_VERBOSE("%s(%p)\n", __func__, _dev);

	gintmsk.b.usbreset = 1;
	gintmsk.b.usbsuspend = 1;
	gintmsk.b.disconnect = 1;
	gintmsk.b.inepintr = 1;
	gintmsk.b.outepintr = 1;
	gintmsk.b.enumdone = 1;
	gintmsk.b.otgintr = 1;
	gintmsk.b.epmismatch = 1;
	dwc_otg_modify_reg32(&_dev->core->registers->gintmsk, gintmsk.d32, 0);

	return 0;
}

/**
 * dwc_otg_device_usb_reset
 *
 * Resets the USB connection and re-enables EP0.
 */
int dwc_otg_device_usb_reset(dwc_otg_device_t *_dev)
{
	diepmsk_data_t diepmsk = { .d32 = 0 };
	doepmsk_data_t doepmsk = { .d32 = 0 };
	dcfg_data_t dcfg = { .d32 = 0 };
	daint_data_t daint = { .d32 = 0 };
	unsigned long flags;

	DWC_VERBOSE("%s(%p)\n", __func__, _dev);

	spin_lock_irqsave(&_dev->core->lock, flags);

	// Clear Device Address
	dcfg.d32 = dwc_otg_read_reg32(&_dev->core->device_registers->dcfg);
	dcfg.b.devaddr = 0;
	dcfg.b.epmscnt = 1; // TODO: if shared fifo
	dwc_otg_write_reg32(&_dev->core->device_registers->dcfg, dcfg.d32);

	// Reset global NAK registers.
	_dev->core->global_in_nak_count = 0;
	_dev->core->global_out_nak_count = 0;
	dwc_otg_core_clear_global_in_nak(_dev->core);
	dwc_otg_core_clear_global_out_nak(_dev->core);

	// Reset EPs
	dwc_otg_core_ep_reset(_dev->core);

	// Enable interrupts on EP0
	daint.b.inep0 = 1;
	daint.b.outep0 = 1;
	dwc_otg_write_reg32(&_dev->core->device_registers->daintmsk, daint.d32);

	// Clear EP0 interrupts
	dwc_otg_write_reg32(&_dev->core->in_ep_registers[0]->diepint, 0xffffffff);
	dwc_otg_write_reg32(&_dev->core->out_ep_registers[0]->doepint, 0xffffffff);

	// Enable EP Interrupts
	diepmsk.b.xfercompl = 1;
	diepmsk.b.ahberr = 1;
	diepmsk.b.timeout = 1;
	diepmsk.b.epdisabled = 1;
	diepmsk.b.inepnakeff = 1;
	dwc_otg_write_reg32(&_dev->core->device_registers->diepmsk, diepmsk.d32);

	doepmsk.b.xfercompl = 1;
	doepmsk.b.setup = 1;
	doepmsk.b.back2backsetup = 1;
	doepmsk.b.epdisabled = 1;
	dwc_otg_write_reg32(&_dev->core->device_registers->doepmsk, doepmsk.d32);
	
	spin_unlock_irqrestore(&_dev->core->lock, flags);

	// Notify gadget driver
	if(dwc_otg_gadget_driver && dwc_otg_gadget_driver->resume)
		dwc_otg_gadget_driver->resume(&dwc_otg_gadget);

	return 0;
}

/** The EP0 USB descriptor! */
static struct usb_endpoint_descriptor ep0_descriptor = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,

	.bEndpointAddress = 0,
	.bmAttributes = 0,
};

/** The OUT request for EP0. */
static dwc_otg_core_request_t ep0_out_request = {
	.request_type = DWC_EP_TYPE_CONTROL,
	.direction = DWC_OTG_REQUEST_OUT,
	.completed_handler = &dwc_otg_device_complete_ep0,
	.dont_free = 1,	
	.buffer_length = 64,
	.dma_buffer = NULL,
};

/** The IN request for EP0. */
static dwc_otg_core_request_t ep0_in_request = {
	.request_type = DWC_EP_TYPE_CONTROL,
	.direction = DWC_OTG_REQUEST_IN,
	.dont_free = 1,
	.buffer_length = 64,
	.dma_buffer = NULL,
};

/**
 * The device USB IRQ handler.
 */
irqreturn_t dwc_otg_device_irq(int _irq, void *_dev)
{
	dwc_otg_device_t *dev = (dwc_otg_device_t*)_dev;
	dwc_otg_core_t *core = dev->core;
	gintsts_data_t gintsts = { .d32 = 0 };
	gintsts_data_t gintclr = { .d32 = 0 };

	gintsts.d32 = dwc_otg_read_reg32(&core->registers->gintsts) & dwc_otg_read_reg32(&core->registers->gintmsk);
	DWC_VERBOSE("%s(%d, %p) gintsts=0x%08x\n", __func__, _irq, _dev, gintsts.d32);

	if(gintsts.b.otgintr)
	{
		gotgint_data_t gotgint;
		
		gotgint.d32 = dwc_otg_read_reg32(&core->registers->gotgint);
		DWC_DEBUG("otgintr 0x%08x\n", gotgint.d32);

		if(gotgint.b.sesenddet)
		{
			dcfg_data_t dcfg = { .d32 = dwc_otg_read_reg32(&core->device_registers->dcfg) };

			DWC_DEBUG("session end detected\n");

			dcfg.b.nzstsouthshk = 1;
			dwc_otg_write_reg32(&core->device_registers->dcfg, dcfg.d32);

			DWC_DEBUG("DCFG=0x%08x\n", dcfg.d32);

			schedule_work(&dev->disconnect_work);
		}

		// Clear OTG interrupts
		dwc_otg_write_reg32(&core->registers->gotgint, 0xffffffff);

		gintclr.b.otgintr = 1;
	}

	if(gintsts.b.enumdone)
	{
		int dwcSpeed;
		int i;
		dsts_data_t dsts;

		DWC_DEBUG("enumdone\n");

		// Enable EP0.
		dwc_otg_core_enable_ep(core, &core->endpoints[0], &ep0_descriptor);

		//ep0_in_request.queued = 0;
		//ep0_out_request.queued = 0;
		
		// Listen for setup packets on EP0.
		dwc_otg_device_receive_ep0(_dev);

		// Tell gadget driver our top speed.
		dsts.d32 = dwc_otg_read_reg32(&core->device_registers->dsts);
		switch (dsts.b.enumspd)
		{
		case DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ:
			dwc_otg_gadget.speed = USB_SPEED_HIGH;
			dwcSpeed = DWC_OTG_HIGH_SPEED;
			break;

		case DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ:
		case DWC_DSTS_ENUMSPD_FS_PHY_48MHZ:
			dwc_otg_gadget.speed = USB_SPEED_FULL;
			dwcSpeed = DWC_OTG_FULL_SPEED;
			break;

		case DWC_DSTS_ENUMSPD_LS_PHY_6MHZ:
			dwc_otg_gadget.speed = USB_SPEED_LOW;
			dwcSpeed = DWC_OTG_LOW_SPEED;
			break;
		}
		
		// Update the endpoint speeds.
		for(i = 1; i < core->num_eps; i++)
			core->endpoints[i].speed = dwcSpeed;

		gintclr.b.enumdone = 1;
	}

	if(gintsts.b.usbreset)
	{
		DWC_DEBUG("usbreset\n");
		
		//schedule_work(&dev->reset_work);
		dwc_otg_device_usb_reset(dev);

		gintclr.b.usbreset = 1;
	}

	if(gintsts.b.usbsuspend)
	{
		DWC_DEBUG("usbsuspend\n");
		
		schedule_work(&dev->suspend_work);

		gintclr.b.usbsuspend = 1;
	}

	if(gintsts.b.disconnect)
	{
		DWC_DEBUG("disconnect\n");

		schedule_work(&dev->disconnect_work);
		gintclr.b.disconnect = 1;
	}

	if(gintsts.b.epmismatch)
	{
		dtknq1_data_t t1;
		int t2, t3, t4;
		int i;
		int num_tokens;
		int doneEPs = 0;
		dwc_otg_core_request_t *req;

		DWC_ERROR("EP Mismatch.\n");

		dwc_otg_core_set_global_in_nak(dev->core);
		for(i = 0; i < dev->core->num_eps; i++)
		{
			dwc_otg_core_ep_t *ep = &core->endpoints[i];
			req = list_first_entry(&ep->transfer_queue, dwc_otg_core_request_t, queue_pointer);
			/* list_first_entry() never returns NULL; test emptiness instead */
			if(!list_empty(&ep->transfer_queue) && req->direction == DWC_OTG_REQUEST_IN)
			{
				dwc_otg_core_cancel_ep(dev->core, ep);
			}
		}
		dwc_otg_core_clear_global_in_nak(dev->core);

		t1.d32 = dwc_otg_read_reg32(&dev->core->device_registers->dtknqr1);
		t2 = dwc_otg_read_reg32(&dev->core->device_registers->dtknqr2);
		t3 = dwc_otg_read_reg32(&dev->core->device_registers->dtknqr3_dthrctl);
		t4 = dwc_otg_read_reg32(&dev->core->device_registers->dtknqr4_fifoemptymsk);

		num_tokens = min((int)dev->core->hwcfg2.b.dev_token_q_depth, (int)t1.b.intknwptr);

		DWC_PRINT("Requeing %d requests.\n", num_tokens);

		for(i = 0; i < num_tokens; i++)
		{
			int ep;

#define GET_BITS(x, start, len) (((x) << ((sizeof(x)*8)-(len)-(start))) >> ((sizeof(x)*8)-(len)))

			if(i < 6)
				ep = GET_BITS((int)t1.b.epnums0_5, 8 + (i*4), 4);
			else if(i < 14)
				ep = GET_BITS(t2, (i-6)*4, 4);
			else if(i < 21)
				ep = GET_BITS(t3, (i-14)*4, 4);
			else if(i < 30)
				ep = GET_BITS(t4, (i-21)*4, 4);
			else
				break;

			if(doneEPs & (1 << ep))
				break;

			DWC_ERROR("Requeing %d.\n", ep);
	
			req = list_first_entry(&core->endpoints[i].transfer_queue, dwc_otg_core_request_t, queue_pointer);
			if(req && req->direction == DWC_OTG_REQUEST_IN)
				dwc_otg_core_start_request(core, req);
			else
				DWC_ERROR("Tried to requeue invalid request %p.\n", req);

			doneEPs |= (1 << ep);
		}

		for(i = 0; i < dev->core->num_eps; i++)
		{
			if(doneEPs & (1 << i))
				continue;
	
			req = list_first_entry(&core->endpoints[i].transfer_queue, dwc_otg_core_request_t, queue_pointer);
			if(!list_empty(&core->endpoints[i].transfer_queue) && req->direction == DWC_OTG_REQUEST_IN)
				dwc_otg_core_start_request(core, req);
			//else
			//	DWC_ERROR("Tried to requeue invalid request %p.\n", req);
		}

		gintclr.b.epmismatch = 1;
	}

	if(gintsts.b.inepintr || gintsts.b.outepintr)
	{
		daint_data_t daint = { .d32 = 0 };
		int i;

		DWC_DEBUG("epint\n");

		daint.d32 = dwc_otg_read_reg32(&core->device_registers->daint) & dwc_otg_read_reg32(&core->device_registers->daintmsk);

		for(i = 0; i < core->num_eps; i++)
		{
			if(daint.ep.in & 1)
				dwc_otg_device_handle_in_interrupt(dev, i);

			if(daint.ep.out & 1)
				dwc_otg_device_handle_out_interrupt(dev, i);

			daint.ep.out >>= 1;
			daint.ep.in >>= 1;
		}
	}

	// Acknowledge the interrupts we handled; if any enabled source
	// remains unserviced, report IRQ_NONE below.
	gintsts.d32 &= ~gintclr.d32;
	dwc_otg_write_reg32(&core->registers->gintsts, gintclr.d32);
	return gintsts.d32 != 0 ? IRQ_NONE : IRQ_HANDLED;
}

/**
 * The in endpoint interrupt handler.
 */
irqreturn_t dwc_otg_device_handle_in_interrupt(dwc_otg_device_t *_dev, int _ep)
{
	dwc_otg_core_ep_t *ep = &_dev->core->endpoints[_ep];
	diepint_data_t depint = { .d32 = 0 };
	diepint_data_t depclr = { .d32 = 0 };

	DWC_VERBOSE("%s(%p, %d)\n", __func__, _dev, _ep);

	depint.d32 = dwc_otg_read_reg32(&ep->in_registers->diepint) & dwc_otg_read_reg32(&_dev->core->device_registers->diepmsk);

	if(!ep->exists)
	{
		DWC_ERROR("%s called on non-existant in endpoint %d.\n", __func__, _ep);
		return -ENXIO;
	}

	if(depint.b.inepnakeff)
	{
		DWC_DEBUG("in nak eff %s\n", ep->name);

		complete_all(&ep->nakeff_completion);
		init_completion(&ep->nakeff_completion);

		depclr.b.inepnakeff = 1;
	}

	if(depint.b.epdisabled)
	{
		DWC_DEBUG("in epdisabled %s\n", ep->name);

		complete_all(&ep->disabled_completion);
		init_completion(&ep->disabled_completion);

		depclr.b.epdisabled = 1;
	}

	/*if(depint.b.intknepmis)
	{
		gintsts_data_t gintsts = { .d32 = 0 };

		DWC_DEBUG("in tn mis\n");

		gintsts.b.epmismatch = 1;
		dwc_otg_modify_reg32(&_dev->core->registers->gintsts, gintsts.d32, 0);
	}*/

	if(depint.b.intktxfemp)
	{
		DWC_DEBUG("in tx f emp\n");

		// Do nothing... ;_;
		
		depclr.b.intktxfemp = 1;
	}

	if(depint.b.timeout)
	{
		DWC_DEBUG("in timeout %s\n", ep->name);

		// Do something!?
		// Cancel sending?!
		
		depclr.b.timeout = 1;
	}

	if(depint.b.ahberr)
	{
		DWC_DEBUG("in ahberr\n");

		// Do nothing again... :'(
		
		depclr.b.ahberr = 1;
	}

	if(depint.b.xfercompl)
	{
		/* only valid if the queue is non-empty; checked just below */
		dwc_otg_core_request_t *req = list_first_entry(&ep->transfer_queue, dwc_otg_core_request_t, queue_pointer);

		DWC_DEBUG("in xfercompl %s\n", ep->name);

		if(list_empty(&ep->transfer_queue) || req->direction != DWC_OTG_REQUEST_IN)
		{
			DWC_ERROR("XferCompl received when we weren't transferring anything!\n");
		}
		else
			dwc_otg_core_complete_request(_dev->core, req);
		
		depclr.b.xfercompl = 1;
	}

	// Clear interrupts
	dwc_otg_write_reg32(&ep->in_registers->diepint, depclr.d32);
	
	return 0;
}

/**
 * The out endpoint interrupt handler.
 */
irqreturn_t dwc_otg_device_handle_out_interrupt(dwc_otg_device_t *_dev, int _ep)
{
	dwc_otg_core_ep_t *ep = &_dev->core->endpoints[_ep];
	doepint_data_t depint = { .d32 = 0 };
	doepint_data_t depclr = { .d32 = 0 };

	DWC_VERBOSE("%s(%p, %d)\n", __func__, _dev, _ep);

	depint.d32 = dwc_otg_read_reg32(&ep->out_registers->doepint) & dwc_otg_read_reg32(&_dev->core->device_registers->doepmsk);

	if(!ep->exists)
	{
		DWC_ERROR("%s called on non-existant in endpoint %d.\n", __func__, _ep);
		return -ENXIO;
	}

	if(depint.b.setup)
	{
		dwc_otg_core_request_t *req;

		// We received a setup packet, yaaay! 
		DWC_VERBOSE("setup\n");

		// If we had a setup packet, set the flag on the
		// request that says it was prepended by a setup
		// token.
		req = list_first_entry(&ep->transfer_queue, dwc_otg_core_request_t, queue_pointer);
		if(!list_empty(&ep->transfer_queue) && req->direction == DWC_OTG_REQUEST_OUT)
		{
			req->setup = 1;
			dwc_otg_core_complete_request(_dev->core, req);
		}
		else
			DWC_ERROR("Setup Packet Received without us initiating a transfer!\n");

		depclr.b.setup = 1;
	}

	if(depint.b.back2backsetup)
	{
		// Do nothing
		DWC_DEBUG("back2backsetup\n");

		depclr.b.back2backsetup = 1;
	}

	if(depint.b.epdisabled)
	{
		DWC_DEBUG("out epdisabled\n");

		complete_all(&ep->disabled_completion);
		init_completion(&ep->disabled_completion);

		depclr.b.epdisabled = 1;
	}

	if(depint.b.ahberr)
	{
		// Do nothing again... :'(
		DWC_DEBUG("out ahberr\n");

		depclr.b.ahberr = 1;
	}
		
	if(depint.b.outtknepdis)
	{
		// Nothing to do again! Wah... :''(
		DWC_DEBUG("out tkn epdis\n");

		depclr.b.outtknepdis = 1;
	}

	if(depint.b.xfercompl)
	{
		/* only valid if the queue is non-empty; checked just below */
		dwc_otg_core_request_t *req = list_first_entry(&ep->transfer_queue, dwc_otg_core_request_t, queue_pointer);

		DWC_DEBUG("out xfercompl\n");

		// Yay! :D

		if(list_empty(&ep->transfer_queue) || req->direction != DWC_OTG_REQUEST_OUT)
		{
			DWC_ERROR("XferCompl received when we weren't transferring anything!\n");
		}
		else
			dwc_otg_core_complete_request(_dev->core, req);

		depclr.b.xfercompl = 1;
	}

	// Clear interrupts
	dwc_otg_write_reg32(&ep->out_registers->doepint, depclr.d32);

	return 0;
}

/**
 * dwc_otg_device_receive_ep0
 */
void dwc_otg_device_receive_ep0(dwc_otg_device_t *_dev)
{
	ep0_out_request.data = _dev;
	ep0_out_request.length = 8; //ep0_out_request.buffer_length;

	if(ep0_out_request.dma_buffer == NULL)
	{
		ep0_out_request.dma_buffer = dma_alloc_coherent(NULL, ep0_out_request.buffer_length, &ep0_out_request.dma_address, GFP_KERNEL);
		if(!ep0_out_request.dma_buffer)
		{
			DWC_ERROR("Failed to allocate setup buffer for EP0!\n");
			return;
		}

		// As we've allocated a DMA buffer, we don't
		// need another. :D -- Ricky26
		ep0_out_request.buffer = ep0_out_request.dma_buffer;

		// Setup the in request, as we might need it as a response.
		ep0_in_request.length = ep0_in_request.buffer_length;
		ep0_in_request.dma_buffer = dma_alloc_coherent(NULL, ep0_in_request.buffer_length, &ep0_in_request.dma_address, GFP_KERNEL);
		if(!ep0_in_request.dma_buffer)
		{
			DWC_ERROR("Failed to allocate in buffer for EP0!\n");
			return;
		}

		ep0_in_request.buffer = ep0_in_request.dma_buffer;
	}

	DWC_VERBOSE("%s: enqueue (%p):%d\n", __func__, &ep0_out_request, ep0_out_request.length);
	dwc_otg_core_enqueue_request(_dev->core, &_dev->core->endpoints[0], &ep0_out_request);
}

/**
 * dwc_otg_device_send_ep0
 */
void dwc_otg_device_send_ep0(dwc_otg_device_t *_dev)
{
	ep0_in_request.data = _dev;
	dwc_otg_core_enqueue_request(_dev->core, &_dev->core->endpoints[0], &ep0_in_request);
}

/**
 * dwc_otg_device_complete_ep0
 *
 * This is called to complete an interrupt on EP0.
 */
void dwc_otg_device_complete_ep0(dwc_otg_core_request_t *_req)
{
	dwc_otg_core_t *core = _req->core;
	dwc_otg_device_t *dev = (dwc_otg_device_t*)_req->data;
	
	if(_req->cancelled)
	{
		DWC_VERBOSE("Shutting down EP0 control channel.\n");
		return;
	}

	if(_req->setup)
	{
		struct usb_ctrlrequest *packet = (struct usb_ctrlrequest*)_req->buffer;

#if defined(VERBOSE)&&defined(DEBUG)
		uint8_t *b = (uint8_t*)packet;
		
		DWC_PRINT("EP0 Sent us a setup packet! :3\n");
		DWC_PRINT("%02x %02x %02x %02x %02x %02x %02x %02x\n", 
				b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
		DWC_PRINT("RT %d, R %d, w %d\n", packet->bRequestType & 0x7F, packet->bRequest, packet->wIndex);
#endif

		if((packet->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
		{
			int set = 0;

			switch(packet->bRequest)
			{
				case USB_REQ_GET_STATUS:
					{
						uint16_t *result = (uint16_t*)ep0_in_request.buffer;

						switch (packet->bRequestType & USB_RECIP_MASK)
						{
						case USB_RECIP_DEVICE:
							*result = 0x1 | (1 << dev->remote_wakeup); // Self powered, remote wakeup enabled.
							break;

						case USB_RECIP_INTERFACE:
							*result = 0;
							break;

						case USB_RECIP_ENDPOINT:
							{
								// TODO: check this!
								int epnum = packet->wIndex & 0xf;
								if(epnum < core->num_eps &&
										list_empty(&core->endpoints[epnum].transfer_queue))
								{
									*result = 0;
									break;
								}

								*result = 1;
							}
							break;
						}
						
						ep0_in_request.length = 2;
						dwc_otg_device_send_ep0(dev);
					}
					goto exit;

				case USB_REQ_SET_FEATURE:
					set = 1;
					// fall through
				case USB_REQ_CLEAR_FEATURE:

					switch (packet->bRequestType & USB_RECIP_MASK)
					{
						case USB_RECIP_DEVICE:
							switch(packet->wValue)
							{
								case USB_DEVICE_REMOTE_WAKEUP:
									dev->remote_wakeup = set;
									break;
							}
							break;

						case USB_RECIP_ENDPOINT:
							{
								int ep = packet->wValue;
								if(ep == 0 || ep > core->num_eps)
									goto stall;

								dwc_otg_core_stall_ep(core, &core->endpoints[ep], set);
							}
							break;

					}
					goto send_zlp;

				case USB_REQ_SET_ADDRESS:
					{
						dcfg_data_t dcfg = { .d32 = 0 };

						dcfg.d32 = dwc_otg_read_reg32(&core->device_registers->dcfg);
						dcfg.b.devaddr = packet->wValue;
						dwc_otg_write_reg32(&core->device_registers->dcfg, dcfg.d32);

						DWC_VERBOSE("Received address %d.\n", dcfg.b.devaddr);
					}
					goto send_zlp;
			}
		}

		// If we got here we don't know how to handle
		// the setup packet, so let's ask the gadget driver to
		// do it, and then blame it if it doesn't work...

		DWC_VERBOSE("Passing setup packet to gadget driver.\n");
		if(dwc_otg_gadget_driver == NULL)
		{
			DWC_WARNING("No gadget driver yet, stalling.\n");
			goto stall;
		}

		if(dwc_otg_gadget_driver->setup(&dwc_otg_gadget, packet))
		{
			DWC_ERROR("Got a setup packet we don't handle properly yet.\n");
			goto stall;
		}

		goto exit;
	}
	else if(ep0_out_request.length == 0)
	{
		DWC_VERBOSE("Received a ZLP from host.\n");
	}
	else
	{
		DWC_ERROR("EP0 sent us some shit we don't know how to handle.\n");
	}

	goto exit;

stall:
	//dwc_otg_core_stall_ep(core, &core->endpoints[0], 1);
	{
		depctl_data_t depctl = { .d32 = dwc_otg_read_reg32(&core->in_ep_registers[0]->diepctl) };
		depctl.b.stall = 1;
		depctl.b.cnak = 1;
		dwc_otg_write_reg32(&core->in_ep_registers[0]->diepctl, depctl.d32);
	}
	goto exit;

send_zlp:
	ep0_in_request.length = 0;
	dwc_otg_device_send_ep0(dev);

exit:
	dwc_otg_device_receive_ep0(dev);
}