Code Example #1
File: udp.c Project: Cycle-Applications/node
static void uv__udp_recvmsg(uv_loop_t* loop,
                            uv__io_t* w,
                            unsigned int revents) {
  struct sockaddr_storage peer;
  struct msghdr h;
  uv_udp_t* handle;
  ssize_t nread;
  uv_buf_t buf;
  int flags;
  int count;

  handle = container_of(w, uv_udp_t, io_watcher);
  assert(handle->type == UV_UDP);
  assert(revents & UV__POLLIN);

  assert(handle->recv_cb != NULL);
  assert(handle->alloc_cb != NULL);

  /* Prevent loop starvation when the data comes in as fast as (or faster than)
   * we can read it. XXX Need to rearm fd if we switch to edge-triggered I/O.
   */
  count = 32;

  memset(&h, 0, sizeof(h));
  h.msg_name = &peer;

  do {
    buf = handle->alloc_cb((uv_handle_t*)handle, 64 * 1024);
    assert(buf.len > 0);
    assert(buf.base != NULL);

    h.msg_namelen = sizeof(peer);
    h.msg_iov = (void*) &buf;
    h.msg_iovlen = 1;

    do {
      nread = recvmsg(handle->io_watcher.fd, &h, 0);
    }
    while (nread == -1 && errno == EINTR);

    if (nread == -1) {
      if (errno == EAGAIN || errno == EWOULDBLOCK) {
        uv__set_sys_error(handle->loop, EAGAIN);
        handle->recv_cb(handle, 0, buf, NULL, 0);
      }
      else {
        uv__set_sys_error(handle->loop, errno);
        handle->recv_cb(handle, -1, buf, NULL, 0);
      }
    }
    else {
      flags = 0;

      if (h.msg_flags & MSG_TRUNC)
        flags |= UV_UDP_PARTIAL;

      handle->recv_cb(handle,
                      nread,
                      buf,
                      (struct sockaddr*)&peer,
                      flags);
    }
  }
  /* recv_cb callback may decide to pause or close the handle */
  while (nread != -1
      && count-- > 0
      && handle->io_watcher.fd != -1
      && handle->recv_cb != NULL);
}
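
Every snippet on this page turns on the same idiom: given a pointer to a member embedded in a struct, container_of recovers a pointer to the enclosing struct. As a reference point, a minimal Linux-style definition is sketched below; individual projects ship their own variants (clBLAS in example #22 and Mesa in example #30 even use different argument orders), so treat this as the canonical shape rather than the exact macro behind each example.

#include <stddef.h>  /* offsetof */

/* Minimal sketch: subtract the member's offset within the container
 * from the member pointer to land on the start of the container. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

In example #1 above, container_of(w, uv_udp_t, io_watcher) is therefore just w minus offsetof(uv_udp_t, io_watcher).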
Code Example #2
File: SSHRSA.C Project: TortoiseGit/TortoiseGit
static void rsa2_freekey(ssh_key *key)
{
    RSAKey *rsa = container_of(key, RSAKey, sshk);
    freersakey(rsa);
    sfree(rsa);
}
Code Example #3
File: accel_msg.c Project: zhoupeng/spice4xen
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
void netfront_accel_msg_from_bend(struct work_struct *context)
#else
void netfront_accel_msg_from_bend(void *context)
#endif
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
	netfront_accel_vnic *vnic = 
		container_of(context, netfront_accel_vnic, msg_from_bend);
#else
	netfront_accel_vnic *vnic = (netfront_accel_vnic *)context;
#endif
	struct net_accel_msg msg;
	int err, queue_was_full = 0;
	
	mutex_lock(&vnic->vnic_mutex);

	/*
	 * This happens when the shared pages have been unmapped but
	 * the workqueue has yet to be flushed 
	 */
	if (!vnic->dom0_state_is_setup) 
		goto unlock_out;

	while ((vnic->shared_page->aflags & NET_ACCEL_MSG_AFLAGS_TO_DOMU_MASK)
	       != 0) {
		if (vnic->shared_page->aflags &
		    NET_ACCEL_MSG_AFLAGS_QUEUEUNOTFULL) {
			/* We've been told there may now be space. */
			clear_bit(NET_ACCEL_MSG_AFLAGS_QUEUEUNOTFULL_B,
				  (unsigned long *)&vnic->shared_page->aflags);
		}

		if (vnic->shared_page->aflags &
		    NET_ACCEL_MSG_AFLAGS_QUEUE0FULL) {
			/*
			 * There will be space at the end of this
			 * function if we can make any.
			 */
			clear_bit(NET_ACCEL_MSG_AFLAGS_QUEUE0FULL_B,
				  (unsigned long *)&vnic->shared_page->aflags);
			queue_was_full = 1;
		}

		if (vnic->shared_page->aflags &
		    NET_ACCEL_MSG_AFLAGS_NETUPDOWN) {
			DPRINTK("%s: net interface change\n", __FUNCTION__);
			clear_bit(NET_ACCEL_MSG_AFLAGS_NETUPDOWN_B,
				  (unsigned long *)&vnic->shared_page->aflags);
			if (vnic->shared_page->net_dev_up)
				netfront_accel_interface_up(vnic);
			else
				netfront_accel_interface_down(vnic);
		}
	}

	/* Pull msg out of shared memory */
	while ((err = net_accel_msg_recv(vnic->shared_page, &vnic->from_dom0,
					 &msg)) == 0) {
		err = vnic_process_rx_msg(vnic, &msg);
		
		if (err != 0)
			goto done;
	}

	/*
	 * Send any pending buffer map request messages that we can,
	 * and mark domU->dom0 as full if necessary.  
	 */
	if (vnic->msg_state == NETFRONT_ACCEL_MSG_HW &&
	    vnic->bufpages.page_reqs < vnic->bufpages.max_pages) {
		if (vnic_send_buffer_requests(vnic, &vnic->bufpages) == -ENOSPC)
			vnic_set_queue_full(vnic);
	}

	/* 
	 * If there are no messages then this is not an error.  It
	 * just means that we've finished processing the queue.
	 */
	if (err == -ENOENT)
		err = 0;
 done:
	/* We will now have made space in the dom0->domU queue if we can */
	if (queue_was_full)
		vnic_set_queue_not_full(vnic);

	if (err != 0) {
		EPRINTK("%s returned %d\n", __FUNCTION__, err);
		netfront_accel_set_closing(vnic);
	}

 unlock_out:
	mutex_unlock(&vnic->vnic_mutex);

	return;
}
Code Example #4
static int
gst_droidcamsrc_stream_window_dequeue_buffer (struct preview_stream_ops *w,
    buffer_handle_t ** buffer, int *stride)
{
  GstDroidCamSrcStreamWindow *win;
  GstBuffer *buff;
  GstFlowReturn ret;
  int trials;
  GstBufferPoolAcquireParams params;
  GstMemory *mem;
  struct ANativeWindowBuffer *native;
  int res;

  GST_DEBUG ("dequeue buffer %p", buffer);

  win = container_of (w, GstDroidCamSrcStreamWindow, window);

  g_mutex_lock (&win->lock);

retry:
  GST_DEBUG ("needs reconfigure? %d", win->needs_reconfigure);

  if (!win->pool || (win->pool && win->needs_reconfigure)) {
    /* create and re/configure the pool */
    gst_droidcamsrc_stream_window_reset_buffer_pool_locked (win);
  }

  if (!win->pool) {
    GST_ERROR ("failed to create buffer pool");
    res = -1;
    goto unlock_and_exit;
  }

  mem = NULL;
  buff = NULL;  /* the acquire loop below can exit without assigning buff */
  trials = ACQUIRE_BUFFER_TRIALS;
  params.flags = GST_BUFFER_POOL_ACQUIRE_FLAG_DONTWAIT;

  while (trials > 0) {
    ret =
        gst_buffer_pool_acquire_buffer (GST_BUFFER_POOL (win->pool), &buff,
        &params);
    if (ret == GST_FLOW_OK) {
      /* we have our buffer */
      break;
    } else if (ret == GST_FLOW_ERROR || ret == GST_FLOW_FLUSHING) {
      /* no point in waiting */
      break;
    }

    /* we need to unlock here to allow buffers to be returned back */
    g_mutex_unlock (&win->lock);
    usleep (ACQUIRE_BUFFER_TIMEOUT);
    g_mutex_lock (&win->lock);
    if (win->needs_reconfigure) {
      /* out of here */
      goto retry;
    }

    --trials;
  }

  if (buff) {
    /* handover */
    mem = gst_buffer_peek_memory (buff, 0);
  } else if (ret == GST_FLOW_FLUSHING) {
    GST_INFO ("pool is flushing");
  } else {
    GST_WARNING ("failed to get a buffer");
  }

  if (!mem) {
    GST_ERROR ("no buffer memory found");

    res = -1;
    goto unlock_and_exit;
  }

  native = gst_memory_get_native_buffer (mem);
  if (!native) {
    GST_ERROR ("invalid buffer");
    gst_buffer_unref (buff);

    res = -1;
    goto unlock_and_exit;
  }

  *buffer = &native->handle;
  *stride = native->stride;

  GST_LOG ("dequeue buffer done %p", *buffer);

  res = 0;

unlock_and_exit:
  g_mutex_unlock (&win->lock);
  return res;
}
Code Example #5
/**
 * This function is called by the Gadget Driver for each EP to be
 * configured for the current configuration (SET_CONFIGURATION).  
 * 
 * This function initializes the dwc_otg_ep_t data structure, and then
 * calls dwc_otg_ep_activate.
 */
static int dwc_otg_pcd_ep_enable(struct usb_ep *_ep,
				 const struct usb_endpoint_descriptor *_desc)
{
	dwc_otg_pcd_ep_t *ep = 0;
	dwc_otg_pcd_t *pcd = 0;
	unsigned long flags;

	DWC_DEBUGPL(DBG_PCDV, "%s(%p,%p)\n", __func__, _ep, _desc);

	ep = container_of(_ep, dwc_otg_pcd_ep_t, ep);
	if (!_ep || !_desc || ep->desc ||
	    _desc->bDescriptorType != USB_DT_ENDPOINT) {
		DWC_WARN("%s, bad ep or descriptor\n", __func__);
		return -EINVAL;
	}
	if (ep == &ep->pcd->ep0) {
		DWC_WARN("%s, bad ep(0)\n", __func__);
		return -EINVAL;
	}

	/* Check FIFO size? */
	if (!_desc->wMaxPacketSize) {
		DWC_WARN("%s, bad %s maxpacket\n", __func__, _ep->name);
		return -ERANGE;
	}

	pcd = ep->pcd;
	if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) {
		DWC_WARN("%s, bogus device state\n", __func__);
		return -ESHUTDOWN;
	}

	SPIN_LOCK_IRQSAVE(&pcd->lock, flags);

	ep->desc = _desc;
	ep->ep.maxpacket = le16_to_cpu(_desc->wMaxPacketSize);

	/*
	 * Activate the EP
	 */
	ep->stopped = 0;

	ep->dwc_ep.is_in = (USB_DIR_IN & _desc->bEndpointAddress) != 0;
	ep->dwc_ep.maxpacket = ep->ep.maxpacket;

	ep->dwc_ep.type = _desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	if (ep->dwc_ep.is_in) {
		if (!pcd->otg_dev->core_if->en_multiple_tx_fifo) {
			ep->dwc_ep.tx_fifo_num = 0;

			if ((_desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
			    == USB_ENDPOINT_XFER_ISOC) {
				/* 
				 * if ISOC EP then assign a Periodic Tx FIFO.
				 */
				ep->dwc_ep.tx_fifo_num =
				    assign_perio_tx_fifo(pcd->otg_dev->core_if);
			}
		} else {
			/* 
			 * if Dedicated FIFOs mode is on then assign a Tx FIFO.
			 */
			ep->dwc_ep.tx_fifo_num =
			    assign_tx_fifo(pcd->otg_dev->core_if);
		}
	}
	/* Set initial data PID. */
	if ((_desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
	    USB_ENDPOINT_XFER_BULK) {
		ep->dwc_ep.data_pid_start = 0;
	}

	DWC_DEBUGPL(DBG_PCD, "Activate %s-%s: type=%d, mps=%d desc=%p\n",
		    ep->ep.name, (ep->dwc_ep.is_in ? "IN" : "OUT"),
		    ep->dwc_ep.type, ep->dwc_ep.maxpacket, ep->desc);

	dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep);
	SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags);
	return 0;
}
Code Example #6
static void
hfc4s8s_bh(struct work_struct *work)
{
	hfc4s8s_hw *hw = container_of(work, hfc4s8s_hw, tqueue);
	u_char b;
	struct hfc4s8s_l1 *l1p;
	volatile u_char *fifo_stat;
	int idx;

	/* handle layer 1 state changes */
	b = 1;
	l1p = hw->l1;
	while (b) {
		if ((b & hw->mr.r_irq_statech)) {
			/* reset l1 event */
			hw->mr.r_irq_statech &= ~b;
			if (l1p->enabled) {
				if (l1p->nt_mode) {
					u_char oldstate = l1p->l1_state;

					Write_hfc8(l1p->hw, R_ST_SEL,
						   l1p->st_num);
					l1p->l1_state =
					    Read_hfc8(l1p->hw,
						      A_ST_RD_STA) & 0xf;

					if ((oldstate == 3)
					    && (l1p->l1_state != 3))
						l1p->d_if.ifc.l1l2(&l1p->
								   d_if.
								   ifc,
								   PH_DEACTIVATE
								   |
								   INDICATION,
								   NULL);

					if (l1p->l1_state != 2) {
						del_timer(&l1p->l1_timer);
						if (l1p->l1_state == 3) {
							l1p->d_if.ifc.
							    l1l2(&l1p->
								 d_if.ifc,
								 PH_ACTIVATE
								 |
								 INDICATION,
								 NULL);
						}
					} else {
						/* allow transition */
						Write_hfc8(hw, A_ST_WR_STA,
							   M_SET_G2_G3);
						mod_timer(&l1p->l1_timer,
							  jiffies +
							  L1_TIMER_T1);
					}
					printk(KERN_INFO
					       "HFC-4S/8S: NT ch %d l1 state %d -> %d\n",
					       l1p->st_num, oldstate,
					       l1p->l1_state);
				} else {
					u_char oldstate = l1p->l1_state;

					Write_hfc8(l1p->hw, R_ST_SEL,
						   l1p->st_num);
					l1p->l1_state =
					    Read_hfc8(l1p->hw,
						      A_ST_RD_STA) & 0xf;

					if (((l1p->l1_state == 3) &&
					     ((oldstate == 7) ||
					      (oldstate == 8))) ||
					    ((timer_pending
					      (&l1p->l1_timer))
					     && (l1p->l1_state == 8))) {
						mod_timer(&l1p->l1_timer,
							  L1_TIMER_T4 +
							  jiffies);
					} else {
						if (l1p->l1_state == 7) {
							del_timer(&l1p->
								  l1_timer);
							l1p->d_if.ifc.
							    l1l2(&l1p->
								 d_if.ifc,
								 PH_ACTIVATE
								 |
								 INDICATION,
								 NULL);
							tx_d_frame(l1p);
						}
						if (l1p->l1_state == 3) {
							if (oldstate != 3)
								l1p->d_if.
								    ifc.
								    l1l2
								    (&l1p->
								     d_if.
								     ifc,
								     PH_DEACTIVATE
								     |
								     INDICATION,
								     NULL);
						}
					}
					printk(KERN_INFO
					       "HFC-4S/8S: TE %d ch %d l1 state %d -> %d\n",
					       l1p->hw->cardnum,
					       l1p->st_num, oldstate,
					       l1p->l1_state);
				}
			}
		}
		b <<= 1;
		l1p++;
	}

	/* now handle the fifos */
	idx = 0;
	fifo_stat = hw->mr.r_irq_fifo_blx;
	l1p = hw->l1;
	while (idx < hw->driver_data.max_st_ports) {

		if (hw->mr.timer_irq) {
			*fifo_stat |= hw->mr.fifo_rx_trans_enables[idx];
			if (hw->fifo_sched_cnt <= 0) {
				*fifo_stat |=
				    hw->mr.fifo_slow_timer_service[l1p->
								   st_num];
			}
		}
		/* ignore fifo 6 (TX E fifo) */
		*fifo_stat &= 0xff - 0x40;

		while (*fifo_stat) {

			if (!l1p->nt_mode) {
				/* RX Fifo has data to read */
				if ((*fifo_stat & 0x20)) {
					*fifo_stat &= ~0x20;
					rx_d_frame(l1p, 0);
				}
				/* E Fifo has data to read */
				if ((*fifo_stat & 0x80)) {
					*fifo_stat &= ~0x80;
					rx_d_frame(l1p, 1);
				}
				/* TX Fifo completed send */
				if ((*fifo_stat & 0x10)) {
					*fifo_stat &= ~0x10;
					tx_d_frame(l1p);
				}
			}
			/* B1 RX Fifo has data to read */
			if ((*fifo_stat & 0x2)) {
				*fifo_stat &= ~0x2;
				rx_b_frame(l1p->b_ch);
			}
			/* B1 TX Fifo has send completed */
			if ((*fifo_stat & 0x1)) {
				*fifo_stat &= ~0x1;
				tx_b_frame(l1p->b_ch);
			}
			/* B2 RX Fifo has data to read */
			if ((*fifo_stat & 0x8)) {
				*fifo_stat &= ~0x8;
				rx_b_frame(l1p->b_ch + 1);
			}
			/* B2 TX Fifo has send completed */
			if ((*fifo_stat & 0x4)) {
				*fifo_stat &= ~0x4;
				tx_b_frame(l1p->b_ch + 1);
			}
		}
		fifo_stat++;
		l1p++;
		idx++;
	}

	if (hw->fifo_sched_cnt <= 0)
		hw->fifo_sched_cnt += (1 << (7 - TRANS_TIMER_MODE));
	hw->mr.timer_irq = 0;	/* clear requested timer irq */
}				/* hfc4s8s_bh */
Code Example #7
File: acpi_piix4.c Project: abligh/qemu-quantal
static void pm_tmr_timer(ACPIREGS *ar)
{
    PIIX4PMState *s = container_of(ar, PIIX4PMState, ar);
    pm_update_sci(s);
}
Code Example #8
File: aspeed_timer.c Project: Dovgalyuk/qemu
/**
 * Avoid mutual references between AspeedTimerCtrlState and AspeedTimer
 * structs, as it's a waste of memory. The ptimer BH callback needs to know
 * whether a specific AspeedTimer is enabled, but this information is held in
 * AspeedTimerCtrlState. So, provide a helper to hoist ourselves from an
 * arbitrary AspeedTimer to AspeedTimerCtrlState.
 */
static inline AspeedTimerCtrlState *timer_to_ctrl(AspeedTimer *t)
{
    const AspeedTimer (*timers)[] = (void *)t - (t->id * sizeof(*t));
    return container_of(timers, AspeedTimerCtrlState, timers);
}
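
The two-step recovery above is dense: the first line rewinds from timer t to the start of the timers array, and container_of then hoists the array to its parent struct. Below is a self-contained sketch of the same technique using hypothetical stand-in types (Timer, Ctrl), not the real QEMU structs.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical stand-ins for AspeedTimer/AspeedTimerCtrlState. */
typedef struct { int id; } Timer;
typedef struct { int irq; Timer timers[8]; } Ctrl;

static Ctrl *timer_to_ctrl(Timer *t)
{
    /* Step 1: rewind from element t to the start of the array. */
    Timer (*timers)[8] = (Timer (*)[8])((char *)t - t->id * sizeof(*t));
    /* Step 2: hoist the array to its containing struct. */
    return container_of(timers, Ctrl, timers);
}

int main(void)
{
    Ctrl c = { .irq = 5 };
    for (int i = 0; i < 8; i++)
        c.timers[i].id = i;
    printf("%d\n", timer_to_ctrl(&c.timers[3])->irq); /* prints 5 */
    return 0;
}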
Code Example #9
status_t Camera3OutputStream::returnBufferCheckedLocked(
            const camera3_stream_buffer &buffer,
            nsecs_t timestamp,
            bool output,
            /*out*/
            sp<Fence> *releaseFenceOut) {

    (void)output;
    ALOG_ASSERT(output, "Expected output to be true");

    status_t res;
    sp<Fence> releaseFence;

    /**
     * Fence management - calculate Release Fence
     */
    if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR) {
        if (buffer.release_fence != -1) {
            ALOGE("%s: Stream %d: HAL should not set release_fence(%d) when "
                  "there is an error", __FUNCTION__, mId, buffer.release_fence);
            close(buffer.release_fence);
        }

        /**
         * Reassign release fence as the acquire fence in case of error
         */
        releaseFence = new Fence(buffer.acquire_fence);
    } else {
        res = native_window_set_buffers_timestamp(mConsumer.get(), timestamp);
        if (res != OK) {
            ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)",
                  __FUNCTION__, mId, strerror(-res), res);
            return res;
        }

        releaseFence = new Fence(buffer.release_fence);
    }

    int anwReleaseFence = releaseFence->dup();

    /**
     * Release the lock briefly to avoid deadlock with
     * StreamingProcessor::startStream -> Camera3Stream::isConfiguring (this
     * thread will go into StreamingProcessor::onFrameAvailable) during
     * queueBuffer
     */
    sp<ANativeWindow> currentConsumer = mConsumer;
    mLock.unlock();

    /**
     * Return buffer back to ANativeWindow
     */
    if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR) {
        // Cancel buffer
        res = currentConsumer->cancelBuffer(currentConsumer.get(),
                container_of(buffer.buffer, ANativeWindowBuffer, handle),
                anwReleaseFence);
        if (res != OK) {
            ALOGE("%s: Stream %d: Error cancelling buffer to native window:"
                  " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
        }
    } else {
        res = currentConsumer->queueBuffer(currentConsumer.get(),
                container_of(buffer.buffer, ANativeWindowBuffer, handle),
                anwReleaseFence);
        if (res != OK) {
            ALOGE("%s: Stream %d: Error queueing buffer to native window: "
                  "%s (%d)", __FUNCTION__, mId, strerror(-res), res);
        }
    }
    mLock.lock();
    if (res != OK) {
        close(anwReleaseFence);
        return res;
    }

    *releaseFenceOut = releaseFence;

    return OK;
}
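
Note that the container_of here inverts the hand-off seen in example #4: the preview stream gives the camera HAL a buffer_handle_t* that is really &ANativeWindowBuffer::handle, and on the way back the embedded handle pointer is converted to the owning ANativeWindowBuffer for queueBuffer/cancelBuffer.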
Code Example #10
File: kqueue.c Project: DrumTechnologiesLtd/node
/* Called by libev, don't touch. */
void uv__kqueue_hack(EV_P_ int fflags, ev_io *w) {
  uv_fs_event_t* handle;

  handle = container_of(w, uv_fs_event_t, event_watcher);
  handle->fflags = fflags;
}
Code Example #11
static inline xnshm_a_t *link2shma(xnholder_t *ln)
{
	return ln ? container_of(ln, xnshm_a_t, link) : NULL;
}
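
The NULL guard matters: applying container_of to a NULL pointer does not yield NULL but a bogus pointer a few bytes below address zero, so lookup helpers that can legitimately receive NULL wrap the conversion as shown here.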
Code Example #12
static void usbVcpEndWrite(serialPort_t *instance)
{
    vcpPort_t *port = container_of(instance, vcpPort_t, port);
    port->buffering = false;
    usbVcpFlush(port);
}
Code Example #13
static void usbVcpBeginWrite(serialPort_t *instance)
{
    vcpPort_t *port = container_of(instance, vcpPort_t, port);
    port->buffering = true;
}
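
Examples #12 and #13 work because serialPort_t is embedded inside vcpPort_t: the serial API hands callbacks a serialPort_t* and the VCP driver recovers its own state from it. A hypothetical layout sketch, not the real flight-controller definitions:

/* Hypothetical sketch; serialPort_t stands for the project's generic
 * serial interface struct (function table, buffers, ...). */
typedef struct {
    serialPort_t port;  /* embedded base: the serial API sees &port... */
    bool buffering;     /* ...and container_of recovers the vcpPort_t */
} vcpPort_t;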
Code Example #14
File: getaddrinfo.c Project: DavidCai1993/node
/*
 * Called from uv_run when complete. Call user specified callback
 * then free returned addrinfo
 * Returned addrinfo strings are converted from UTF-16 to UTF-8.
 *
 * To minimize allocation we calculate total size required,
 * and copy all structs and referenced strings into the one block.
 * Each size calculation is adjusted to avoid unaligned pointers.
 */
static void uv__getaddrinfo_done(struct uv__work* w, int status) {
  uv_getaddrinfo_t* req;
  int addrinfo_len = 0;
  int name_len = 0;
  size_t addrinfo_struct_len = ALIGNED_SIZE(sizeof(struct addrinfo));
  struct addrinfoW* addrinfow_ptr;
  struct addrinfo* addrinfo_ptr;
  char* alloc_ptr = NULL;
  char* cur_ptr = NULL;

  req = container_of(w, uv_getaddrinfo_t, work_req);

  /* release input parameter memory */
  uv__free(req->alloc);
  req->alloc = NULL;

  if (status == UV_ECANCELED) {
    assert(req->retcode == 0);
    req->retcode = UV_EAI_CANCELED;
    goto complete;
  }

  if (req->retcode == 0) {
    /* convert addrinfoW to addrinfo */
    /* first calculate required length */
    addrinfow_ptr = req->addrinfow;
    while (addrinfow_ptr != NULL) {
      addrinfo_len += addrinfo_struct_len +
          ALIGNED_SIZE(addrinfow_ptr->ai_addrlen);
      if (addrinfow_ptr->ai_canonname != NULL) {
        name_len = WideCharToMultiByte(CP_UTF8,
                                       0,
                                       addrinfow_ptr->ai_canonname,
                                       -1,
                                       NULL,
                                       0,
                                       NULL,
                                       NULL);
        if (name_len == 0) {
          req->retcode = uv_translate_sys_error(GetLastError());
          goto complete;
        }
        addrinfo_len += ALIGNED_SIZE(name_len);
      }
      addrinfow_ptr = addrinfow_ptr->ai_next;
    }

    /* allocate memory for addrinfo results */
    alloc_ptr = (char*)uv__malloc(addrinfo_len);

    /* do conversions */
    if (alloc_ptr != NULL) {
      cur_ptr = alloc_ptr;
      addrinfow_ptr = req->addrinfow;

      while (addrinfow_ptr != NULL) {
        /* copy addrinfo struct data */
        assert(cur_ptr + addrinfo_struct_len <= alloc_ptr + addrinfo_len);
        addrinfo_ptr = (struct addrinfo*)cur_ptr;
        addrinfo_ptr->ai_family = addrinfow_ptr->ai_family;
        addrinfo_ptr->ai_socktype = addrinfow_ptr->ai_socktype;
        addrinfo_ptr->ai_protocol = addrinfow_ptr->ai_protocol;
        addrinfo_ptr->ai_flags = addrinfow_ptr->ai_flags;
        addrinfo_ptr->ai_addrlen = addrinfow_ptr->ai_addrlen;
        addrinfo_ptr->ai_canonname = NULL;
        addrinfo_ptr->ai_addr = NULL;
        addrinfo_ptr->ai_next = NULL;

        cur_ptr += addrinfo_struct_len;

        /* copy sockaddr */
        if (addrinfo_ptr->ai_addrlen > 0) {
          assert(cur_ptr + addrinfo_ptr->ai_addrlen <=
                 alloc_ptr + addrinfo_len);
          memcpy(cur_ptr, addrinfow_ptr->ai_addr, addrinfo_ptr->ai_addrlen);
          addrinfo_ptr->ai_addr = (struct sockaddr*)cur_ptr;
          cur_ptr += ALIGNED_SIZE(addrinfo_ptr->ai_addrlen);
        }

        /* convert canonical name to UTF-8 */
        if (addrinfow_ptr->ai_canonname != NULL) {
          name_len = WideCharToMultiByte(CP_UTF8,
                                         0,
                                         addrinfow_ptr->ai_canonname,
                                         -1,
                                         NULL,
                                         0,
                                         NULL,
                                         NULL);
          assert(name_len > 0);
          assert(cur_ptr + name_len <= alloc_ptr + addrinfo_len);
          name_len = WideCharToMultiByte(CP_UTF8,
                                         0,
                                         addrinfow_ptr->ai_canonname,
                                         -1,
                                         cur_ptr,
                                         name_len,
                                         NULL,
                                         NULL);
          assert(name_len > 0);
          addrinfo_ptr->ai_canonname = cur_ptr;
          cur_ptr += ALIGNED_SIZE(name_len);
        }
        assert(cur_ptr <= alloc_ptr + addrinfo_len);

        /* set next ptr */
        addrinfow_ptr = addrinfow_ptr->ai_next;
        if (addrinfow_ptr != NULL) {
          addrinfo_ptr->ai_next = (struct addrinfo*)cur_ptr;
        }
      }
      req->addrinfo = (struct addrinfo*)alloc_ptr;
    } else {
      req->retcode = UV_EAI_MEMORY;
    }
  }

  /* return memory to system */
  if (req->addrinfow != NULL) {
    FreeAddrInfoW(req->addrinfow);
    req->addrinfow = NULL;
  }

complete:
  uv__req_unregister(req->loop, req);

  /* finally do callback with converted result */
  if (req->getaddrinfo_cb)
    req->getaddrinfo_cb(req, req->retcode, req->addrinfo);
}
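
The header comment notes that each size is adjusted to avoid unaligned pointers: ALIGNED_SIZE rounds every size up before it joins the running total, so each struct and sockaddr copied into the single block starts on an aligned boundary. A plausible definition, assuming rounding to a 4-byte multiple (an assumption about the helper, not a quote from the file):

/* Assumed shape of the helper: round X up to the next multiple of 4. */
#define ALIGNED_SIZE(X) ((((X) + 3) >> 2) << 2)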
Code Example #15
/*****************************************************************************
* FUNCTION
*  hal_dma_send_data
* DESCRIPTION
*  send data through btif in DMA mode
* PARAMETERS
* p_dma_info   [IN]        pointer to BTIF dma channel's information
* p_buf     [IN]        pointer to tx data buffer
* buf_len   [IN]        tx data length
* RETURNS
*  0 means success, negative means fail
*****************************************************************************/
int hal_dma_send_data(P_MTK_DMA_INFO_STR p_dma_info,
		      const unsigned char *p_buf, const unsigned int buf_len)
{
	int i_ret = -1;	/* negative means fail, per the contract above */
	unsigned int base = p_dma_info->base;
	P_DMA_VFIFO p_vfifo = p_dma_info->p_vfifo;
	unsigned int len_to_send = buf_len;
	unsigned int ava_len = 0;
	unsigned int wpt = 0;
	unsigned int last_wpt_wrap = 0;
	unsigned int vff_size = 0;
	unsigned char *p_data = (unsigned char *)p_buf;
	P_MTK_BTIF_DMA_VFIFO p_mtk_vfifo = container_of(p_vfifo,
							MTK_BTIF_DMA_VFIFO,
							vfifo);

	BTIF_TRC_FUNC();
	if ((NULL == p_buf) || (0 == buf_len)) {
		i_ret = ERR_INVALID_PAR;
		BTIF_ERR_FUNC("invalid parameters, p_buf:%p, buf_len:%d\n",
			      p_buf, buf_len);
		return i_ret;
	}
/*check if tx dma is in a flush operation; if so, we must wait until the DMA finishes flushing*/
/*currently the upper layer logic makes sure of this pre-condition*/
/*disable Tx IER, in case a Tx irq happens and the flush bit gets set in the irq handler*/
	btif_tx_dma_ier_ctrl(p_dma_info, false);

	vff_size = p_mtk_vfifo->vfifo.vfifo_size;
	ava_len = BTIF_READ32(TX_DMA_VFF_LEFT_SIZE(base));
	wpt = BTIF_READ32(TX_DMA_VFF_WPT(base)) & DMA_WPT_MASK;
	last_wpt_wrap = BTIF_READ32(TX_DMA_VFF_WPT(base)) & DMA_WPT_WRAP;

/*copy data to vFIFO. Note: ava_len should always be larger than buf_len, otherwise the common logic layer will not call hal_dma_send_data*/
	if (buf_len > ava_len) {
		BTIF_ERR_FUNC
		    ("length to send:(%d) < length available(%d), abnormal!!!---!!!\n",
		     buf_len, ava_len);
		BUG_ON(buf_len > ava_len);	/* this will cause kernel panic */
	}

	len_to_send = buf_len < ava_len ? buf_len : ava_len;
	if (len_to_send + wpt >= vff_size) {
		unsigned int tail_len = vff_size - wpt;
		memcpy((p_mtk_vfifo->vfifo.p_vir_addr + wpt), p_data, tail_len);
		p_data += tail_len;
		memcpy(p_mtk_vfifo->vfifo.p_vir_addr,
		       p_data, len_to_send - tail_len);
/*make sure all data is written to the memory area where the tx vfifo resides*/
		dsb();

/*calculate WPT*/
		wpt = wpt + len_to_send - vff_size;
		last_wpt_wrap ^= DMA_WPT_WRAP;
	} else {
		memcpy((p_mtk_vfifo->vfifo.p_vir_addr + wpt),
		       p_data, len_to_send);
/*make sure all data is written to the memory area where the tx vfifo resides*/
		dsb();

/*calculate WPT*/
		wpt += len_to_send;
	}
	p_mtk_vfifo->wpt = wpt;
	p_mtk_vfifo->last_wpt_wrap = last_wpt_wrap;

/*make sure the tx dma may be used (tx flush bit is not set) before updating the WPT*/
	if (hal_dma_is_tx_allow(p_dma_info)) {
/*make sure tx dma enabled*/
		hal_btif_dma_ctrl(p_dma_info, DMA_CTRL_ENABLE);

/*update WPT in the Tx DMA controller's control register*/
		btif_reg_sync_writel(wpt | last_wpt_wrap, TX_DMA_VFF_WPT(base));

		if ((8 > BTIF_READ32(TX_DMA_VFF_VALID_SIZE(base))) &&
		    (0 < BTIF_READ32(TX_DMA_VFF_VALID_SIZE(base)))) {
/*if 0 < valid size in Tx vFIFO < 8 and no Tx flush is in progress (which should always hold here), set the DMA's flush bit*/
			_tx_dma_flush(p_dma_info);
		}
		i_ret = len_to_send;
	} else {
/*TODO: print error log*/
		BTIF_ERR_FUNC
		    ("Tx DMA flush operation is in process, this case should never happen, please check if tx operation is allowed before call this API\n");
/*if a flush operation is in process, we return 0*/
		i_ret = 0;
	}

/*Enable Tx IER*/
	btif_tx_dma_ier_ctrl(p_dma_info, true);

	BTIF_TRC_FUNC();
	return i_ret;
}
Code Example #16
File: threadpool.c Project: 7designstudios/node
static void uv__queue_work(struct uv__work* w) {
  uv_work_t* req = container_of(w, uv_work_t, work_req);

  req->work_cb(req);
}
Code Example #17
/*****************************************************************************
* FUNCTION
*  hal_rx_dma_irq_handler
* DESCRIPTION
*  lower level rx interrupt handler
* PARAMETERS
* p_dma_info   [IN]        pointer to BTIF dma channel's information
* p_buf     [IN/OUT] pointer to rx data buffer
* max_len  [IN]        max length of rx buffer
* RETURNS
*  0 means success, negative means fail
*****************************************************************************/
int hal_rx_dma_irq_handler(P_MTK_DMA_INFO_STR p_dma_info,
			   unsigned char *p_buf, const unsigned int max_len)
{
	int i_ret = -1;
	unsigned int valid_len = 0;
	unsigned int wpt_wrap = 0;
	unsigned int rpt_wrap = 0;
	unsigned int wpt = 0;
	unsigned int rpt = 0;
	unsigned int tail_len = 0;
	unsigned int real_len = 0;
	unsigned int base = p_dma_info->base;
	P_DMA_VFIFO p_vfifo = p_dma_info->p_vfifo;
	dma_rx_buf_write rx_cb = p_dma_info->rx_cb;
	unsigned char *p_vff_buf = NULL;
	unsigned char *vff_base = p_vfifo->p_vir_addr;
	unsigned int vff_size = p_vfifo->vfifo_size;
	P_MTK_BTIF_DMA_VFIFO p_mtk_vfifo = container_of(p_vfifo,
							MTK_BTIF_DMA_VFIFO,
							vfifo);
	unsigned long flag = 0;

	spin_lock_irqsave(&(g_clk_cg_spinlock), flag);
#if MTK_BTIF_ENABLE_CLK_CTL
	if (0 == clock_is_on(MTK_BTIF_APDMA_CLK_CG)) {
		spin_unlock_irqrestore(&(g_clk_cg_spinlock), flag);
		BTIF_ERR_FUNC("%s: clock is off before irq handle done!!!\n",
			      __FILE__);
		return i_ret;
	}
#endif
/*disable DMA Rx IER*/
	hal_btif_dma_ier_ctrl(p_dma_info, false);

/*clear Rx DMA's interrupt status*/
	BTIF_SET_BIT(RX_DMA_INT_FLAG(base), RX_DMA_INT_DONE | RX_DMA_INT_THRE);

	valid_len = BTIF_READ32(RX_DMA_VFF_VALID_SIZE(base));
	rpt = BTIF_READ32(RX_DMA_VFF_RPT(base));
	wpt = BTIF_READ32(RX_DMA_VFF_WPT(base));
	if ((0 == valid_len) && (rpt == wpt)) {
		BTIF_DBG_FUNC
		    ("rx interrupt, no data available in Rx DMA, wpt(0x%08x), rpt(0x%08x)\n",
		     rpt, wpt);
	}

	i_ret = 0;

	while ((0 < valid_len) || (rpt != wpt)) {
		rpt_wrap = rpt & DMA_RPT_WRAP;
		wpt_wrap = wpt & DMA_WPT_WRAP;
		rpt &= DMA_RPT_MASK;
		wpt &= DMA_WPT_MASK;

/*calculate length of available data in vFIFO*/
		if (wpt_wrap != p_mtk_vfifo->last_wpt_wrap) {
			real_len = wpt + vff_size - rpt;
		} else {
			real_len = wpt - rpt;
		}

		if (NULL != rx_cb) {
			tail_len = vff_size - rpt;
			p_vff_buf = vff_base + rpt;
			if (tail_len >= real_len) {
				(*rx_cb) (p_dma_info, p_vff_buf, real_len);
			} else {
				(*rx_cb) (p_dma_info, p_vff_buf, tail_len);
				p_vff_buf = vff_base;
				(*rx_cb) (p_dma_info, p_vff_buf, real_len -
					  tail_len);
			}
			i_ret += real_len;
		} else {
			BTIF_ERR_FUNC
			    ("no rx_cb found, please check your init process\n");
		}
		dsb();
		rpt += real_len;
		if (rpt >= vff_size) {
/*read pointer wrap bit should be inverted*/
			rpt_wrap ^= DMA_RPT_WRAP;
			rpt %= vff_size;
		}
		rpt |= rpt_wrap;
/*record wpt, last_wpt_wrap, rpt, last_rpt_wrap*/
		p_mtk_vfifo->wpt = wpt;
		p_mtk_vfifo->last_wpt_wrap = wpt_wrap;

		p_mtk_vfifo->rpt = rpt;
		p_mtk_vfifo->last_rpt_wrap = rpt_wrap;

/*update rpt information to DMA controller*/
		btif_reg_sync_writel(rpt, RX_DMA_VFF_RPT(base));

/*get vff valid size again and check if rx data is processed completely*/
		valid_len = BTIF_READ32(RX_DMA_VFF_VALID_SIZE(base));

		rpt = BTIF_READ32(RX_DMA_VFF_RPT(base));
		wpt = BTIF_READ32(RX_DMA_VFF_WPT(base));
	}

/*enable DMA Rx IER*/
	hal_btif_dma_ier_ctrl(p_dma_info, true);
	spin_unlock_irqrestore(&(g_clk_cg_spinlock), flag);
	return i_ret;
}
Code Example #18
File: s3cfb_fimd5x.c Project: IoveSunny/DreamBox
int s3cfb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
{
	s3cfb_info_t *fbi = container_of(info, s3cfb_info_t, fb);
	s3cfb_win_info_t win_info;
	s3cfb_color_key_info_t colkey_info;
	s3cfb_color_val_info_t colval_info;
	s3cfb_dma_info_t dma_info;
	s3cfb_next_info_t next_fb_info;
	struct fb_var_screeninfo *var= &fbi->fb.var;
	unsigned int crt, alpha_level, alpha_mode;

/* should be fixed for c100 */
#if defined(CONFIG_S3C6410_PWM) || defined(CONFIG_S5PC1XX_PWM)
	int brightness;
#endif

#if defined(CONFIG_FB_S3C_EXT_DOUBLE_BUFFERING)
	unsigned int f_num_val;
#endif

#if defined(CONFIG_FB_S3C_EXT_VIRTUAL_SCREEN)
	s3cfb_vs_info_t vs_info;
#endif

	switch(cmd){
	case S3CFB_GET_INFO:
		dma_info.map_dma_f1 = fbi->map_dma_f1;
		dma_info.map_dma_f2 = fbi->map_dma_f2;

		if(copy_to_user((void *) arg, (const void *) &dma_info, sizeof(s3cfb_dma_info_t)))
			return -EFAULT;
		break;

	case S3CFB_OSD_SET_INFO:
		if (copy_from_user(&win_info, (s3cfb_win_info_t *) arg, sizeof(s3cfb_win_info_t)))
			return -EFAULT;

		s3cfb_init_win(fbi, win_info.bpp, win_info.left_x, win_info.top_y, win_info.width, win_info.height, OFF);
		break;

	case S3CFB_OSD_START:
		s3cfb_onoff_win(fbi, ON);
		break;

	case S3CFB_OSD_STOP:
		s3cfb_onoff_win(fbi, OFF);
		break;

	case S3CFB_OSD_ALPHA_UP:
		alpha_level = readl(S3C_VIDOSD0C + (0x10 * fbi->win_id)) & 0xf;

		if (alpha_level < S3CFB_MAX_ALPHA_LEVEL)
			alpha_level++;

		s3cfb_set_alpha_level(fbi, alpha_level, 1);
		break;

	case S3CFB_OSD_ALPHA_DOWN:
		alpha_level = readl(S3C_VIDOSD0C + (0x10 * fbi->win_id)) & 0xf;

		if (alpha_level > 0)
			alpha_level--;

		s3cfb_set_alpha_level(fbi, alpha_level, 1);
		break;

	case S3CFB_OSD_ALPHA0_SET:
		alpha_level = (unsigned int) arg;

		if (alpha_level > S3CFB_MAX_ALPHA_LEVEL)
			alpha_level = S3CFB_MAX_ALPHA_LEVEL;

		s3cfb_set_alpha_level(fbi, alpha_level, 0);
		break;

	case S3CFB_OSD_ALPHA1_SET:
		alpha_level = (unsigned int) arg;

		if (alpha_level > S3CFB_MAX_ALPHA_LEVEL)
			alpha_level = S3CFB_MAX_ALPHA_LEVEL;

		s3cfb_set_alpha_level(fbi, alpha_level, 1);
		break;

	case S3CFB_OSD_ALPHA_MODE:
		alpha_mode = (unsigned int) arg;
		s3cfb_set_alpha_mode(fbi, alpha_mode);
		break;

	case S3CFB_OSD_MOVE_LEFT:
		if (var->xoffset > 0)
			var->xoffset--;

		s3cfb_set_win_position(fbi, var->xoffset, var->yoffset, var->xres, var->yres);
		break;

	case S3CFB_OSD_MOVE_RIGHT:
		if (var->xoffset < (s3cfb_fimd.width - var->xres))
			var->xoffset++;

		s3cfb_set_win_position(fbi, var->xoffset, var->yoffset, var->xres, var->yres);
		break;

	case S3CFB_OSD_MOVE_UP:
		if (var->yoffset > 0)
			var->yoffset--;

		s3cfb_set_win_position(fbi, var->xoffset, var->yoffset, var->xres, var->yres);
		break;

	case S3CFB_OSD_MOVE_DOWN:
		if (var->yoffset < (s3cfb_fimd.height - var->yres))
			var->yoffset++;

		s3cfb_set_win_position(fbi, var->xoffset, var->yoffset, var->xres, var->yres);
		break;

	case FBIO_WAITFORVSYNC:
		if (get_user(crt, (unsigned int __user *)arg))
			return -EFAULT;

		return s3cfb_wait_for_vsync();

	case S3CFB_COLOR_KEY_START:
		s3cfb_onoff_color_key(fbi, ON);
		break;

	case S3CFB_COLOR_KEY_STOP:
		s3cfb_onoff_color_key(fbi, OFF);
		break;

	case S3CFB_COLOR_KEY_ALPHA_START:
		s3cfb_onoff_color_key_alpha(fbi, ON);
		break;

	case S3CFB_COLOR_KEY_ALPHA_STOP:
		s3cfb_onoff_color_key_alpha(fbi, OFF);
		break;

	case S3CFB_COLOR_KEY_SET_INFO:
		if (copy_from_user(&colkey_info, (s3cfb_color_key_info_t *) arg, sizeof(s3cfb_color_key_info_t)))
			return -EFAULT;

		s3cfb_set_color_key_registers(fbi, colkey_info);
		break;

	case S3CFB_COLOR_KEY_VALUE:
		if (copy_from_user(&colval_info, (s3cfb_color_val_info_t *) arg, sizeof(s3cfb_color_val_info_t)))
			return -EFAULT;

		s3cfb_set_color_value(fbi, colval_info);
		break;

	case S3CFB_SET_VSYNC_INT:
		s3cfb_fimd.vidintcon0 &= ~S3C_VIDINTCON0_FRAMESEL0_MASK;
		s3cfb_fimd.vidintcon0 |= S3C_VIDINTCON0_FRAMESEL0_VSYNC;

		if (arg)
			s3cfb_fimd.vidintcon0 |= S3C_VIDINTCON0_INTFRMEN_ENABLE;
		else
			s3cfb_fimd.vidintcon0 &= ~S3C_VIDINTCON0_INTFRMEN_ENABLE;

		writel(s3cfb_fimd.vidintcon0, S3C_VIDINTCON0);
		break;

	case S3CFB_SET_NEXT_FB_INFO:
		if (copy_from_user(&next_fb_info, (s3cfb_next_info_t *) arg, sizeof(s3cfb_next_info_t)))
			return -EFAULT;

		/* check arguments */
		if ((next_fb_info.xres + next_fb_info.xoffset) > next_fb_info.xres_virtual ||
			(next_fb_info.yres + next_fb_info.yoffset) > next_fb_info.yres_virtual ||
			(next_fb_info.xres + next_fb_info.lcd_offset_x ) > s3cfb_fimd.width ||
			(next_fb_info.yres + next_fb_info.lcd_offset_y ) > s3cfb_fimd.height)
			return -EINVAL;

		fbi->next_fb_info = next_fb_info;
		fbi->next_fb_info_change_req = 1;
		break;

	case S3CFB_GET_CURR_FB_INFO:
		next_fb_info.phy_start_addr = fbi->fb.fix.smem_start;
		next_fb_info.xres = fbi->fb.var.xres;
		next_fb_info.yres = fbi->fb.var.yres;
		next_fb_info.xres_virtual = fbi->fb.var.xres_virtual;
		next_fb_info.yres_virtual = fbi->fb.var.yres_virtual;
		next_fb_info.xoffset = fbi->fb.var.xoffset;
		next_fb_info.yoffset = fbi->fb.var.yoffset;
		next_fb_info.lcd_offset_x = fbi->lcd_offset_x;
		next_fb_info.lcd_offset_y = fbi->lcd_offset_y;

		if (copy_to_user((void *)arg, (s3cfb_next_info_t *) &next_fb_info, sizeof(s3cfb_next_info_t)))
			return -EFAULT;
		break;

	case S3CFB_GET_BRIGHTNESS:
		if (copy_to_user((void *)arg, (const void *) &s3cfb_fimd.brightness, sizeof(int)))
			return -EFAULT;
		break;

/* should be fixed for c100 */
#if defined(CONFIG_S3C6410_PWM) || defined(CONFIG_S5PC1XX_PWM)
	case S3CFB_SET_BRIGHTNESS:
		if (copy_from_user(&brightness, (int *) arg, sizeof(int)))
			return -EFAULT;

		s3cfb_set_brightness(brightness);
		break;
#endif

#if defined(CONFIG_FB_S3C_EXT_VIRTUAL_SCREEN)
	case S3CFB_VS_START:
		s3cfb_fimd.wincon0 &= ~(S3C_WINCONx_ENWIN_F_ENABLE);
		writel(s3cfb_fimd.wincon0 | S3C_WINCONx_ENWIN_F_ENABLE, S3C_WINCON0);

		fbi->fb.var.xoffset = s3cfb_fimd.xoffset;
		fbi->fb.var.yoffset = s3cfb_fimd.yoffset;
		break;

	case S3CFB_VS_STOP:
		s3cfb_fimd.vidw00add0b0 = fbi->screen_dma_f1;
		s3cfb_fimd.vidw00add0b1 = fbi->screen_dma_f2;
		fbi->fb.var.xoffset = 0;
		fbi->fb.var.yoffset = 0;

		writel(s3cfb_fimd.vidw00add0b0, S3C_VIDW00ADD0B0);
		writel(s3cfb_fimd.vidw00add0b1, S3C_VIDW00ADD0B1);

		break;

	case S3CFB_VS_SET_INFO:
		if (copy_from_user(&vs_info, (s3cfb_vs_info_t *) arg, sizeof(s3cfb_vs_info_t)))
			return -EFAULT;

		if (s3cfb_set_vs_info(vs_info)) {
			printk("Error S3CFB_VS_SET_INFO\n");
			return -EINVAL;
		}

		s3cfb_set_vs_registers(S3CFB_VS_SET);

		fbi->fb.var.xoffset = s3cfb_fimd.xoffset;
		fbi->fb.var.yoffset = s3cfb_fimd.yoffset;
		break;

	case S3CFB_VS_MOVE:
		s3cfb_set_vs_registers(arg);

		fbi->fb.var.xoffset = s3cfb_fimd.xoffset;
		fbi->fb.var.yoffset = s3cfb_fimd.yoffset;
		break;
#endif

#if defined(CONFIG_FB_S3C_EXT_DOUBLE_BUFFERING)
	case S3CFB_GET_NUM:
		if (copy_from_user((void *)&f_num_val, (const void *)arg, sizeof(u_int)))
			return -EFAULT;

		if (copy_to_user((void *)arg, (const void *) &f_num_val, sizeof(u_int)))
			return -EFAULT;

		break;

	case S3CFB_CHANGE_REQ:
		s3cfb_change_buff(0, (int) arg);
		break;
#endif

	default:
		return -EINVAL;
	}

	return 0;
}
Code Example #19
File: virtio-ccw.c Project: DVSB/qemu
/* DeviceState to VirtioCcwDevice. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtioCcwDevice *to_virtio_ccw_dev_fast(DeviceState *d)
{
    return container_of(d, VirtioCcwDevice, parent_obj);
}
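
The performance note in the comment is easy to satisfy: container_of is pure compile-time pointer arithmetic, so this helper compiles to a single constant-offset subtraction (often folded away entirely) and adds no run-time cost on the datapath.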
Code Example #20
static void pm_tmr_timer(ACPIPMTimer *tmr)
{
    PIIX4PMState *s = container_of(tmr, PIIX4PMState, tmr);
    pm_update_sci(s);
}
Code Example #21
static int
gst_droidcamsrc_stream_window_enqueue_buffer (struct preview_stream_ops *w,
    buffer_handle_t * buffer)
{
  GstDroidCamSrcStreamWindow *win;
  GstDroidCamSrc *src;
  GstBuffer *buff;
  int ret;
  GstVideoCropMeta *meta;

  GST_DEBUG ("enqueue buffer %p", buffer);

  win = container_of (w, GstDroidCamSrcStreamWindow, window);

  g_mutex_lock (&win->lock);

  src = GST_DROIDCAMSRC (GST_PAD_PARENT (win->pad->pad));

  buff = gst_droidcamsrc_stream_window_get_buffer (buffer);

  if (!buff) {
    GST_ERROR ("no buffer corresponding to handle %p", buffer);
    ret = -1;
    goto unlock_and_out;
  }

  /* if the buffer pool is not our current pool then just release it */
  if (buff->pool != GST_BUFFER_POOL (win->pool)) {
    GST_DEBUG ("releasing old buffer %p", buffer);
    gst_buffer_unref (buff);
    ret = 0;
    goto unlock_and_out;
  }

  /* now update crop meta */
  meta = gst_buffer_get_video_crop_meta (buff);
  meta->x = win->left;
  meta->y = win->top;
  meta->width = win->right - win->left;
  meta->height = win->bottom - win->top;

  GST_LOG
      ("window width = %d, height = %d, crop info: left = %d, top = %d, right = %d, bottom = %d",
      win->width, win->height, win->left, win->top, win->right, win->bottom);

  g_mutex_unlock (&win->lock);

  /* it should be safe to access that variable without locking.
   * pad gets activated during READY_TO_PAUSED and deactivated during
   * PAUSED_TO_READY while we start the preview during PAUSED_TO_PLAYING
   * and stop it during PLAYING_TO_PAUSED.
   */
  if (!win->pad->running) {
    gst_buffer_unref (buff);
    GST_DEBUG ("unreffing buffer because pad task is not running");
    /* win->lock has already been released above and queue_lock was never
     * taken, so return directly instead of jumping to unlock_pad_and_out */
    return 0;
  }
  // TODO: duration, offset, offset_end ...
  gst_droidcamsrc_timestamp (src, buff);

  g_mutex_lock (&win->pad->queue_lock);

  g_queue_push_tail (win->pad->queue, buff);

  g_cond_signal (&win->pad->cond);

  ret = 0;
  goto unlock_pad_and_out;

unlock_and_out:
  g_mutex_unlock (&win->lock);

  return ret;

unlock_pad_and_out:
  g_mutex_unlock (&win->pad->queue_lock);

  return ret;
}
Code Example #22
File: xasum.c Project: BenjaminCoquelle/clBLAS
clblasStatus
doAsum(
	CLBlasKargs *kargs,
    size_t N,
    cl_mem asum,
    size_t offAsum,
    const cl_mem X,
    size_t offx,
    int incx,
    cl_mem scratchBuff,
    cl_uint numCommandQueues,
    cl_command_queue *commandQueues,
    cl_uint numEventsInWaitList,
    const cl_event *eventWaitList,
    cl_event *events)
{
        cl_int err;
		ListHead seq, seq2;
        clblasStatus retCode = clblasSuccess;
        cl_event firstAsumCall;
        CLBlasKargs redctnArgs;
        ListNode *listNodePtr;
        SolutionStep *step;

        DataType asumType = (kargs->dtype == TYPE_COMPLEX_FLOAT) ? TYPE_FLOAT:
                                ((kargs->dtype == TYPE_COMPLEX_DOUBLE) ? TYPE_DOUBLE: kargs->dtype);

		if (!clblasInitialized) {
			return clblasNotInitialized;
		}

		/* Validate arguments */

		retCode = checkMemObjects(scratchBuff, asum, X, true, X_VEC_ERRSET, X_VEC_ERRSET, X_VEC_ERRSET );
		if (retCode) {
			printf("Invalid mem object..\n");
            return retCode;
		}

		// Check whether enough memory was allocated

		if ((retCode = checkVectorSizes(kargs->dtype, N, X, offx, incx, X_VEC_ERRSET ))) {
			printf("Invalid Size for X\n");
            return retCode;
		}
		// Minimum size of scratchBuff is N
		if ((retCode = checkVectorSizes(kargs->dtype, N, scratchBuff, 0, 1, X_VEC_ERRSET ))) {
			printf("Insufficient ScratchBuff\n");
            return retCode;
		}

		if ((retCode = checkVectorSizes(asumType, 1, asum, offAsum, 1, X_VEC_ERRSET ))) {
			printf("Invalid Size for asum\n");
            return retCode;
		}
		///////////////////////////////////////////////////////////////

		if ((commandQueues == NULL) || (numCommandQueues == 0))
		{
			return clblasInvalidValue;
		}

		/* numCommandQueues will be hardcoded to 1 as of now. No multi-gpu support */
		numCommandQueues = 1;
		if (commandQueues[0] == NULL)
		{
			return clblasInvalidCommandQueue;
		}

		if ((numEventsInWaitList !=0) && (eventWaitList == NULL))
		{
			return clblasInvalidEventWaitList;
		}

		kargs->N = N;
		kargs->A = asum;
        kargs->offA = offAsum;
		kargs->B = X;
		kargs->offBX = offx;
		kargs->ldb.vector = incx;   // Will be using this as incx
        if(incx <1){
            kargs->N = 1;
        }
        kargs->D = scratchBuff;
        kargs->redctnType = REDUCE_BY_SUM;
        memcpy(&redctnArgs, kargs, sizeof(CLBlasKargs));

        redctnArgs.dtype = asumType;

		listInitHead(&seq);
		err = makeSolutionSeq(CLBLAS_ASUM, kargs, numCommandQueues, commandQueues,
        					  numEventsInWaitList, eventWaitList, &firstAsumCall, &seq);
		if (err == CL_SUCCESS)
        {
            /** The second kernel call needs to know the number of work-groups used
                in the first kernel call. This number of work-groups is calculated here
                and passed as N to second reduction kernel
            **/
            err = executeSolutionSeq(&seq);
            if (err == CL_SUCCESS)
            {
                listNodePtr = listNodeFirst(&seq);        // Get the node
                step = container_of(listNodePtr, node, SolutionStep);

                redctnArgs.N = step->pgran.numWGSpawned[0];     // 1D block was used

                listInitHead(&seq2);
                err = makeSolutionSeq(CLBLAS_REDUCTION_EPILOGUE, &redctnArgs, numCommandQueues, commandQueues,
                           1, &firstAsumCall, events, &seq2);

                if (err == CL_SUCCESS)
                {
                    err = executeSolutionSeq(&seq2);
                }
                freeSolutionSeq(&seq2);
            }
		}

		freeSolutionSeq(&seq);
		return (clblasStatus)err;
}
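
Note the argument order in the container_of call above: node is the ListNode member inside SolutionStep, so clBLAS evidently declares the macro as container_of(ptr, member, type), the reverse of the Linux convention. The shape inferred from this call site (an inference, not a quote from the clBLAS headers):

/* Inferred from the call site above: (ptr, member, type) order. */
#define container_of(ptr, member, type) \
    ((type *)((char *)(ptr) - offsetof(type, member)))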
Code Example #23
void uv__stream_osx_cb_close(uv_handle_t* async) {
  /* Free container */
  free(container_of(async, uv__stream_select_t, async));
}
Code Example #24
File: pipe.c Project: broketech/akaros
static void pipe_release(struct kref *kref)
{
	Pipe *pipe = container_of(kref, Pipe, ref);
	freepipe(pipe);
}
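
This is the standard kref release pattern: the callback receives the embedded kref and frees its container. A usage sketch assuming Linux-style kref semantics (Akaros registers the release function at init time instead, so the exact calls here are illustrative):

kref_init(&pipe->ref);               /* refcount = 1 */
kref_get(&pipe->ref);                /* refcount = 2 */
kref_put(&pipe->ref, pipe_release);  /* refcount = 1 */
kref_put(&pipe->ref, pipe_release);  /* hits 0: pipe_release() frees pipe */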
Code Example #25
/**
 * This function is used to submit an I/O Request to an EP.
 *
 *	- When the request completes the request's completion callback
 *	  is called to return the request to the driver.
 *	- An EP, except control EPs, may have multiple requests
 *	  pending.
 *	- Once submitted the request cannot be examined or modified.
 *	- Each request is turned into one or more packets.
 *	- A BULK EP can queue any amount of data; the transfer is
 *	  packetized.
 *	- Zero length Packets are specified with the request 'zero'
 *	  flag.
 */
static int dwc_otg_pcd_ep_queue(struct usb_ep *_ep,
				struct usb_request *_req, gfp_t _gfp_flags)
{
	int prevented = 0;
	dwc_otg_pcd_request_t *req;
	dwc_otg_pcd_ep_t *ep;
	dwc_otg_pcd_t *pcd;
	unsigned long flags = 0;
	int ep_in_pass = 1;

	DWC_DEBUGPL(DBG_PCDV, "%s(%p,%p,%d)\n",
		    __func__, _ep, _req, _gfp_flags);

	req = container_of(_req, dwc_otg_pcd_request_t, req);
	if (!_req || !_req->complete || !_req->buf || !list_empty(&req->queue)) {
		DWC_WARN("%s, bad params\n", __func__);
		return -EINVAL;
	}

	ep = container_of(_ep, dwc_otg_pcd_ep_t, ep);
	if (!_ep || (!ep->desc && ep->dwc_ep.num != 0)) {
		DWC_WARN("%s, bad ep\n", __func__);
		return -EINVAL;
	}
	pcd = ep->pcd;
	if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) {
		DWC_DEBUGPL(DBG_PCDV, "gadget.speed=%d\n", pcd->gadget.speed);
		DWC_WARN("%s, bogus device state\n", __func__);
		return -ESHUTDOWN;
	}

	DWC_DEBUGPL(DBG_PCD, "%s queue req %p, len %d buf %p\n",
		    _ep->name, _req, _req->length, _req->buf);

	if (!GET_CORE_IF(pcd)->core_params->opt) {
		if (ep->dwc_ep.num != 0) {
			DWC_ERROR("%s queue req %p, len %d buf %p\n",
				  _ep->name, _req, _req->length, _req->buf);
		}
	}

	SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags);

#if defined(DEBUG) & defined(VERBOSE)
	dump_msg(_req->buf, _req->length);
#endif

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* 
	 * For EP0 IN without premature status, zlp is required?
	 */
	if (ep->dwc_ep.num == 0 && ep->dwc_ep.is_in) {
		DWC_DEBUGPL(DBG_PCDV, "%s-OUT ZLP\n", _ep->name);
		//_req->zero = 1;
	}

	if(ep->dwc_ep.num && ep->dwc_ep.is_in && pcd->ep_in_sync)
		ep_in_pass = 0;
	if(GET_CORE_IF(pcd)->en_multiple_tx_fifo || !GET_CORE_IF(pcd)->dma_enable)
		ep_in_pass = 1;

	/* Start the transfer */
	if (list_empty(&ep->queue) && !ep->stopped && ep_in_pass) {
		if(GET_CORE_IF(pcd)->dma_enable){
			dwc_otg_pcd_dma_map(&ep->dwc_ep, _req);
		}

		if(ep->dwc_ep.is_in && ep->dwc_ep.num){
			pcd->ep_in_sync = ep->dwc_ep.num;
		}

		/* EP0 Transfer? */
		if (ep->dwc_ep.num == 0) {
			switch (pcd->ep0state) {
			case EP0_IN_DATA_PHASE:
				DWC_DEBUGPL(DBG_PCD,
					    "%s ep0: EP0_IN_DATA_PHASE\n",
					    __func__);
				break;

			case EP0_OUT_DATA_PHASE:
				DWC_DEBUGPL(DBG_PCD,
					    "%s ep0: EP0_OUT_DATA_PHASE\n",
					    __func__);
				if (pcd->request_config || _req->length == 0) {
					/* Complete STATUS PHASE */
					ep->dwc_ep.is_in = 1;
					pcd->ep0state = EP0_STATUS;
				}
				break;

			default:
				DWC_DEBUGPL(DBG_ANY, "ep0: odd state %d\n",
					    pcd->ep0state);
				SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags);
				return -EL2HLT;
			}

			//ep->dwc_ep.dma_addr = _req->dma;
			ep->dwc_ep.start_xfer_buff = _req->buf;
			ep->dwc_ep.xfer_buff = _req->buf;
			ep->dwc_ep.xfer_len = _req->length;
			ep->dwc_ep.xfer_count = 0;
			ep->dwc_ep.sent_zlp = 0;
			ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
			dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd),
						   &ep->dwc_ep);
		} else {
			/* Setup and start the Transfer */
			//ep->dwc_ep.dma_addr = _req->dma; //
			ep->dwc_ep.start_xfer_buff = _req->buf;
			ep->dwc_ep.xfer_buff = _req->buf;
			ep->dwc_ep.xfer_len = _req->length;
			ep->dwc_ep.xfer_count = 0;
			ep->dwc_ep.sent_zlp = 0;
			ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
			dwc_otg_ep_start_transfer(GET_CORE_IF(pcd),
						  &ep->dwc_ep);
		}
	}

	if ((req != 0) || prevented) {
		++pcd->request_pending;
		list_add_tail(&req->queue, &ep->queue);

		if(ep->dwc_ep.num && ep->dwc_ep.is_in)
			list_add_tail(&req->pcd_queue, &pcd->req_queue);

		if (ep->dwc_ep.is_in && ep->stopped
		    && !(GET_CORE_IF(pcd)->dma_enable)) {
			/** @todo NGS Create a function for this. */
			diepmsk_data_t diepmsk = {.d32 = 0 };
			diepmsk.b.intktxfemp = 1;
			dwc_modify_reg32(&GET_CORE_IF(pcd)->
					 dev_if->dev_global_regs->diepmsk, 0,
					 diepmsk.d32);
		}
	}

	SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags);
	return 0;
}
Code Example #26
File: ich9.c Project: 32bitmicro/riscv-qemu
static void pm_powerdown_req(Notifier *n, void *opaque)
{
    ICH9LPCPMRegs *pm = container_of(n, ICH9LPCPMRegs, powerdown_notifier);

    acpi_pm1_evt_power_down(&pm->acpi_regs);
}
Code Example #27
File: SSHRSA.C Project: TortoiseGit/TortoiseGit
static char *rsa2_cache_str(ssh_key *key)
{
    RSAKey *rsa = container_of(key, RSAKey, sshk);
    return rsastr_fmt(rsa);
}
Code Example #28
File: ich9.c Project: 32bitmicro/riscv-qemu
static void ich9_pm_update_sci_fn(ACPIREGS *regs)
{
    ICH9LPCPMRegs *pm = container_of(regs, ICH9LPCPMRegs, acpi_regs);
    acpi_update_sci(&pm->acpi_regs, pm->irq);
}
Code Example #29
static QapiDeallocVisitor *to_qov(Visitor *v)
{
    return container_of(v, QapiDeallocVisitor, visitor);
}
Code Example #30
/**
 * Insert a number of preliminary UPDATE_GB_IMAGE commands in the
 * command buffer, equal to the current number of mapped ranges.
 * The UPDATE_GB_IMAGE commands will be patched with the
 * actual ranges just before flush.
 */
static enum pipe_error
svga_buffer_upload_gb_command(struct svga_context *svga,
			      struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   SVGA3dCmdUpdateGBImage *update_cmd;
   struct svga_3d_update_gb_image *whole_update_cmd = NULL;
   uint32 numBoxes = sbuf->map.num_ranges;
   struct pipe_resource *dummy;
   unsigned i;

   assert(svga_have_gb_objects(svga));
   assert(numBoxes);
   assert(sbuf->dma.updates == NULL);

   if (sbuf->dma.flags.discard) {
      struct svga_3d_invalidate_gb_image *cicmd = NULL;
      SVGA3dCmdInvalidateGBImage *invalidate_cmd;
      const unsigned total_commands_size =
         sizeof(*invalidate_cmd) + numBoxes * sizeof(*whole_update_cmd);

      /* Allocate FIFO space for one INVALIDATE_GB_IMAGE command followed by
       * 'numBoxes' UPDATE_GB_IMAGE commands.  Allocate all at once rather
       * than with separate commands because we need to properly deal with
       * filling the command buffer.
       */
      invalidate_cmd = SVGA3D_FIFOReserve(swc,
                                          SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
                                          total_commands_size, 1 + numBoxes);
      if (!invalidate_cmd)
	 return PIPE_ERROR_OUT_OF_MEMORY;

      cicmd = container_of(invalidate_cmd, cicmd, body);
      cicmd->header.size = sizeof(*invalidate_cmd);
      swc->surface_relocation(swc, &invalidate_cmd->image.sid, NULL, sbuf->handle,
                              (SVGA_RELOC_WRITE |
                               SVGA_RELOC_INTERNAL |
                               SVGA_RELOC_DMA));
      invalidate_cmd->image.face = 0;
      invalidate_cmd->image.mipmap = 0;

      /* The whole_update_command is a SVGA3dCmdHeader plus the
       * SVGA3dCmdUpdateGBImage command.
       */
      whole_update_cmd = (struct svga_3d_update_gb_image *) &invalidate_cmd[1];
      /* initialize the first UPDATE_GB_IMAGE command */
      whole_update_cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
      update_cmd = &whole_update_cmd->body;

   } else {
      /* Allocate FIFO space for 'numBoxes' UPDATE_GB_IMAGE commands */
      const unsigned total_commands_size =
         sizeof(*update_cmd) + (numBoxes - 1) * sizeof(*whole_update_cmd);

      update_cmd = SVGA3D_FIFOReserve(swc,
                                      SVGA_3D_CMD_UPDATE_GB_IMAGE,
                                      total_commands_size, numBoxes);
      if (!update_cmd)
	 return PIPE_ERROR_OUT_OF_MEMORY;

      /* The whole_update_command is a SVGA3dCmdHeader plus the
       * SVGA3dCmdUpdateGBImage command.
       */
      whole_update_cmd = container_of(update_cmd, whole_update_cmd, body);
   }

   /* Init the first UPDATE_GB_IMAGE command */
   whole_update_cmd->header.size = sizeof(*update_cmd);
   swc->surface_relocation(swc, &update_cmd->image.sid, NULL, sbuf->handle,
			   SVGA_RELOC_WRITE | SVGA_RELOC_INTERNAL);
   update_cmd->image.face = 0;
   update_cmd->image.mipmap = 0;

   /* Save pointer to the first UPDATE_GB_IMAGE command so that we can
    * fill in the box info below.
    */
   sbuf->dma.updates = whole_update_cmd;

   /*
    * Copy the face, mipmap, etc. info to all subsequent commands.
    * Also do the surface relocation for each subsequent command.
    */
   for (i = 1; i < numBoxes; ++i) {
      whole_update_cmd++;
      memcpy(whole_update_cmd, sbuf->dma.updates, sizeof(*whole_update_cmd));

      swc->surface_relocation(swc, &whole_update_cmd->body.image.sid, NULL,
                              sbuf->handle,
                              SVGA_RELOC_WRITE | SVGA_RELOC_INTERNAL);
   }

   /* Increment reference count */
   sbuf->dma.svga = svga;
   dummy = NULL;
   pipe_resource_reference(&dummy, &sbuf->b.b);
   SVGA_FIFOCommitAll(swc);

   swc->hints |= SVGA_HINT_FLAG_CAN_PRE_FLUSH;
   sbuf->dma.flags.discard = FALSE;

   svga->hud.num_resource_updates++;

   return PIPE_OK;
}
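
One last variant: gallium's container_of historically took a sample pointer of the target type rather than a type name, which is why the calls above read container_of(invalidate_cmd, cicmd, body) and container_of(update_cmd, whole_update_cmd, body). An approximation of that macro (reconstructed from the Mesa headers of this era, not a verbatim quote):

/* The container type is inferred from the 'sample' pointer rather
 * than spelled out, so no offsetof(type, member) is needed. */
#define container_of(ptr, sample, member)                        \
    (void *)((char *)(ptr) -                                     \
             ((char *)&(sample)->member - (char *)(sample)))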