Example #1
0
static int recv_msg(struct kiocb *iocb, struct socket *sock,
		    struct msghdr *m, size_t buf_len, int flags)
{
	struct tipc_sock *tsock = tipc_sk(sock->sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	unsigned int q_len;
	unsigned int sz;
	u32 err;
	int res;

	/* Currently doesn't support receiving into multiple iovec entries */

	if (m->msg_iovlen != 1)
		return -EOPNOTSUPP;

	/* Catch invalid receive attempts */

	if (unlikely(!buf_len))
		return -EINVAL;

	if (sock->type == SOCK_SEQPACKET) {
		if (unlikely(sock->state == SS_UNCONNECTED))
			return -ENOTCONN;
		if (unlikely((sock->state == SS_DISCONNECTING) &&
			     (skb_queue_len(&sock->sk->sk_receive_queue) == 0)))
			return -ENOTCONN;
	}

	/* Look for a message in receive queue; wait if necessary */

	if (unlikely(down_interruptible(&tsock->sem)))
		return -ERESTARTSYS;

restart:
	if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
		     (flags & MSG_DONTWAIT))) {
		res = -EWOULDBLOCK;
		goto exit;
	}

	if ((res = wait_event_interruptible(
		*sock->sk->sk_sleep,
		((q_len = skb_queue_len(&sock->sk->sk_receive_queue)) ||
		 (sock->state == SS_DISCONNECTING))) )) {
		goto exit;
	}

	/* Catch attempt to receive on an already terminated connection */
	/* [THIS CHECK MAY OVERLAP WITH AN EARLIER CHECK] */

	if (!q_len) {
		res = -ENOTCONN;
		goto exit;
	}

	/* Get access to first message in receive queue */

	buf = skb_peek(&sock->sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	err = msg_errcode(msg);

	/* Complete connection setup for an implied connect */

	if (unlikely(sock->state == SS_CONNECTING)) {
		if ((res = auto_connect(sock, tsock, msg)))
			goto exit;
	}

	/* Discard an empty non-errored message & try again */

	if ((!sz) && (!err)) {
		advance_queue(tsock);
		goto restart;
	}

	/* Capture sender's address (optional) */

	set_orig_addr(m, msg);

	/* Capture ancillary data (optional) */

	if ((res = anc_data_recv(m, msg, tsock->p)))
		goto exit;

	/* Capture message data (if valid) & compute return value (always) */

	if (!err) {
		if (unlikely(buf_len < sz)) {
			sz = buf_len;
			m->msg_flags |= MSG_TRUNC;
		}
		if (unlikely(copy_to_user(m->msg_iov->iov_base, msg_data(msg),
					  sz))) {
			res = -EFAULT;
			goto exit;
		}
		res = sz;
	} else {
		if ((sock->state == SS_READY) ||
		    ((err == TIPC_CONN_SHUTDOWN) || m->msg_control))
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* Consume received message (optional) */

	if (likely(!(flags & MSG_PEEK))) {
		if (unlikely(++tsock->p->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
			tipc_acknowledge(tsock->p->ref, tsock->p->conn_unacked);
		advance_queue(tsock);
	}
exit:
	up(&tsock->sem);
	return res;
}
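A pattern worth noting in recv_msg() above, and in most examples in this collection: the semaphore taken with down_interruptible() is released at exactly one point, and every failure path funnels there. A minimal sketch of that discipline, using a hypothetical my_dev device and helpers rather than the TIPC structures:

static int my_dev_op(struct my_dev *dev)
{
	int res;

	/* returns nonzero if a signal arrived before the semaphore was taken */
	if (down_interruptible(&dev->sem))
		return -ERESTARTSYS;

	res = do_protected_work(dev);		/* hypothetical helper */
	if (res)
		goto exit;

	res = do_more_protected_work(dev);	/* hypothetical helper */
exit:
	up(&dev->sem);	/* the single release point for every path above */
	return res;
}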
Example #2
0
// -------------------------------------------------------------------------
ssize_t bks_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
{
    ssize_t bytes_transferred = 0;
    size_t bytes_to_copy = 0;
    unsigned long remain = 0;
    long ec = -1;
    struct BksDeviceContext* ctx = file->private_data;
    int nr = 0;
    unsigned char* ptr = 0;


    if (!ctx) {
        bytes_transferred = -EINVAL;
        goto exit; // not much we can do.
    } else {
        nr = ctx->nr;
        ptr = ctx->ptr;
    }
    printk(KERN_INFO "[bks] read(), context: %p, minor: %d, ptr: %p\n", ctx, nr, ptr);

    // acquire the lock
    if (down_interruptible(&bks_buffer_lock[nr])) {
        printk(KERN_INFO "[bks] read(), down_interruptible() returned non zero\n");
        bytes_transferred = -EINTR;
        goto exit;
    }

    // if somebody else has truncated the file, then adjust our
    // position so that we're at least looking at something valid.
    if (bks_buffer_end[nr] < ptr) {
        ptr = bks_buffer_end[nr];
    }

    // from my present position in the buffer, how many bytes of data (that were
    // previously written) remain in the buffer...
    // in other words, how many bytes can I read from the buffer?
    remain = bks_buffer_end[nr] - ptr;

    // count is a size_t and therefore unsigned, so it can never be negative;
    // just clamp the request to what remains in the buffer.
    if (count < remain) {
        bytes_to_copy = count;
    } else {
        bytes_to_copy = remain;
    }

    if (bytes_to_copy) {
        ec = copy_to_user(buf, ptr, bytes_to_copy);
    }

    if (!ec) { // success.
        bytes_transferred = bytes_to_copy;
    } else { // not success.
        bytes_transferred = 0;
    }

    ptr += bytes_transferred;
    ctx->ptr = ptr;

    // release the lock
    up(&bks_buffer_lock[nr]);


exit:
    printk(KERN_INFO "[bks] read(), transferred %d bytes\n", bytes_transferred);
    return bytes_transferred;
}
Example #3
0
// -------------------------------------------------------------------------
loff_t bks_llseek(struct file* file, loff_t offset, int whence)
{
    loff_t new_offset; // what we return
    loff_t current_offset = 0; // offset when we enter this fctn.
    loff_t potential_offset = 0; // what might be the new offset
    struct BksDeviceContext* ctx = file->private_data;
    int nr = 0;
    unsigned char* ptr = 0;

    printk(KERN_INFO "[bks] llseek(), offset: %Ld, whence: %d\n", offset, whence);

    if (!ctx) {
        new_offset = -EINVAL;
        goto exit; // not much we can do.
    } else {
        nr = ctx->nr;
        ptr = ctx->ptr;
    }
    printk(KERN_INFO "[bks] llseek(), context: %p, minor: %d, ptr: %p\n", ctx, nr, ptr);

    // acquire the lock
    if (0 != down_interruptible(&bks_buffer_lock[nr])) {
        printk(KERN_WARNING "[bks] llseek(), down_interruptible() returned non zero\n");
        // interrupted. bail out
        new_offset = -EINTR;
        goto exit;
    }


    // --------------------------------------------------------------------
    // do the llseek.

    // what is the offset coming in?
    current_offset = ptr - bks_buffer_begin[nr];

    // what it might be if the caller passed sane parameters.
    switch (whence) {

    case SEEK_SET:
        potential_offset = offset;
        break;

    case SEEK_CUR:
        potential_offset = current_offset + offset;
        break;

    case SEEK_END:
        potential_offset = (bks_buffer_size[nr] -1) + offset;
        break;

    default:
        // this is an invalid value for whence.
        // the kernel should not allow this...?
        potential_offset = -1; // force code below to return an error code.
        break;

    } // switch

    // check for sanity.
    if (potential_offset < 0 || potential_offset >= bks_buffer_size[nr]) {
        new_offset = -EINVAL;
    } else {
        // seems sane. update pointer.
        new_offset = potential_offset;
        ptr = bks_buffer_begin[nr] + (long)new_offset;
        ctx->ptr = ptr;
        printk(KERN_INFO "[bks] llseek(), current_offset: %Ld, new_offset: %Ld, ptr; %p\n", current_offset, new_offset, ptr);
    }

    // release the lock
    up(&bks_buffer_lock[nr]);

exit:
    return new_offset;
}
Example #4
0
static void gp_touchpad_gpioIrq(void *data)
{
	int ret,tmp;
	char datax[3];
	int *p;
	if (down_interruptible(&gp_touchpad_data->touchpad_sem) != 0) 
	{
		return;
	}
	gp_gpio_enable_irq(gp_touchpad_irq_data->gpiohd, 0);
	p = (int*)IO0_ADDRESS(0x5080);
	*p &= 0xf3ffffff;
	ret = gp_i2c_bus_read(iic_handle, (unsigned char*)&datax, 3);
	*p |= 0x0c000000;

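	/* Convert the raw 8-bit X/Y deltas to signed ints below. (Note: a
	 * strict two's-complement conversion would subtract 0x100; the vendor
	 * code subtracts 0xff, which is off by one for negative deltas.) */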
	if((datax[1]&0x80) == 0)
	{
		tmp = (int)(datax[1]);
	}
	else
	{
		
		tmp = (int)(datax[1]);
		tmp -= 0xff;
	}
	input_report_rel((gp_touchpad_data->input), REL_X, tmp);
#if tp_debug_print
	printk("Xm:%3d-%x\n",tmp,tmp);
#endif

	if((datax[2]&0x80) == 0)
	{
		tmp = (int)(datax[2]);
	}
	else
	{
		tmp = (int)(datax[2]);
		tmp -= 0xff;
	}
	input_report_rel((gp_touchpad_data->input), REL_Y, tmp);
#if tp_debug_print
	printk("Ym:%3d-%x\n",tmp,tmp);
#endif

	//left button
	if(datax[0]&TP_LBTN)
	{
		input_report_key(gp_touchpad_data->input, BTN_LEFT,   1);
#if tp_debug_print
		printk("LB:+++\n");
#endif
	}
	else
	{
		input_report_key(gp_touchpad_data->input, BTN_LEFT,   0);
#if tp_debug_print
		printk("LB:...\n");
#endif
	}
	//right button
	if(datax[0]&TP_RBTN)
	{
		input_report_key(gp_touchpad_data->input, BTN_RIGHT,   1);
#if tp_debug_print
		printk("RB:+++\n");
#endif
	}
	else
	{
		input_report_key(gp_touchpad_data->input, BTN_RIGHT,   0);
#if tp_debug_print
		printk("RB:...\n");
#endif
	}
	//middle button
	if(datax[0]&TP_MBTN)
	{
		input_report_key(gp_touchpad_data->input, BTN_MIDDLE,   1);
#if tp_debug_print
		printk("MB:+++\n");
#endif
	}
	else
	{
		input_report_key(gp_touchpad_data->input, BTN_MIDDLE,   0);
#if tp_debug_print
		printk("MB:...\n");
#endif
	}

	input_sync(gp_touchpad_data->input);
#if tp_debug_print
	printk("cmd:%u,data0:%d,data1:%d\n",datax[0],datax[1],datax[2]);
#endif
	gp_gpio_set_direction(gp_touchpad_irq_data->gpiohd, GPIO_DIR_INPUT);
	gp_gpio_set_input(gp_touchpad_irq_data->gpiohd, 1);
	gp_gpio_enable_irq(gp_touchpad_irq_data->gpiohd, 1);
	up(&gp_touchpad_data->touchpad_sem);
	return;
}
Example #5
0
tEplKernel EplLinCbEvent(tEplApiEventType EventType_p,	// IN: event type (enum)
			 tEplApiEventArg *pEventArg_p,	// IN: event argument (union)
			 void *pUserArg_p)
{
	tEplKernel EplRet = kEplSuccessful;
	int iErr;

	// block any further call to this function, i.e. enter critical section
	iErr = down_interruptible(&SemaphoreCbEvent_g);
	if (iErr != 0) {	// waiting was interrupted by signal
		EplRet = kEplShutdown;
		goto Exit;
	}
	// wait for EplApiProcess() to call ioctl
	// normally it should be waiting already for us to pass a new event
	iErr = wait_event_interruptible(WaitQueueCbEvent_g,
					(atomic_read(&AtomicEventState_g) ==
					 EVENT_STATE_IOCTL)
					|| (atomic_read(&AtomicEventState_g) ==
					    EVENT_STATE_TERM));
	if ((iErr != 0) || (atomic_read(&AtomicEventState_g) == EVENT_STATE_TERM)) {	// waiting was interrupted by signal
		EplRet = kEplShutdown;
		goto LeaveCriticalSection;
	}
	// save event information for ioctl
	EventType_g = EventType_p;
	pEventArg_g = pEventArg_p;

	// pass control to application's event callback function, i.e. EplApiProcess()
	atomic_set(&AtomicEventState_g, EVENT_STATE_READY);
	wake_up_interruptible(&WaitQueueProcess_g);

	// now, the application's event callback function processes the event

	// wait for completion of application's event callback function, i.e. EplApiProcess() calls ioctl again
	iErr = wait_event_interruptible(WaitQueueCbEvent_g,
					(atomic_read(&AtomicEventState_g) ==
					 EVENT_STATE_IOCTL)
					|| (atomic_read(&AtomicEventState_g) ==
					    EVENT_STATE_TERM));
	if ((iErr != 0) || (atomic_read(&AtomicEventState_g) == EVENT_STATE_TERM)) {	// waiting was interrupted by signal
		EplRet = kEplShutdown;
		goto LeaveCriticalSection;
	}
	// read return code from application's event callback function
	EplRet = RetCbEvent_g;

      LeaveCriticalSection:
	up(&SemaphoreCbEvent_g);

      Exit:
	// check if NMT_GS_OFF is reached
	if (EventType_p == kEplApiEventNmtStateChange) {
		if (pEventArg_p->m_NmtStateChange.m_NewNmtState == kEplNmtGsOff) {	// NMT state machine was shut down
			TRACE0("EPL:   EplLinCbEvent(NMT_GS_OFF)\n");
			uiEplState_g = EPL_STATE_SHUTDOWN;
			atomic_set(&AtomicEventState_g, EVENT_STATE_TERM);
			wake_up(&WaitQueueRelease_g);
		} else {	// NMT state machine is running
			uiEplState_g = EPL_STATE_RUNNING;
		}
	}

	return EplRet;
}
Example #6
0
static int audio_write(struct file *file, const char *buffer,
		       size_t count, loff_t * ppos)
{
	const char *buffer0 = buffer;
	audio_state_t *state = (audio_state_t *)file->private_data;
	audio_stream_t *s = state->output_stream;
	int chunksize, ret = 0;

	DPRINTK("audio_write: count=%d\n", count);

	if (ppos != &file->f_pos)
		return -ESPIPE;
	if (s->mapped)
		return -ENXIO;
	if (!s->buffers && audio_setup_buf(s))
		return -ENOMEM;

	while (count > 0) {
		audio_buf_t *b = s->buf;

		/* Wait for a buffer to become free */
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			if (down_trylock(&b->sem))
				break;
		} else {
			ret = -ERESTARTSYS;
			if (down_interruptible(&b->sem))
				break;
		}

		/* Feed the current buffer */
		chunksize = s->fragsize - b->size;
		if (chunksize > count)
			chunksize = count;
		DPRINTK("write %d to %d\n", chunksize, s->buf_idx);
		if (copy_from_user(b->start + b->size, buffer, chunksize)) {
			up(&b->sem);
			return -EFAULT;
		}
		b->size += chunksize;
		buffer += chunksize;
		count -= chunksize;
		if (b->size < s->fragsize) {
			up(&b->sem);
			break;
		}

		/* Send current buffer to dma */
		s->active = 1;
		sa1100_dma_queue_buffer(s->dma_ch, (void *) b,
					b->dma_addr, b->size);
		b->size = 0;	/* indicate that the buffer has been sent */
		NEXT_BUF(s, buf);
	}

	if ((buffer - buffer0))
		ret = buffer - buffer0;
	DPRINTK("audio_write: return=%d\n", ret);
	return ret;
}
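In audio_write() every fragment carries its own semaphore: the writer downs it (or trylocks it for O_NONBLOCK) before filling the fragment, and the DMA-completion path is expected to up it when the hardware is done with the buffer. A sketch of the assumed completion side; the handler name and callback signature are assumptions, not taken from the driver:

static void audio_dmaout_done(void *buf_id, int size)
{
	audio_buf_t *b = (audio_buf_t *) buf_id;

	/* fragment has been sent; let a writer blocked in
	 * down_interruptible(&b->sem) or down_trylock(&b->sem) reuse it */
	up(&b->sem);
}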
Example #7
0
static int block_allocator_allocate(void* ctx, ump_dd_mem * mem)
{
	block_allocator * allocator;
	u32 left;
	block_info * last_allocated = NULL;
	int i = 0;

	BUG_ON(!ctx);
	BUG_ON(!mem);

	allocator = (block_allocator*)ctx;
	left = mem->size_bytes;

	BUG_ON(!left);
	/* "&allocator->mutex" is the address of a struct member and is never
	 * NULL, so this check can never fire as written. */
	BUG_ON(!&allocator->mutex);

	mem->nr_blocks = ((left + UMP_BLOCK_SIZE - 1) & ~(UMP_BLOCK_SIZE - 1)) / UMP_BLOCK_SIZE;
	mem->block_array = (ump_dd_physical_block*)vmalloc(sizeof(ump_dd_physical_block) * mem->nr_blocks);
	if (NULL == mem->block_array)
	{
		MSG_ERR(("Failed to allocate block array\n"));
		return 0;
	}

	if (down_interruptible(&allocator->mutex))
	{
		MSG_ERR(("Could not get mutex to do block_allocate\n"));
		vfree(mem->block_array);	/* don't leak the array on the interrupted path */
		mem->block_array = NULL;
		return 0;
	}

	mem->size_bytes = 0;

	while ((left > 0) && (allocator->first_free))
	{
		block_info * block;

		block = allocator->first_free;
		allocator->first_free = allocator->first_free->next;
		block->next = last_allocated;
		last_allocated = block;
		allocator->num_free--;

		mem->block_array[i].addr = get_phys(allocator, block);
		mem->block_array[i].size = UMP_BLOCK_SIZE;
		mem->size_bytes += UMP_BLOCK_SIZE;

		i++;

		if (left < UMP_BLOCK_SIZE) left = 0;
		else left -= UMP_BLOCK_SIZE;
	}

	if (left)
	{
		block_info * block;
		/* release all memory back to the pool */
		while (last_allocated)
		{
			block = last_allocated->next;
			last_allocated->next = allocator->first_free;
			allocator->first_free = last_allocated;
			last_allocated = block;
			allocator->num_free++;
		}

		vfree(mem->block_array);
		mem->backend_info = NULL;
		mem->block_array = NULL;

		DBG_MSG(4, ("Could not find a mem-block for the allocation.\n"));
		up(&allocator->mutex);

		return 0;
	}

	mem->backend_info = last_allocated;

	up(&allocator->mutex);

	return 1;
}
Example #8
0
#if (DMA_DEBUG_TRACK_RESERVATION)
DMA_Handle_t dma_request_channel_dbg(DMA_Device_t dev, const char *fileName,
				     int lineNum)
#else
DMA_Handle_t dma_request_channel(DMA_Device_t dev)
#endif
{
	DMA_Handle_t handle;
	DMA_DeviceAttribute_t *devAttr;
	DMA_Channel_t *channel;
	int controllerIdx;
	int controllerIdx2;
	int channelIdx;

	if (down_interruptible(&gDMA.lock) < 0) {
		return -ERESTARTSYS;
	}

	if ((dev < 0) || (dev >= DMA_NUM_DEVICE_ENTRIES)) {
		handle = -ENODEV;
		goto out;
	}
	devAttr = &DMA_gDeviceAttribute[dev];

#if (DMA_DEBUG_TRACK_RESERVATION)
	{
		char *s;

		s = strrchr(fileName, '/');
		if (s != NULL) {
			fileName = s + 1;
		}
	}
#endif
	if ((devAttr->flags & DMA_DEVICE_FLAG_IN_USE) != 0) {
		/* This device has already been requested and not been freed */

		printk(KERN_ERR "%s: device %s is already requested\n",
		       __func__, devAttr->name);
		handle = -EBUSY;
		goto out;
	}

	if ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) != 0) {
		/* This device has a dedicated channel. */

		channel =
		    &gDMA.controller[devAttr->dedicatedController].
		    channel[devAttr->dedicatedChannel];
		if ((channel->flags & DMA_CHANNEL_FLAG_IN_USE) != 0) {
			handle = -EBUSY;
			goto out;
		}

		channel->flags |= DMA_CHANNEL_FLAG_IN_USE;
		devAttr->flags |= DMA_DEVICE_FLAG_IN_USE;

#if (DMA_DEBUG_TRACK_RESERVATION)
		channel->fileName = fileName;
		channel->lineNum = lineNum;
#endif
		handle =
		    MAKE_HANDLE(devAttr->dedicatedController,
				devAttr->dedicatedChannel);
		goto out;
	}

	/* This device needs to use one of the shared channels. */

	handle = DMA_INVALID_HANDLE;
	while (handle == DMA_INVALID_HANDLE) {
		/* Scan through the shared channels and see if one is available */

		for (controllerIdx2 = 0; controllerIdx2 < DMA_NUM_CONTROLLERS;
		     controllerIdx2++) {
			/* Check to see if we should try on controller 1 first. */

			controllerIdx = controllerIdx2;
			if ((devAttr->
			     flags & DMA_DEVICE_FLAG_ALLOC_DMA1_FIRST) != 0) {
				controllerIdx = 1 - controllerIdx;
			}

			/* See if the device is available on the controller being tested */

			if ((devAttr->
			     flags & (DMA_DEVICE_FLAG_ON_DMA0 << controllerIdx))
			    != 0) {
				for (channelIdx = 0;
				     channelIdx < DMA_NUM_CHANNELS;
				     channelIdx++) {
					channel =
					    &gDMA.controller[controllerIdx].
					    channel[channelIdx];

					if (((channel->
					      flags &
					      DMA_CHANNEL_FLAG_IS_DEDICATED) ==
					     0)
					    &&
					    ((channel->
					      flags & DMA_CHANNEL_FLAG_IN_USE)
					     == 0)) {
						if (((channel->
						      flags &
						      DMA_CHANNEL_FLAG_LARGE_FIFO)
						     != 0)
						    &&
						    ((devAttr->
						      flags &
						      DMA_DEVICE_FLAG_ALLOW_LARGE_FIFO)
						     == 0)) {
							/* This channel is a large fifo - don't tie it up */
							/* with devices that we don't want using it. */

							continue;
						}

						channel->flags |=
						    DMA_CHANNEL_FLAG_IN_USE;
						channel->devType = dev;
						devAttr->flags |=
						    DMA_DEVICE_FLAG_IN_USE;

#if (DMA_DEBUG_TRACK_RESERVATION)
						channel->fileName = fileName;
						channel->lineNum = lineNum;
#endif
						handle =
						    MAKE_HANDLE(controllerIdx,
								channelIdx);

						/* Now that we've reserved the channel - we can go ahead and configure it */

						if (ConfigChannel(handle) != 0) {
							handle = -EIO;
							printk(KERN_ERR
							       "dma_request_channel: ConfigChannel failed\n");
						}
						goto out;
					}
				}
			}
		}

		/* No channels are currently available. Let's wait for one to free up. */

		{
			DEFINE_WAIT(wait);

			prepare_to_wait(&gDMA.freeChannelQ, &wait,
					TASK_INTERRUPTIBLE);
			up(&gDMA.lock);
			schedule();
			finish_wait(&gDMA.freeChannelQ, &wait);

			if (signal_pending(current)) {
				/* We don't currently hold gDMA.lock, so we return directly */

				return -ERESTARTSYS;
			}
		}

		if (down_interruptible(&gDMA.lock)) {
			return -ERESTARTSYS;
		}
	}

out:
	up(&gDMA.lock);

	return handle;
}
Example #9
0
int osal_secro_v5_lock(void)
{
	return down_interruptible(&osal_secro_v5_sem);
}
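A one-line lock wrapper like this (see also Examples #16, #18 and #20) only makes sense next to a matching unlock; a sketch of the assumed counterpart, since up() is the only way the semaphore can be released:

void osal_secro_v5_unlock(void)
{
	up(&osal_secro_v5_sem);
}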
Example #10
0
/* thread to serialize all requests, both sync and async */
static int async_task(void *param)
{
    struct hif_device *device;
    BUS_REQUEST *request;
    int status;
    unsigned long flags;

    device = (struct hif_device *)param;
    AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: async task\n"));
    set_current_state(TASK_INTERRUPTIBLE);
    while(!device->async_shutdown) {
        /* wait for work */
        if (down_interruptible(&device->sem_async) != 0) {
            /* interrupted, exit */
            AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: async task interrupted\n"));
            break;
        }
        if (device->async_shutdown) {
            AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: async task stopping\n"));
            break;
        }
        /* we want to hold the host over multiple cmds if possible, but holding the host blocks card interrupts */
        sdio_claim_host(device->func);
        spin_lock_irqsave(&device->asynclock, flags);
        /* pull the request to work on */
        while (device->asyncreq != NULL) {
            request = device->asyncreq;
            if (request->inusenext != NULL) {
                device->asyncreq = request->inusenext;
            } else {
                device->asyncreq = NULL;
            }
            spin_unlock_irqrestore(&device->asynclock, flags);
            AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: async_task processing req: 0x%lX\n", (unsigned long)request));
            
            if (request->pScatterReq != NULL) {
                A_ASSERT(device->scatter_enabled);
                    /* this is a queued scatter request, pass the request to scatter routine which
                     * executes it synchronously, note, no need to free the request since scatter requests
                     * are maintained on a separate list */
                status = DoHifReadWriteScatter(device,request);
            } else {                
                    /* call HIFReadWrite in sync mode to do the work */
                status = __HIFReadWrite(device, request->address, request->buffer,
                                      request->length, request->request & ~HIF_SYNCHRONOUS, NULL);
                if (request->request & HIF_ASYNCHRONOUS) {
                    void *context = request->context;
                    AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: async_task freeing req: 0x%lX\n", (unsigned long)request));
                    hifFreeBusRequest(device, request);
                    AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: async_task completion routine req: 0x%lX\n", (unsigned long)request));
                    device->htcCallbacks.rwCompletionHandler(context, status);
                } else {
                    AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: async_task upping req: 0x%lX\n", (unsigned long)request));
                    request->status = status;
                    up(&request->sem_req);
                }
            }
            spin_lock_irqsave(&device->asynclock, flags);
        }
        spin_unlock_irqrestore(&device->asynclock, flags);
        sdio_release_host(device->func);
    }

    complete_and_exit(&device->async_completion, 0);
    return 0;
}
Example #11
0
static int dma_proc_read_channels(char *buf, char **start, off_t offset,
				  int count, int *eof, void *data)
{
	int controllerIdx;
	int channelIdx;
	int limit = count - 200;
	int len = 0;
	DMA_Channel_t *channel;

	if (down_interruptible(&gDMA.lock) < 0) {
		return -ERESTARTSYS;
	}

	for (controllerIdx = 0; controllerIdx < DMA_NUM_CONTROLLERS;
	     controllerIdx++) {
		for (channelIdx = 0; channelIdx < DMA_NUM_CHANNELS;
		     channelIdx++) {
			if (len >= limit) {
				break;
			}

			channel =
			    &gDMA.controller[controllerIdx].channel[channelIdx];

			len +=
			    sprintf(buf + len, "%d:%d ", controllerIdx,
				    channelIdx);

			if ((channel->flags & DMA_CHANNEL_FLAG_IS_DEDICATED) !=
			    0) {
				len +=
				    sprintf(buf + len, "Dedicated for %s ",
					    DMA_gDeviceAttribute[channel->
								 devType].name);
			} else {
				len += sprintf(buf + len, "Shared ");
			}

			if ((channel->flags & DMA_CHANNEL_FLAG_NO_ISR) != 0) {
				len += sprintf(buf + len, "No ISR ");
			}

			if ((channel->flags & DMA_CHANNEL_FLAG_LARGE_FIFO) != 0) {
				len += sprintf(buf + len, "Fifo: 128 ");
			} else {
				len += sprintf(buf + len, "Fifo: 64  ");
			}

			if ((channel->flags & DMA_CHANNEL_FLAG_IN_USE) != 0) {
				len +=
				    sprintf(buf + len, "InUse by %s",
					    DMA_gDeviceAttribute[channel->
								 devType].name);
#if (DMA_DEBUG_TRACK_RESERVATION)
				len +=
				    sprintf(buf + len, " (%s:%d)",
					    channel->fileName,
					    channel->lineNum);
#endif
			} else {
				len += sprintf(buf + len, "Avail ");
			}

			if (channel->lastDevType != DMA_DEVICE_NONE) {
				len +=
				    sprintf(buf + len, "Last use: %s ",
					    DMA_gDeviceAttribute[channel->
								 lastDevType].
					    name);
			}

			len += sprintf(buf + len, "\n");
		}
	}
	up(&gDMA.lock);
	*eof = 1;

	return len;
}
Example #12
0
/* queue a read/write request */
int
HIFReadWrite(struct hif_device *device,
             u32 address,
             u8 *buffer,
             u32 length,
             u32 request,
             void *context)
{
    int    status = 0;
    BUS_REQUEST *busrequest;


    AR_DEBUG_ASSERT(device != NULL);
    AR_DEBUG_ASSERT(device->func != NULL);

    AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: Device: %p addr:0x%X\n", device,address));

    do {            
        if ((request & HIF_ASYNCHRONOUS) || (request & HIF_SYNCHRONOUS)){
            /* serialize all requests through the async thread */
            AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: Execution mode: %s\n", 
                        (request & HIF_ASYNCHRONOUS)?"Async":"Synch"));
            busrequest = hifAllocateBusRequest(device);
            if (busrequest == NULL) {
                AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 
                    ("AR6000: no async bus requests available (%s, addr:0x%X, len:%d) \n", 
                        request & HIF_READ ? "READ":"WRITE", address, length));
                return A_ERROR;
            }
            busrequest->address = address;
            busrequest->buffer = buffer;
            busrequest->length = length;
            busrequest->request = request;
            busrequest->context = context;
            
            AddToAsyncList(device, busrequest);
            
            if (request & HIF_SYNCHRONOUS) {
                AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: queued sync req: 0x%lX\n", (unsigned long)busrequest));

                /* wait for completion */
                up(&device->sem_async);
                if (down_interruptible(&busrequest->sem_req) != 0) {
                    /* interrupted, exit */
                    return A_ERROR;
                } else {
                    int status = busrequest->status;
                    AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: sync return freeing 0x%lX: 0x%X\n", 
						      (unsigned long)busrequest, busrequest->status));
                    AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: freeing req: 0x%X\n", (unsigned int)request));
                    hifFreeBusRequest(device, busrequest);
                    return status;
                }
            } else {
                AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: queued async req: 0x%lX\n", (unsigned long)busrequest));
                up(&device->sem_async);
                return A_PENDING;
            }
        } else {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
                            ("AR6000: Invalid execution mode: 0x%08x\n", (unsigned int)request));
            status = A_EINVAL;
            break;
        }
    } while(0);

    return status;
}
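HIFReadWrite() and async_task() (Example #10) form a two-semaphore handshake: the submitter queues the request and ups device->sem_async to wake the worker thread; for a synchronous request it then sleeps on the per-request semaphore that the worker ups after filling in request->status. The same pattern distilled into a minimal sketch — the worker type and queue helper are hypothetical, not the AR6000 API:

struct work_req {
	struct semaphore done;		/* completion gate, starts at 0 */
	int status;			/* filled in by the worker */
};

static int submit_and_wait(struct worker *w, struct work_req *r)
{
	sema_init(&r->done, 0);
	enqueue_request(w, r);		/* hypothetical helper, spinlock inside */
	up(&w->work_sem);		/* wake the worker thread */
	if (down_interruptible(&r->done))
		return -EINTR;		/* signal while waiting for completion */
	return r->status;		/* set by the worker before up(&r->done) */
}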
Example #13
0
static ssize_t lcd_write(struct file *file, const char __user * user_buffer, size_t count, loff_t *ppos)
{
    struct usb_lcd *dev;
    int retval = 0, r;
    struct urb *urb = NULL;
    char *buf = NULL;
    
    dev = (struct usb_lcd *)file->private_data;
    
    /* verify that we actually have some data to write */
    if (count == 0)
        goto exit;

    r = down_interruptible(&dev->limit_sem);
    if (r < 0)
        return -EINTR;

    /* create a urb, and a buffer for it, and copy the data to the urb */
    urb = usb_alloc_urb(0, GFP_KERNEL);
    if (!urb) {
        retval = -ENOMEM;
        goto err_no_buf;
    }
    
    buf = usb_buffer_alloc(dev->udev, count, GFP_KERNEL, &urb->transfer_dma);
    if (!buf) {
        retval = -ENOMEM;
        goto error;
    }
    
    if (copy_from_user(buf, user_buffer, count)) {
        retval = -EFAULT;
        goto error;
    }
    
    /* initialize the urb properly */
    usb_fill_bulk_urb(urb, dev->udev,
              usb_sndbulkpipe(dev->udev, dev->bulk_out_endpointAddr),
              buf, count, lcd_write_bulk_callback, dev);
    urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

    usb_anchor_urb(urb, &dev->submitted);
    
    /* send the data out the bulk port */
    retval = usb_submit_urb(urb, GFP_KERNEL);
    if (retval) {
        err("USBLCD: %s - failed submitting write urb, error %d", __func__, retval);
        goto error_unanchor;
    }
    
    /* release our reference to this urb, the USB core will eventually free it entirely */
    usb_free_urb(urb);

exit:
    return count;
error_unanchor:
    usb_unanchor_urb(urb);
error:
    usb_buffer_free(dev->udev, count, buf, urb->transfer_dma);
    usb_free_urb(urb);
err_no_buf:
    up(&dev->limit_sem);
    return retval;
}
Example #14
0
static int recv_stream(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *m, size_t buf_len, int flags)
{
	struct tipc_sock *tsock = tipc_sk(sock->sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	unsigned int q_len;
	unsigned int sz;
	int sz_to_copy;
	int sz_copied = 0;
	int needed;
	char __user *crs = m->msg_iov->iov_base;
	unsigned char *buf_crs;
	u32 err;
	int res;

	/* Currently doesn't support receiving into multiple iovec entries */

	if (m->msg_iovlen != 1)
		return -EOPNOTSUPP;

	/* Catch invalid receive attempts */

	if (unlikely(!buf_len))
		return -EINVAL;

	if (unlikely(sock->state == SS_DISCONNECTING)) {
		if (skb_queue_len(&sock->sk->sk_receive_queue) == 0)
			return -ENOTCONN;
	} else if (unlikely(sock->state != SS_CONNECTED))
		return -ENOTCONN;

	/* Look for a message in receive queue; wait if necessary */

	if (unlikely(down_interruptible(&tsock->sem)))
		return -ERESTARTSYS;

restart:
	if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
		     (flags & MSG_DONTWAIT))) {
		res = -EWOULDBLOCK;
		goto exit;
	}

	if ((res = wait_event_interruptible(
		*sock->sk->sk_sleep,
		((q_len = skb_queue_len(&sock->sk->sk_receive_queue)) ||
		 (sock->state == SS_DISCONNECTING))) )) {
		goto exit;
	}

	/* Catch attempt to receive on an already terminated connection */
	/* [THIS CHECK MAY OVERLAP WITH AN EARLIER CHECK] */

	if (!q_len) {
		res = -ENOTCONN;
		goto exit;
	}

	/* Get access to first message in receive queue */

	buf = skb_peek(&sock->sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */

	if ((!sz) && (!err)) {
		advance_queue(tsock);
		goto restart;
	}

	/* Optionally capture sender's address & ancillary data of first msg */

	if (sz_copied == 0) {
		set_orig_addr(m, msg);
		if ((res = anc_data_recv(m, msg, tsock->p)))
			goto exit;
	}

	/* Capture message data (if valid) & compute return value (always) */

	if (!err) {
		buf_crs = (unsigned char *)(TIPC_SKB_CB(buf)->handle);
		sz = skb_tail_pointer(buf) - buf_crs;

		needed = (buf_len - sz_copied);
		sz_to_copy = (sz <= needed) ? sz : needed;
		if (unlikely(copy_to_user(crs, buf_crs, sz_to_copy))) {
			res = -EFAULT;
			goto exit;
		}
		sz_copied += sz_to_copy;

		if (sz_to_copy < sz) {
			if (!(flags & MSG_PEEK))
				TIPC_SKB_CB(buf)->handle = buf_crs + sz_to_copy;
			goto exit;
		}

		crs += sz_to_copy;
	} else {
		if (sz_copied != 0)
			goto exit; /* can't add error msg to valid data */

		if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* Consume received message (optional) */

	if (likely(!(flags & MSG_PEEK))) {
		if (unlikely(++tsock->p->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
			tipc_acknowledge(tsock->p->ref, tsock->p->conn_unacked);
		advance_queue(tsock);
	}

	/* Loop around if more data is required */

	if ((sz_copied < buf_len)    /* didn't get all requested data */
	    && (flags & MSG_WAITALL) /* ... and need to wait for more */
	    && (!(flags & MSG_PEEK)) /* ... and aren't just peeking at data */
	    && (!err)                /* ... and haven't reached a FIN */
	    )
		goto restart;

exit:
	up(&tsock->sem);
	return sz_copied ? sz_copied : res;
}
Example #15
0
//-.-.-.-.-.-.-.-.-.-.-.-.-.-.- Dtu2xxRequestExclusiveAccess -.-.-.-.-.-.-.-.-.-.-.-.-.-.-
//
// Called by DeviceIoControl to request exclusive access to an Rx channel.
// If exclusive access is granted, the thread pointer is stored, so that the exclusive-
// access lock can be released upon a CLOSE of the thread.
// The exclusive-access variables are protected by a spin lock.
//
Int  Dtu2xxRequestExclusiveAccess(
	IN PDTU2XX_FDO  pFdo,			// Functional device object
	IN struct file* pFileObject,	// File object requesting/dropping excl. access
	IN Int  PortIndex,				// Channel index
	IN Int  Request,				// 0 = Request exclusive access
									// 1 = Release exclusive access
	OUT Int*  pGranted)				// Granted Yes / No
{
	Channel* pCh=NULL;

	// Check port index
	if ( PortIndex<0 || PortIndex >= pFdo->m_NumChannels )
	{
		DTU2XX_LOG( KERN_INFO, "Dtu2xxRequestExclusiveAccess: PortIndex=%d INVALID",
					PortIndex );
		return -EFAULT;
	}
	pCh = &pFdo->m_Channel[PortIndex];

#if LOG_LEVEL > 0
	DTU2XX_LOG( KERN_INFO, "[%d] Dtu2xxRequestExclusiveAccess: Request=%d",
			    PortIndex, Request );
#endif

	if (Request!=0 && Request!=1)
	{
		DTU2XX_LOG( KERN_INFO, "[%d] Dtu2xxRequestExclusiveAccess: Request=%d ILLEGAL",
				    PortIndex, Request );
		return -EFAULT;
	}

	if ( 0!=down_interruptible(&pCh->m_ExclAccLock) )
		return -EFAULT;

	if (Request == 0)
	{
		// Request exclusive access
		if (pCh->m_ExclAccess == 0)
		{
			pCh->m_ExclAccess = 1;
			pCh->m_pExclAccFileObject = pFileObject;
			*pGranted = 1;
#if LOG_LEVEL > 0
			DTU2XX_LOG( KERN_INFO, "[%d] Dtu2xxRequestExclusiveAccess: Exclusive access "
					    "GRANTED", PortIndex );
#endif
		} else {
			*pGranted = 0;
#if LOG_LEVEL > 0
			DTU2XX_LOG( KERN_INFO, "[%d] Dtu2xxRequestExclusiveAccess: Exclusive access "
					    "DENIED", PortIndex );
#endif
		}
	}
	else
	{
		// Release exclusive access.
		// Based on cooperative model: The exclusive-access lock is ALWAYS cleared,
		// without file-object check.
		// So, DeviceIoControl can also be used to unconditionally clear the lock.
		pCh->m_ExclAccess = 0;
#if LOG_LEVEL > 0
		DTU2XX_LOG( KERN_INFO, "[%d] Dtu2xxRequestExclusiveAccess: Exclusive access "
				    "RELEASED", PortIndex );
#endif
	}
	up(&pCh->m_ExclAccLock);
	return 0;
}
Example #16
0
int osal_rid_lock(void)
{
	return down_interruptible(&rid_sem);
}
Example #17
0
static int rdma_request(struct p9_client *client, struct p9_req_t *req)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_send_wr wr, *bad_wr;
	struct ib_sge sge;
	int err = 0;
	unsigned long flags;
	struct p9_rdma_context *c = NULL;
	struct p9_rdma_context *rpl_context = NULL;

	/* Allocate an fcall for the reply */
	rpl_context = kmalloc(sizeof *rpl_context, GFP_KERNEL);
	if (!rpl_context) {
		err = -ENOMEM;
		goto err_close;
	}

	/*
	 * If the request has a buffer, steal it, otherwise
	 * allocate a new one.  Typically, requests should already
	 * have receive buffers allocated and just swap them around
	 */
	if (!req->rc) {
		req->rc = kmalloc(sizeof(struct p9_fcall)+client->msize,
								GFP_KERNEL);
		if (req->rc) {
			req->rc->sdata = (char *) req->rc +
						sizeof(struct p9_fcall);
			req->rc->capacity = client->msize;
		}
	}
	rpl_context->rc = req->rc;
	if (!rpl_context->rc) {
		err = -ENOMEM;
		kfree(rpl_context);
		goto err_close;
	}

	/*
	 * Post a receive buffer for this request. We need to ensure
	 * there is a reply buffer available for every outstanding
	 * request. A flushed request can result in no reply for an
	 * outstanding request, so we must keep a count to avoid
	 * overflowing the RQ.
	 */
	if (atomic_inc_return(&rdma->rq_count) <= rdma->rq_depth) {
		err = post_recv(client, rpl_context);
		if (err) {
			kfree(rpl_context->rc);
			kfree(rpl_context);
			goto err_close;
		}
	} else
		atomic_dec(&rdma->rq_count);

	/* remove posted receive buffer from request structure */
	req->rc = NULL;

	/* Post the request */
	c = kmalloc(sizeof *c, GFP_KERNEL);
	if (!c) {
		err = -ENOMEM;
		goto err_close;
	}
	c->req = req;

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->req->tc->sdata, c->req->tc->size,
				    DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
		goto error;

	sge.addr = c->busa;
	sge.length = c->req->tc->size;
	sge.lkey = rdma->lkey;

	wr.next = NULL;
	c->wc_op = IB_WC_SEND;
	wr.wr_id = (unsigned long) c;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;
	wr.sg_list = &sge;
	wr.num_sge = 1;

	if (down_interruptible(&rdma->sq_sem))
		goto error;

	return ib_post_send(rdma->qp, &wr, &bad_wr);

 error:
	P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n");
	return -EIO;

 err_close:
	spin_lock_irqsave(&rdma->req_lock, flags);
	if (rdma->state < P9_RDMA_CLOSING) {
		rdma->state = P9_RDMA_CLOSING;
		spin_unlock_irqrestore(&rdma->req_lock, flags);
		rdma_disconnect(rdma->cm_id);
	} else
		spin_unlock_irqrestore(&rdma->req_lock, flags);
	return err;
}
Example #18
0
int osal_hacc_lock(void)
{
	return down_interruptible(&hacc_sem);
}
Example #19
0
static int audio_read(struct file *file, char *buffer,
		      size_t count, loff_t * ppos)
{
	char *buffer0 = buffer;
	audio_state_t *state = (audio_state_t *)file->private_data;
	audio_stream_t *s = state->input_stream;
	int chunksize, ret = 0;

	DPRINTK("audio_read: count=%d\n", count);

	if (ppos != &file->f_pos)
		return -ESPIPE;
	if (s->mapped)
		return -ENXIO;

	if (!s->active) {
		if (!s->buffers && audio_setup_buf(s))
			return -ENOMEM;
		audio_check_tx_spin(state);
		audio_prime_dma(s);
	}

	while (count > 0) {
		audio_buf_t *b = s->buf;

		/* Wait for a buffer to become full */
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			if (down_trylock(&b->sem))
				break;
		} else {
			ret = -ERESTARTSYS;
			if (down_interruptible(&b->sem))
				break;
		}

		/* Grab data from the current buffer */
		chunksize = b->size;
		if (chunksize > count)
			chunksize = count;
		DPRINTK("read %d from %d\n", chunksize, s->buf_idx);
		if (copy_to_user(buffer,
				 b->start + s->fragsize - b->size,
				 chunksize)) {
			up(&b->sem);
			return -EFAULT;
		}
		b->size -= chunksize;
		buffer += chunksize;
		count -= chunksize;
		if (b->size > 0) {
			up(&b->sem);
			break;
		}

		/* Make current buffer available for DMA again */
		sa1100_dma_queue_buffer(s->dma_ch, (void *) b,
					b->dma_addr, s->fragsize);
		NEXT_BUF(s, buf);
	}

	if ((buffer - buffer0))
		ret = buffer - buffer0;
	DPRINTK("audio_read: return=%d\n", ret);
	return ret;
}
Example #20
0
int osal_verify_lock(void)
{
	return down_interruptible(&osal_verify_sem);
}
Example #21
0
ssize_t scull_write(struct file *filp, const char __user *buf, size_t count,
                loff_t *f_pos)
{
	struct scull_dev *dev = filp->private_data;
	struct scull_qset *dptr;
	int quantum = dev->quantum, qset = dev->qset;
	int itemsize = quantum * qset;
	int item, s_pos, q_pos, rest;
	ssize_t retval = -ENOMEM; /* value used in "goto out" statements */

	/* acquire the mutex */
	if (down_interruptible(&dev->sem))
		return -ERESTARTSYS;
	printk(KERN_ALERT "Debug by andrea: scull_write()\n");
	/* find listitem, qset index and offset in the quantum */
	/* item is the list item holding the write position, s_pos is the
	   index of the quantum inside that item, and q_pos is the offset
	   within that quantum */
	item = (long)*f_pos / itemsize;
	rest = (long)*f_pos % itemsize;
	s_pos = rest / quantum; q_pos = rest % quantum;

	/* follow the list up to the right position */
	/* fetch the list item */
	dptr = scull_follow(dev, item);
	if (dptr == NULL)
		goto out;
	/* if the item's quantum pointer array doesn't exist yet, allocate it */
	if (!dptr->data) {
		dptr->data = kmalloc(qset * sizeof(char *), GFP_KERNEL);
		if (!dptr->data)
			goto out;
		memset(dptr->data, 0, qset * sizeof(char *));
	}
	/* if the target quantum doesn't exist yet, allocate it */
	if (!dptr->data[s_pos]) {
		dptr->data[s_pos] = kmalloc(quantum, GFP_KERNEL);
		if (!dptr->data[s_pos])
			goto out;
	}
	/* write only up to the end of this quantum */
	/* limit a single write to one quantum at most, to avoid overrunning it */
	if (count > quantum - q_pos)
		count = quantum - q_pos;

	/* copy the data into the quantum */
	if (copy_from_user(dptr->data[s_pos]+q_pos, buf, count)) {
		retval = -EFAULT;
		goto out;
	}

	/* advance the file position by count bytes */
	*f_pos += count;
	/* report the number of bytes written */
	retval = count;

    /* update the recorded device size */
	if (dev->size < *f_pos)
		dev->size = *f_pos;

  out:
	up(&dev->sem);
	return retval;
}
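To make the index arithmetic concrete: with the LDD3 scull defaults of quantum = 4000 and qset = 1000, itemsize = 4,000,000 bytes. A write at *f_pos = 5,002,000 then yields item = 1 (the second list item), rest = 1,002,000, s_pos = 250 and q_pos = 2000 — that is, byte 2000 of quantum 250 in the second qset item.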
Example #22
0
static int acc_ioctl (struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
    int err = 0;
    struct acc *acc;
    acc = container_of(inode->i_cdev, struct acc, cdev);

    switch (cmd) {

    case BMI_MDACC_ACCELEROMETER_SET_CONFIG:
    {
        struct mdacc_accel_config new_cfg;

        err = copy_from_user ( (void*)&new_cfg, (void*)arg, sizeof (struct mdacc_accel_config) );
        if (err) {
            return -EFAULT;
        }

        err = check_config (&new_cfg);
        if (err) {
            return -EINVAL;
        }

        if (acc->cfg.run) {
            mon_stop_accel (acc->mon);
        }

        // take the mon semaphore
        if (down_interruptible (&acc->mon->sem))
            return -ERESTARTSYS;

        memcpy ( &acc->cfg, &new_cfg, sizeof (struct mdacc_accel_config) );
        cque_destroy (acc->cque);
        acc->cque = cque_create  (acc->cfg.read_queue_size, acc->cfg.read_queue_threshold);

        // release the mon semaphore
        up (&acc->mon->sem);

        mon_set_config_accel( acc->mon, &acc->cfg);

    }
    break;

    case BMI_MDACC_ACCELEROMETER_GET_CONFIG:

        mon_get_config_accel( acc->mon, &acc->cfg);


        err = copy_to_user ( (void*)arg, &acc->cfg, sizeof (struct mdacc_accel_config) );
        if (err) {
            return -EFAULT;
        }
        break;

    case BMI_MDACC_ACCELEROMETER_RUN:

        acc->cfg.run = 1;
        err = mon_start_accel (acc->mon);
        if (err) {
            return -ENODEV;
        }
        break;


    case BMI_MDACC_ACCELEROMETER_STOP:

        acc->cfg.run = 0;
        err = mon_stop_accel (acc->mon);
        if (err) {
            return -ENODEV;
        }
        break;

    default:
        printk (KERN_ERR "acc_ioctl() - error exit\n");
        return -ENOTTY;
    }

    return 0;
}
Example #23
0
static int s2250_detect(struct i2c_adapter *adapter, int addr, int kind)
{
	struct i2c_client *client;
	struct s2250 *dec;
	u8 *data;
	struct go7007 *go = i2c_get_adapdata(adapter);
	struct go7007_usb *usb = go->hpi_context;

	client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL);
	if (client == NULL)
		return -ENOMEM;
	memcpy(client, &s2250_client_templ,
	       sizeof(s2250_client_templ));
	client->adapter = adapter;

	dec = kmalloc(sizeof(struct s2250), GFP_KERNEL);
	if (dec == NULL) {
		kfree(client);
		return -ENOMEM;
	}

	dec->std = V4L2_STD_NTSC;
	dec->brightness = 50;
	dec->contrast = 50;
	dec->saturation = 50;
	dec->hue = 0;
	client->addr = TLV320_ADDRESS;
	i2c_set_clientdata(client, dec);

	printk(KERN_DEBUG
	       "s2250: initializing video decoder on %s\n",
	       adapter->name);

	/* initialize the audio */
	client->addr = TLV320_ADDRESS;
	if (write_regs(client, aud_regs) < 0) {
		printk(KERN_ERR
		       "s2250: error initializing audio\n");
		kfree(client);
		kfree(dec);
		return 0;
	}
	client->addr = S2250_VIDDEC;
	i2c_set_clientdata(client, dec);

	if (write_regs(client, vid_regs) < 0) {
		printk(KERN_ERR
		       "s2250: error initializing decoder\n");
		kfree(client);
		kfree(dec);
		return 0;
	}
	if (write_regs_fp(client, vid_regs_fp) < 0) {
		printk(KERN_ERR
		       "s2250: error initializing decoder\n");
		kfree(client);
		kfree(dec);
		return 0;
	}
	/* set default channel */
	/* composite */
	write_reg_fp(client, 0x20, 0x020 | 1);
	write_reg_fp(client, 0x21, 0x662);
	write_reg_fp(client, 0x140, 0x060);

	/* set default audio input */
	dec->audio_input = 0;
	write_reg(client, 0x08, 0x02); /* Line In */

	if (down_interruptible(&usb->i2c_lock) == 0) {
		data = kzalloc(16, GFP_KERNEL);
		if (data != NULL) {
			int rc;
			rc = go7007_usb_vendor_request(go, 0x41, 0, 0,
						       data, 16, 1);
			if (rc > 0) {
				u8 mask;
				data[0] = 0;
				mask = 1<<5;
				data[0] &= ~mask;
				data[1] |= mask;
				go7007_usb_vendor_request(go, 0x40, 0,
							  (data[1]<<8)
							  + data[1],
							  data, 16, 0);
			}
			kfree(data);
		}
		up(&usb->i2c_lock);
	}

	i2c_attach_client(client);
	printk("s2250: initialized successfully\n");
	return 0;
}
Example #24
0
/*H:030
 * Let's jump straight to the main loop which runs the Guest.
 * Remember, this is called by the Launcher reading /dev/lguest, and we keep
 * going around and around until something interesting happens.
 */
int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
{
	/* We stop running once the Guest is dead. */
	while (!cpu->lg->dead) {
		unsigned int irq;
		bool more;

		/* First we run any hypercalls the Guest wants done. */
		if (cpu->hcall) {
			// printk("=== DUMPING REGISTERS HYPERCALL ===\n");
			// dump_cpu_regs(cpu);
			do_hypercalls(cpu);
		}

		/*
		 * It's possible the Guest did a NOTIFY hypercall to the
		 * Launcher.
		 */
		if (cpu->pending_notify) {
			/*
			 * Does it just need to write to a registered
			 * eventfd (ie. the appropriate virtqueue thread)?
			 */
			if (!send_notify_to_eventfd(cpu)) {
				/* OK, we tell the main Launcher. */
				if (put_user(cpu->pending_notify, user))
					return -EFAULT;
				return sizeof(cpu->pending_notify);
			}
		}

		/*
		 * All long-lived kernel loops need to check with this horrible
		 * thing called the freezer.  If the Host is trying to suspend,
		 * it stops us.
		 */
		try_to_freeze();

		/* Check for signals */
		if (signal_pending(current))
			return -ERESTARTSYS;

		/*
		 * Check if there are any interrupts which can be delivered now:
		 * if so, this sets up the handler to be executed when we next
		 * run the Guest.
		 */
		irq = interrupt_pending(cpu, &more);
		if (irq < LGUEST_IRQS)
			try_deliver_interrupt(cpu, irq, more);

		/*
		 * Just make absolutely sure the Guest is still alive.  One of
		 * those hypercalls could have been fatal, for example.
		 */
		if (cpu->lg->dead)
			break;

		/*
		 * If the Guest is suspended, sleep until it is resumed.
		 */
		// printk("Checking for suspend\n");
		if(cpu->suspended) {
			// Sleep
			down_interruptible(&cpu->suspend_lock);
			// Unlock the lock since we no longer need it
			up(&cpu->suspend_lock);
			// set_current_state(TASK_INTERRUPTIBLE);
			// cond_resched();
			// schedule();
			// printk("Suspended\n");
			// continue;
			// TODO: Attempt to fix clock skew and nmi here?
			init_clockdev(cpu);
		}

		/*
		 * If the Guest asked to be stopped, we sleep.  The Guest's
		 * clock timer will wake us.
		 */
		if (cpu->halted) {
			set_current_state(TASK_INTERRUPTIBLE);
			/*
			 * Just before we sleep, make sure no interrupt snuck in
			 * which we should be doing.
			 */
			if (interrupt_pending(cpu, &more) < LGUEST_IRQS)
				set_current_state(TASK_RUNNING);
			else 
				schedule();
			continue;
		}

		/*
		 * OK, now we're ready to jump into the Guest.  First we put up
		 * the "Do Not Disturb" sign:
		 */
		local_irq_disable();

		/* Actually run the Guest until something happens. */
		lguest_arch_run_guest(cpu);

		/* Now we're ready to be interrupted or moved to other CPUs */
		local_irq_enable();

		/* Now we deal with whatever happened to the Guest. */
		lguest_arch_handle_trap(cpu);
	}

	/* Special case: Guest is 'dead' but wants a reboot. */
	if (cpu->lg->dead == ERR_PTR(-ERESTART))
		return -ERESTART;

	/* The Guest is dead => "No such file or directory" */
	return -ENOENT;
}
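The suspend gate in run_guest() works because some other path holds cpu->suspend_lock down while the Guest is suspended: run_guest() blocks in down_interruptible() and, once woken, immediately re-ups, so the semaphore acts as a turnstile rather than a lock. A sketch of the assumed resume side (the helper name is hypothetical):

static void lg_cpu_resume(struct lg_cpu *cpu)
{
	cpu->suspended = 0;
	up(&cpu->suspend_lock);	/* unblocks run_guest(), which re-ups at once */
}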
Example #25
0
/*
 * s390_machine_check_handler
 *
 * machine check handler, dequeueing machine check entries
 *  and processing them
 */
static int s390_machine_check_handler( void *parm)
{
	struct semaphore *sem = parm;
	unsigned long     flags;
	mache_t          *pmache;

	int               found = 0;

	/* set name to something sensible */
	strcpy(current->comm, "kmcheck");

	/* block all signals */
	sigfillset(&current->blocked);

#ifdef S390_MACHCHK_DEBUG
	printk( KERN_NOTICE "mach_handler : ready\n");
#endif	

	do {

#ifdef S390_MACHCHK_DEBUG
		printk( KERN_NOTICE "mach_handler : waiting for wakeup\n");
#endif	

		down_interruptible( sem );

#ifdef S390_MACHCHK_DEBUG
		printk( KERN_NOTICE "\nmach_handler : wakeup ... \n");
#endif	
		found = 0; /* init ... */

		__save_flags( flags );
		__cli();

		do {

		pmache = s390_dequeue_mchchk();

		if ( pmache )
		{
			found = 1;
		
			if ( pmache->mcic.mcc.mcd.cp )
			{
				crwe_t *pcrwe_n;
				crwe_t *pcrwe_h;

				s390_do_crw_pending( pmache->mc.crwe );

				pcrwe_h = pmache->mc.crwe;
				pcrwe_n = pmache->mc.crwe->crwe_next;

				pmache->mcic.mcc.mcd.cp = 0;
				pmache->mc.crwe         = NULL;

				spin_lock( &crw_queue_lock);

				while ( pcrwe_h )
				{
					pcrwe_h->crwe_next = crw_buffer_anchor;
					crw_buffer_anchor  = pcrwe_h;
					pcrwe_h            = pcrwe_n;

					if ( pcrwe_h != NULL )
						pcrwe_n = pcrwe_h->crwe_next;

				} /* endwhile */

				spin_unlock( &crw_queue_lock);

			} /* endif */

#ifdef CONFIG_MACHCHK_WARNING
			if ( pmache->mcic.mcc.mcd.w )
			{
				ctrl_alt_del();		// shutdown NOW!
#ifdef S390_MACHCHK_DEBUG
			printk( KERN_DEBUG "mach_handler : kill -SIGPWR init\n");
#endif
			} /* endif */
#endif

			s390_enqueue_free_mchchk( pmache );
		}
		else
		{

			// unconditional surrender ...
#ifdef S390_MACHCHK_DEBUG
			printk( KERN_DEBUG "mach_handler : nothing to do, sleeping\n");
#endif	

		} /* endif */	

		} while ( pmache );

		__restore_flags( flags );

	} while ( 1 );

	return( 0);
}
Example #26
0
/*
 * Retrieve CRWs and call function to handle event.
 */
static int crw_collect_info(void *unused)
{
	struct crw crw[2];
	int ccode;
	unsigned int chain;
	int ignore;

repeat:
	ignore = down_interruptible(&crw_semaphore);
	chain = 0;
	while (1) {
		crw_handler_t handler;

		if (unlikely(chain > 1)) {
			struct crw tmp_crw;

			printk(KERN_WARNING"%s: Code does not support more "
			       "than two chained crws; please report to "
			       "[email protected]!\n", __func__);
			ccode = stcrw(&tmp_crw);
			printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, "
			       "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			       __func__, tmp_crw.slct, tmp_crw.oflw,
			       tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc,
			       tmp_crw.erc, tmp_crw.rsid);
			printk(KERN_WARNING"%s: This was crw number %x in the "
			       "chain\n", __func__, chain);
			if (ccode != 0)
				break;
			chain = tmp_crw.chn ? chain + 1 : 0;
			continue;
		}
		ccode = stcrw(&crw[chain]);
		if (ccode != 0)
			break;
		printk(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
		       "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		       crw[chain].slct, crw[chain].oflw, crw[chain].chn,
		       crw[chain].rsc, crw[chain].anc, crw[chain].erc,
		       crw[chain].rsid);
		/* Check for overflows. */
		if (crw[chain].oflw) {
			int i;

			pr_debug("%s: crw overflow detected!\n", __func__);
			mutex_lock(&crw_handler_mutex);
			for (i = 0; i < NR_RSCS; i++) {
				if (crw_handlers[i])
					crw_handlers[i](NULL, NULL, 1);
			}
			mutex_unlock(&crw_handler_mutex);
			chain = 0;
			continue;
		}
		if (crw[0].chn && !chain) {
			chain++;
			continue;
		}
		mutex_lock(&crw_handler_mutex);
		handler = crw_handlers[crw[chain].rsc];
		if (handler)
			handler(&crw[0], chain ? &crw[1] : NULL, 0);
		mutex_unlock(&crw_handler_mutex);
		/* chain is always 0 or 1 here. */
		chain = crw[chain].chn ? chain + 1 : 0;
	}
	goto repeat;
	return 0;
}
Example #27
0
// -------------------------------------------------------------------------
ssize_t bks_write(struct file *file, const char __user *buf, size_t count, loff_t *offset)
{
    ssize_t bytes_transferred = 0;
    size_t bytes_to_copy = 0;
    unsigned long length =  0;
    unsigned long remain =  0;
    long ec = -1;
    struct BksDeviceContext* ctx = file->private_data;
    int nr = 0;
    unsigned char* ptr = 0;

    if (!ctx) {
        bytes_transferred = -EINVAL;
        goto exit; // not much we can do.
    } else {
        nr = ctx->nr;
        ptr = ctx->ptr;
    }
    printk(KERN_INFO "[bks] write(), context: %p, minor: %d, ptr: %p\n", ctx, nr, ptr);

    // acquire the lock
    if (0 != down_interruptible(&bks_buffer_lock[nr])) {
        printk(KERN_INFO "[bks] write(), down_interruptible() returned non zero\n");
        // interrupted. bail out
        bytes_transferred = -EINTR;
        goto exit;
    }


    // always add data to the end - no matter where we were previously.
    ptr = bks_buffer_end[nr];

    // how many bytes of data are already in the buffer?
    //length = ptr - bks_buffer_begin[nr];
    length = bks_buffer_end[nr] - bks_buffer_begin[nr];
    // how many bytes of free space remains in the buffer?
    remain =  bks_buffer_size[nr] - length; // getBufferAvailableSpace(nr)

    printk(KERN_INFO "[bks] write(), length: %lu, remain: %lu\n", length, remain);

    // count is a size_t and therefore unsigned, so it can never be negative;
    // just clamp the request to the free space remaining.
    if (count < remain) {
        bytes_to_copy = count;
    } else {
        bytes_to_copy = remain;
    }

    if (bytes_to_copy) {
        ec = copy_from_user(ptr, buf, bytes_to_copy);
    }

    if (!ec) {
        bytes_transferred = bytes_to_copy;
        bks_buffer_end[nr] += bytes_transferred;
    } else {
        bytes_transferred = 0;
    }

    ptr += bytes_transferred;
    ctx->ptr = ptr;

    // release the lock
    up(&bks_buffer_lock[nr]);


exit:
    printk(KERN_INFO "[bks] write(), transferred %d bytes\n", bytes_transferred);
    return bytes_transferred;
}
Example #28
0
static int AOTOMdev_ioctl(struct inode *Inode, struct file *File, unsigned int cmd, unsigned long arg)
{
	int icon_nr = 0;
	static int mode = 0;
	int res = -EINVAL;
	dprintk(5, "%s > 0x%.8x\n", __func__, cmd);

	if(down_interruptible (&write_sem))
		return -ERESTARTSYS;

	switch(cmd) {
	case VFDSETMODE:
	case VFDSETLED:
	case VFDICONDISPLAYONOFF:
	case VFDSETTIME:
	case VFDBRIGHTNESS:
		if (copy_from_user(&aotom_data, (void *) arg, sizeof(aotom_data))) {
			up(&write_sem);	/* don't return with write_sem still held */
			return -EFAULT;
		}
	}

	switch(cmd) {
	case VFDSETMODE:
		mode = aotom_data.u.mode.compat;
		break;
	case VFDSETLED:
#if defined(SPARK) || defined(SPARK7162)
		if (aotom_data.u.led.led_nr > -1 && aotom_data.u.led.led_nr < LED_MAX) {
			switch (aotom_data.u.led.on) {
			case LOG_OFF:
			case LOG_ON:
				res = YWPANEL_VFD_SetLed(aotom_data.u.led.led_nr, aotom_data.u.led.on);
				led_state[aotom_data.u.led.led_nr].state = aotom_data.u.led.on;
				break;
			default: // toggle (for aotom_data.u.led.on * 10) ms
				flashLED(aotom_data.u.led.led_nr, aotom_data.u.led.on * 10);
			}
		}
#endif
		break;
	case VFDBRIGHTNESS:
		if (aotom_data.u.brightness.level < 0)
			aotom_data.u.brightness.level = 0;
		else if (aotom_data.u.brightness.level > 7)
			aotom_data.u.brightness.level = 7;
		res = YWPANEL_VFD_SetBrightness(aotom_data.u.brightness.level);
		break;
	case VFDICONDISPLAYONOFF:
	{
#if defined(SPARK)
		switch (aotom_data.u.icon.icon_nr) {
		case 0:
			res = YWPANEL_VFD_SetLed(LED_RED, aotom_data.u.icon.on);
			led_state[LED_RED].state = aotom_data.u.icon.on;
			break;
		case 35:
			res = YWPANEL_VFD_SetLed(LED_GREEN, aotom_data.u.icon.on);
			led_state[LED_GREEN].state = aotom_data.u.icon.on;
			break;
		default:
			break;
		}
#endif
#if defined(SPARK7162)
		icon_nr = aotom_data.u.icon.icon_nr;
		//e2 icons workaround
		//printk("icon_nr = %d\n", icon_nr);
		if (icon_nr >= 256) {
			icon_nr = icon_nr / 256;
			switch (icon_nr) {
			case 0x11:
				icon_nr = 0x0E; //widescreen
				break;
			case 0x13:
				icon_nr = 0x0B; //CA
				break;
			case 0x15:
				icon_nr = 0x19; //mp3
				break;
			case 0x17:
				icon_nr = 0x1A; //ac3
				break;
			case 0x1A:
				icon_nr = 0x03; //play
				break;
			case 0x1e:
				icon_nr = 0x07; //record
				break;
			case 38:
				break; //cd part1
			case 39:
				break; //cd part2
			case 40:
				break; //cd part3
			case 41:
				break; //cd part4
			default:
				icon_nr = 0; //no additional symbols at the moment
				break;
			}
		}	  
		if (aotom_data.u.icon.on != 0)
			aotom_data.u.icon.on = 1;
		if (icon_nr > 0 && icon_nr <= 45 )
			res = aotomSetIcon(icon_nr, aotom_data.u.icon.on);
		if (icon_nr == 46){
			switch (aotom_data.u.icon.on){
			case 1:
				VFD_set_all_icons();
				res = 0;
				break;
			case 0:
				VFD_clear_all_icons();
				res = 0;
				break;
			default:
				break;
			}
		}
#endif		
		mode = 0;
		break;
	}
	case VFDSTANDBY:
	{
#if defined(SPARK) || defined(SPARK7162)
		u32 uTime = 0;
		//u32 uStandByKey = 0;
		//u32 uPowerOnTime = 0;
		get_user(uTime, (int *) arg);
		//printk("uTime = %d\n", uTime);

		//uPowerOnTime = YWPANEL_FP_GetPowerOnTime();
		//printk("1uPowerOnTime = %d\n", uPowerOnTime);

		YWPANEL_FP_SetPowerOnTime(uTime);

		//uPowerOnTime = YWPANEL_FP_GetPowerOnTime();
		//printk("2uPowerOnTime = %d\n", uPowerOnTime);
		#if 0
		uStandByKey = YWPANEL_FP_GetStandByKey(0);
		printk("uStandByKey = %d\n", uStandByKey);
		uStandByKey = YWPANEL_FP_GetStandByKey(1);
		printk("uStandByKey = %d\n", uStandByKey);
		uStandByKey = YWPANEL_FP_GetStandByKey(2);
		printk("uStandByKey = %d\n", uStandByKey);
		uStandByKey = YWPANEL_FP_GetStandByKey(3);
		printk("uStandByKey = %d\n", uStandByKey);
		uStandByKey = YWPANEL_FP_GetStandByKey(4);
		printk("uStandByKey = %d\n", uStandByKey);
		#endif
		clear_display();
		YWPANEL_FP_ControlTimer(true);
		YWPANEL_FP_SetCpuStatus(YWPANEL_CPUSTATE_STANDBY);
		res = 0;
#endif
		break;
	}
	case VFDSETTIME2:
	{
		u32 uTime = 0;
		res = get_user(uTime, (int *)arg);
		if (! res)
		{
			res = YWPANEL_FP_SetTime(uTime);
			YWPANEL_FP_ControlTimer(true);
		}
		break;
	}
	case VFDSETTIME:
		res = aotomSetTime(aotom_data.u.time.time);
		break;
	case VFDGETTIME:
	{
#if defined(SPARK) || defined(SPARK7162)
		u32 uTime = 0;
		uTime = YWPANEL_FP_GetTime();
		//printk("uTime = %d\n", uTime);
		res = put_user(uTime, (int *) arg);
#endif
		break;
	}
	case VFDGETWAKEUPMODE:
		break;
	case VFDDISPLAYCHARS:
		if (mode == 0)
		{
			if (copy_from_user(&vfd_data, (void *) arg, sizeof(vfd_data))) {
				up(&write_sem);	/* don't return with write_sem still held */
				return -EFAULT;
			}
			if (vfd_data.length > sizeof(vfd_data.data))
				vfd_data.length = sizeof(vfd_data.data);
			while ((vfd_data.length > 0) && (vfd_data.data[vfd_data.length - 1] == '\n'))
				vfd_data.length--;
			res = run_draw_thread(&vfd_data);
		} else
			mode = 0;
		break;
	case VFDDISPLAYWRITEONOFF:
		break;
	case VFDDISPLAYCLR:
		vfd_data.length = 0;
		res = run_draw_thread(&vfd_data);
		break;
#if defined(SPARK)
	case 0x5305:
		res = 0;
		break;
#endif
	case 0x5401:
		res = 0;
		break;
	case VFDGETSTARTUPSTATE:
	{
		YWPANEL_STARTUPSTATE_t State;
		if (YWPANEL_FP_GetStartUpState(&State))
			res = put_user(State, (int *) arg);
		break;
	}
	case VFDGETVERSION:
	{
		YWPANEL_Version_t panel_version;
		memset(&panel_version, 0, sizeof(YWPANEL_Version_t));
		if (YWPANEL_FP_GetVersion(&panel_version))
			res = put_user (panel_version.DisplayInfo, (int *)arg);
		break;
	}

	default:
		printk("VFD/AOTOM: unknown IOCTL 0x%x\n", cmd);
		mode = 0;
		break;
	}

	up(&write_sem);

	dprintk(5, "%s <\n", __func__);
	return res;
}
Example #29
0
unsigned long oxnas_copy_from_user(void *to, const void __user *from, unsigned long count)
{
    int pages_mapped, i;
    unsigned long transfered = 0;
    struct page *pages[2];
    oxnas_dma_channel_t *channel;
    unsigned long uaddr = (unsigned long)from;
    unsigned long end_page = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
    unsigned long start_page = uaddr >> PAGE_SHIFT;
    int nr_pages = end_page - start_page;
//printk("F 0x%08lx 0x%08lx %lu -> %lu, %lu, %d\n", (unsigned long)to, (unsigned long)from, count, start_page, end_page, nr_pages);

    might_sleep();

    BUG_ON(nr_pages > 2);

    // Only support a single concurrent copy operation, as only using a single
    // DMA channel for now
    while (down_interruptible(&copy_mutex));

    // Get kernel mappings for the user pages
    down_read(&current->mm->mmap_sem);
    pages_mapped = get_user_pages(current, current->mm, uaddr, nr_pages, 0, 0, pages, NULL);
    up_read(&current->mm->mmap_sem);

    if (pages_mapped != nr_pages) {
        // Didn't get mappings for all pages requested, so release any we did get
        for (i=0; i < pages_mapped; ++i) {
            page_cache_release(pages[i]);
        }
        pages_mapped = 0;
    }

    if (pages_mapped) {
        int i;
        struct scatterlist sl;
        struct scatterlist gl[2];

        // Fill gathering DMA descriptors
        gl[0].page = pages[0]; 
        gl[0].offset = uaddr & ~PAGE_MASK;
        if (pages_mapped > 1) {
            gl[0].length = PAGE_SIZE - gl[0].offset;
            gl[1].offset = 0;
            gl[1].page = pages[1]; 
            gl[1].length = count - gl[0].length;
        } else {
            gl[0].length = count;
        }

        // Create DMA mappings for all the user pages
        for (i=0; i < pages_mapped; ++i) {
            gl[i].dma_address = dma_map_single(0, page_address(gl[i].page) + gl[i].offset, gl[i].length, DMA_TO_DEVICE);
        }

        // Create a DMA mapping for the kernel memory range
        sl.dma_address = dma_map_single(0, to, count, DMA_FROM_DEVICE);
        sl.length = count;

        // Allocate a DMA channel
        channel = oxnas_dma_request(1);
        BUG_ON(channel == OXNAS_DMA_CHANNEL_NUL);

        // Do DMA from user to kernel memory
        oxnas_dma_set_sg(
            channel,
            gl,
            pages_mapped,
            &sl,
            1,
            OXNAS_DMA_MODE_INC,
            OXNAS_DMA_MODE_INC,
            0);

        // Using notification callback
        oxnas_dma_set_callback(channel, dma_callback, OXNAS_DMA_CALLBACK_ARG_NUL);
        oxnas_dma_start(channel);

        // Sleep until transfer complete
        while (down_interruptible(&callback_semaphore));
        oxnas_dma_set_callback(channel, OXNAS_DMA_CALLBACK_NUL, OXNAS_DMA_CALLBACK_ARG_NUL);
        transfered += count;

        // Release the DMA channel
        oxnas_dma_free(channel);

        // Release kernel DMA mapping; dma_unmap_single() takes the DMA
        // address followed by the mapped length
        dma_unmap_single(0, sl.dma_address, sl.length, DMA_FROM_DEVICE);
        // Release user DMA mappings
        for (i=0; i < pages_mapped; ++i) {
            dma_unmap_single(0, gl[i].dma_address, gl[i].length, DMA_TO_DEVICE);
        }

        // Release user pages
        for (i=0; i < pages_mapped; ++i) {
            page_cache_release(pages[i]);
        }
    }

    up(&copy_mutex);

    return count - transfered;
}
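oxnas_copy_from_user() parks in down_interruptible(&callback_semaphore) until the transfer completes; the registered callback's only job is to release that semaphore. A sketch of the assumed shape — the oxnas callback signature here is an assumption:

static void dma_callback(oxnas_dma_channel_t *channel, void *arg)
{
	/* DMA finished: wake the thread sleeping in oxnas_copy_from_user() */
	up(&callback_semaphore);
}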
Example #30
0
static int send_msg(struct kiocb *iocb, struct socket *sock,
		    struct msghdr *m, size_t total_len)
{
	struct tipc_sock *tsock = tipc_sk(sock->sk);
	struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
	struct sk_buff *buf;
	int needs_conn;
	int res = -EINVAL;

	if (unlikely(!dest))
		return -EDESTADDRREQ;
	if (unlikely((m->msg_namelen < sizeof(*dest)) ||
		     (dest->family != AF_TIPC)))
		return -EINVAL;

	needs_conn = (sock->state != SS_READY);
	if (unlikely(needs_conn)) {
		if (sock->state == SS_LISTENING)
			return -EPIPE;
		if (sock->state != SS_UNCONNECTED)
			return -EISCONN;
		if ((tsock->p->published) ||
		    ((sock->type == SOCK_STREAM) && (total_len != 0)))
			return -EOPNOTSUPP;
		if (dest->addrtype == TIPC_ADDR_NAME) {
			tsock->p->conn_type = dest->addr.name.name.type;
			tsock->p->conn_instance = dest->addr.name.name.instance;
		}
	}

	if (down_interruptible(&tsock->sem))
		return -ERESTARTSYS;

	if (needs_conn) {

		/* Abort any pending connection attempts (very unlikely) */

		while ((buf = skb_dequeue(&sock->sk->sk_receive_queue))) {
			tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
			atomic_dec(&tipc_queue_size);
		}

		sock->state = SS_CONNECTING;
	}

	do {
		if (dest->addrtype == TIPC_ADDR_NAME) {
			if ((res = dest_name_check(dest, m)))
				goto exit;
			res = tipc_send2name(tsock->p->ref,
					     &dest->addr.name.name,
					     dest->addr.name.domain,
					     m->msg_iovlen,
					     m->msg_iov);
		}
		else if (dest->addrtype == TIPC_ADDR_ID) {
			res = tipc_send2port(tsock->p->ref,
					     &dest->addr.id,
					     m->msg_iovlen,
					     m->msg_iov);
		}
		else if (dest->addrtype == TIPC_ADDR_MCAST) {
			if (needs_conn) {
				res = -EOPNOTSUPP;
				goto exit;
			}
			if ((res = dest_name_check(dest, m)))
				goto exit;
			res = tipc_multicast(tsock->p->ref,
					     &dest->addr.nameseq,
					     0,
					     m->msg_iovlen,
					     m->msg_iov);
		}
		if (likely(res != -ELINKCONG)) {
exit:
			up(&tsock->sem);
			return res;
		}
		if (m->msg_flags & MSG_DONTWAIT) {
			res = -EWOULDBLOCK;
			goto exit;
		}
		if (wait_event_interruptible(*sock->sk->sk_sleep,
					     !tsock->p->congested)) {
		    res = -ERESTARTSYS;
		    goto exit;
		}
	} while (1);
}