Example #1
static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
{
	list_move(&erp_action->list, &erp_action->adapter->erp_running_head);
	zfcp_dbf_rec_run("erator1", erp_action);
}
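All of the snippets collected here revolve around the same helper: list_move() from include/linux/list.h unlinks an entry from whatever list it currently sits on and re-links it at the head of another list. A minimal sketch of the equivalent operation (simplified; the real helper uses __list_del_entry() internally):

#include <linux/list.h>

/* Roughly what list_move() does: detach the node from its current list,
 * then insert it right behind the new head so it becomes the first entry.
 */
static inline void sketch_list_move(struct list_head *entry,
				    struct list_head *head)
{
	list_del(entry);	/* unlink from the current list */
	list_add(entry, head);	/* link in at the head of the target list */
}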
Example #2
/*
 * RX tasklet takes data out of the RX queue and hands it up to the TTY
 * layer until it refuses to take any more data (or is throttled back).
 * Then it issues reads for any further data.
 *
 * If the RX queue becomes full enough that no usb_request is queued,
 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
 * can be buffered before the TTY layer's buffers (currently 64 KB).
 */
static void gs_rx_push(struct work_struct *work)
{
	struct gs_port *port = container_of(work,
					struct gs_port,
					push_work);
	struct tty_struct	*tty;
	struct list_head	*queue = &port->read_queue;
	bool			disconnect = false;
	bool			do_push = false;

	/* hand any queued data to the tty */
	spin_lock_irq(&port->port_lock);
	tty = port->port_tty;
	while (!list_empty(queue)) {
		struct usb_request	*req;

		req = list_first_entry(queue, struct usb_request, list);

		/* discard data if tty was closed */
		if (!tty)
			goto recycle;

		/* push data to (open) tty */
		if (req->actual) {
			char		*packet = req->buf;
			unsigned	size = req->actual;
			unsigned	n;
			int		count;

			/* we may have pushed part of this packet already... */
			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			if (MODEM_DEBUG_ON)
				printk("%d: tty_insert %d\n", port->port_num, size);

			count = tty_insert_flip_string(tty, packet, size);
			if (count)
				do_push = true;
			if (count != size) {
				/* stop pushing; TTY layer can't handle more */
				port->n_read += count;
				if (MODEM_DEBUG_ON)
					printk("%d: rx block %d/%d\n",
						port->port_num,
						count, req->actual);
				break;
			}
			port->n_read = 0;
		}
recycle:
		list_move(&req->list, &port->read_pool);
	}

	/* Push from tty to ldisc; this is immediate with low_latency, and
	 * may trigger callbacks to this driver ... so drop the spinlock.
	 */
	if (tty && do_push) {
		spin_unlock_irq(&port->port_lock);
		tty_flip_buffer_push(tty);
		wake_up_interruptible(&tty->read_wait);
		spin_lock_irq(&port->port_lock);

		/* tty may have been closed */
		tty = port->port_tty;
	}


	/* We want our data queue to become empty ASAP, keeping data
	 * in the tty and ldisc (not here).  If we couldn't push any
	 * this time around, there may be trouble unless there's an
	 * implicit tty_unthrottle() call on its way...
	 *
	 * REVISIT we should probably add a timer to keep the tasklet
	 * from starving ... but it's not clear that case ever happens.
	 */
	if (!list_empty(queue) && tty) {
		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
			if (do_push)
				queue_work(gs_tty_wq, &port->push_work);
			else
				pr_warning(PREFIX "%d: RX not scheduled?\n",
					port->port_num);
		}
	}

	/* If we're still connected, refill the USB RX queue. */
	if (!disconnect && port->port_usb)
		gs_start_rx(port);

	spin_unlock_irq(&port->port_lock);
}
Example #3
/*
 * RX tasklet takes data out of the RX queue and hands it up to the TTY
 * layer until it refuses to take any more data (or is throttled back).
 * Then it issues reads for any further data.
 *
 * If the RX queue becomes full enough that no usb_request is queued,
 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
 * can be buffered before the TTY layer's buffers (currently 64 KB).
 */
static void gs_rx_push(unsigned long _port)
{
	struct gs_port		*port = (void *)_port;
	struct tty_struct	*tty;
	struct list_head	*queue = &port->read_queue;
	bool			disconnect = false;
	bool			do_push = false;
	static unsigned int	skip = 0;
	static DEFINE_RATELIMIT_STATE(ratelimit, 1 * HZ, 10);

	/* hand any queued data to the tty */
	spin_lock_irq(&port->port_lock);
	tty = port->port.tty;
	while (!list_empty(queue)) {
		struct usb_request	*req;

		req = list_first_entry(queue, struct usb_request, list);

		/* leave data queued if tty was rx throttled */
		if (tty && test_bit(TTY_THROTTLED, &tty->flags))
			break;

		switch (req->status) {
		case -ESHUTDOWN:
			disconnect = true;
			pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
			break;

		default:
			/* presumably a transient fault */
			pr_warning(PREFIX "%d: unexpected RX status %d\n",
					port->port_num, req->status);
			/* FALLTHROUGH */
		case 0:
			/* normal completion */
			break;
		}

		if (__ratelimit(&ratelimit)) {
			printk(ACM_LOG
				"%s: ttyGS%d: actual=%d, n_read=%d 0x%02x 0x%02x 0x%02x ...\n",
				__func__, port->port_num, req->actual, port->n_read,
				*((u8 *)req->buf), *((u8 *)req->buf+1), *((u8 *)req->buf+2));
			if (skip > 0) {
				printk(ACM_LOG "%s: too much data, skipped %d bytes\n",
					__func__, skip);
				skip = 0;
			}
		} else
			skip += req->actual;

		/* push data to (open) tty */
		if (req->actual) {
			char		*packet = req->buf;
			unsigned	size = req->actual;
			unsigned	n;
			int		count;

			/* we may have pushed part of this packet already... */
			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(&port->port, packet,
					size);
			if (count)
				do_push = true;
			if (count != size) {
				/* stop pushing; TTY layer can't handle more */
				port->n_read += count;
				pr_vdebug(PREFIX "%d: rx block %d/%d\n",
						port->port_num,
						count, req->actual);
				break;
			}
			port->n_read = 0;
		}

		list_move(&req->list, &port->read_pool);
		port->read_started--;
	}

	/* Push from tty to ldisc; without low_latency set this is handled by
	 * a workqueue, so we won't get callbacks and can hold port_lock
	 */
	if (do_push)
		tty_flip_buffer_push(&port->port);


	/* We want our data queue to become empty ASAP, keeping data
	 * in the tty and ldisc (not here).  If we couldn't push any
	 * this time around, there may be trouble unless there's an
	 * implicit tty_unthrottle() call on its way...
	 *
	 * REVISIT we should probably add a timer to keep the tasklet
	 * from starving ... but it's not clear that case ever happens.
	 */
	if (!list_empty(queue) && tty) {
		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
			if (do_push)
				tasklet_schedule(&port->push);
			else
				pr_warning(PREFIX "%d: RX not scheduled?\n",
					port->port_num);
		}
	}

	/* If we're still connected, refill the USB RX queue. */
	if (!disconnect && port->port_usb)
		gs_start_rx(port);

	spin_unlock_irq(&port->port_lock);
}
Example #4
/**
 * isci_terminate_pending_requests() - This function will change the all of the
 *    requests on the given device's state to "aborting", will terminate the
 *    requests, and wait for them to complete.  This function must only be
 *    called from a thread that can wait.  Note that the requests are all
 *    terminated and completed (back to the host, if started there).
 * @isci_host: This parameter specifies SCU.
 * @idev: This parameter specifies the target.
 *
 */
void isci_terminate_pending_requests(struct isci_host *ihost,
				     struct isci_remote_device *idev)
{
	struct completion request_completion;
	enum isci_request_status old_state;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&ihost->scic_lock, flags);
	list_splice_init(&idev->reqs_in_process, &list);

	/* assumes that isci_terminate_request_core deletes from the list */
	while (!list_empty(&list)) {
		struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node);

		/* Change state to "terminating" if it is currently
		 * "started".
		 */
		old_state = isci_request_change_started_to_newstate(ireq,
								    &request_completion,
								    terminating);
		switch (old_state) {
		case started:
		case completed:
		case aborting:
			break;
		default:
			/* termination in progress, or otherwise dispositioned.
			 * We know the request was on 'list' so should be safe
			 * to move it back to reqs_in_process
			 */
			list_move(&ireq->dev_node, &idev->reqs_in_process);
			ireq = NULL;
			break;
		}

		if (!ireq)
			continue;
		spin_unlock_irqrestore(&ihost->scic_lock, flags);

		init_completion(&request_completion);

		dev_dbg(&ihost->pdev->dev,
			 "%s: idev=%p request=%p; task=%p old_state=%d\n",
			 __func__, idev, ireq,
			(!test_bit(IREQ_TMF, &ireq->flags)
				? isci_request_access_task(ireq)
				: NULL),
			old_state);

		/* If the old_state is started:
		 * This request was not already being aborted. If it had been,
		 * then the aborting I/O (ie. the TMF request) would not be in
		 * the aborting state, and thus would be terminated here.  Note
		 * that since the TMF completion's call to the kernel function
		 * "complete()" does not happen until the pending I/O request
		 * terminate fully completes, we do not have to implement a
		 * special wait here for already aborting requests - the
		 * termination of the TMF request will force the request
		 * to finish it's already started terminate.
		 *
		 * If old_state == completed:
		 * This request completed from the SCU hardware perspective
		 * and now just needs cleaning up in terms of freeing the
		 * request and potentially calling up to libsas.
		 *
		 * If old_state == aborting:
		 * This request has already gone through a TMF timeout, but may
		 * not have been terminated; needs cleaning up at least.
		 */
		isci_terminate_request_core(ihost, idev, ireq);
		spin_lock_irqsave(&ihost->scic_lock, flags);
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}
Example #5
static void jffs2_erase_block(struct jffs2_sb_info *c,
			      struct jffs2_eraseblock *jeb)
{
	int ret;
	uint32_t bad_offset;
#ifdef __ECOS
       ret = jffs2_flash_erase(c, jeb);
       if (!ret) {
	       jffs2_erase_succeeded(c, jeb);
	       return;
       }
       bad_offset = jeb->offset;
#else /* Linux */
	struct erase_info *instr;

	D1(printk(KERN_DEBUG "jffs2_erase_block(): erase block %#08x (range %#08x-%#08x)\n",
				jeb->offset, jeb->offset, jeb->offset + c->sector_size));
	instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL);
	if (!instr) {
		printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
		mutex_lock(&c->erase_free_sem);
		spin_lock(&c->erase_completion_lock);
		list_move(&jeb->list, &c->erase_pending_list);
		c->erasing_size -= c->sector_size;
		c->dirty_size += c->sector_size;
		jeb->dirty_size = c->sector_size;
		spin_unlock(&c->erase_completion_lock);
		mutex_unlock(&c->erase_free_sem);
		return;
	}

	memset(instr, 0, sizeof(*instr));

	instr->mtd = c->mtd;
	instr->addr = jeb->offset;
	instr->len = c->sector_size;
	instr->callback = jffs2_erase_callback;
	instr->priv = (unsigned long)(&instr[1]);
	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;

	((struct erase_priv_struct *)instr->priv)->jeb = jeb;
	((struct erase_priv_struct *)instr->priv)->c = c;

	ret = c->mtd->erase(c->mtd, instr);
	if (!ret)
		return;

	bad_offset = instr->fail_addr;
	kfree(instr);
#endif /* __ECOS */

	if (ret == -ENOMEM || ret == -EAGAIN) {
		/* Erase failed immediately. Refile it on the list */
		D1(printk(KERN_DEBUG "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret));
		mutex_lock(&c->erase_free_sem);
		spin_lock(&c->erase_completion_lock);
		list_move(&jeb->list, &c->erase_pending_list);
		c->erasing_size -= c->sector_size;
		c->dirty_size += c->sector_size;
		jeb->dirty_size = c->sector_size;
		spin_unlock(&c->erase_completion_lock);
		mutex_unlock(&c->erase_free_sem);
		return;
	}

	if (ret == -EROFS)
		printk(KERN_WARNING "Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n", jeb->offset);
	else
		printk(KERN_WARNING "Erase at 0x%08x failed immediately: errno %d\n", jeb->offset, ret);

	jffs2_erase_failed(c, jeb, bad_offset);
}
Example #6
static void ring_desc_put(struct pic32_sqi *sqi, struct ring_desc *rdesc)
{
	list_move(&rdesc->list, &sqi->bd_list_free);
}
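ring_desc_put() above just parks a descriptor back on the driver's free list; the matching getter typically performs the reverse move. A hedged sketch of such a counterpart (the bd_list_used field name is assumed for illustration and not verified against the pic32-sqi driver):

static struct ring_desc *ring_desc_get(struct pic32_sqi *sqi)
{
	struct ring_desc *rdesc;

	if (list_empty(&sqi->bd_list_free))
		return NULL;	/* no free descriptors left */

	rdesc = list_first_entry(&sqi->bd_list_free, struct ring_desc, list);
	list_move_tail(&rdesc->list, &sqi->bd_list_used);	/* assumed in-use list */
	return rdesc;
}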
Example #7
/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates for each received request 8 pages
 * Called as a scheduled work item.
 */
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct list_head local_empty;
	int pending = atomic_xchg(&rba->req_pending, 0);

	IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);

	/* If we were scheduled - there is at least one request */
	spin_lock(&rba->lock);
	/* swap out the rba->rbd_empty to a local list */
	list_replace_init(&rba->rbd_empty, &local_empty);
	spin_unlock(&rba->lock);

	while (pending) {
		int i;
		struct list_head local_allocated;

		INIT_LIST_HEAD(&local_allocated);

		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
			struct iwl_rx_mem_buffer *rxb;
			struct page *page;

			/* List should never be empty - each reused RBD is
			 * returned to the list, and initial pool covers any
			 * possible gap between the time the page is allocated
			 * to the time the RBD is added.
			 */
			BUG_ON(list_empty(&local_empty));
			/* Get the first rxb from the rbd list */
			rxb = list_first_entry(&local_empty,
					       struct iwl_rx_mem_buffer, list);
			BUG_ON(rxb->page);

			/* Alloc a new receive buffer */
			page = iwl_pcie_rx_alloc_page(trans, GFP_KERNEL);
			if (!page)
				continue;
			rxb->page = page;

			/* Get physical address of the RB */
			rxb->page_dma = dma_map_page(trans->dev, page, 0,
					PAGE_SIZE << trans_pcie->rx_page_order,
					DMA_FROM_DEVICE);
			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
				rxb->page = NULL;
				__free_pages(page, trans_pcie->rx_page_order);
				continue;
			}
			/* dma address must be no more than 36 bits */
			BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
			/* and also 256 byte aligned! */
			BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

			/* move the allocated entry to the out list */
			list_move(&rxb->list, &local_allocated);
			i++;
		}

		pending--;
		if (!pending) {
			pending = atomic_xchg(&rba->req_pending, 0);
			IWL_DEBUG_RX(trans,
				     "Pending allocation requests = %d\n",
				     pending);
		}

		spin_lock(&rba->lock);
		/* add the allocated rbds to the allocator allocated list */
		list_splice_tail(&local_allocated, &rba->rbd_allocated);
		/* get more empty RBDs for current pending requests */
		list_splice_tail_init(&rba->rbd_empty, &local_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_ready);
	}

	spin_lock(&rba->lock);
	/* return unused rbds to the allocator empty list */
	list_splice_tail(&local_empty, &rba->rbd_empty);
	spin_unlock(&rba->lock);
}
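iwl_pcie_rx_allocator() above increments rba->req_ready once per completed batch of RX_CLAIM_REQ_ALLOC buffers; the consumer on the RX path decrements that counter and pulls one batch off rba->rbd_allocated under rba->lock. A hedged sketch of that consumer step (an approximation for illustration, not the driver's exact function):

static int rx_allocator_take_batch(struct iwl_rb_allocator *rba,
				   struct iwl_rx_mem_buffer **out)
{
	int i;

	/* only proceed if a whole batch has been completed */
	if (atomic_dec_if_positive(&rba->req_ready) < 0)
		return -ENOENT;

	spin_lock(&rba->lock);
	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
		out[i] = list_first_entry(&rba->rbd_allocated,
					  struct iwl_rx_mem_buffer, list);
		list_del(&out[i]->list);
	}
	spin_unlock(&rba->lock);

	return 0;
}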
Example #8
/*
 * gs_start_tx
 *
 * This function finds available write requests, calls
 * gs_send_packet to fill these packets with data, and
 * continues until either there are no more write requests
 * available or no more data to send.  This function is
 * run whenever data arrives or write requests are available.
 *
 * Context: caller owns port_lock; port_usb is non-null.
 */
static int gs_start_tx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
	struct list_head	*pool = &port->write_pool;
	struct usb_ep		*in = port->port_usb->in;
	int			status = 0;
	static long 		prev_len;
	bool			do_tty_wake = false;

	while (!list_empty(pool)) {
		struct usb_request	*req;
		int			len;

		req = list_entry(pool->next, struct usb_request, list);
		len = gs_send_packet(port, req->buf, TX_BUF_SIZE);
		if (len == 0) {
			/* Queue zero length packet */
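			/* A bulk transfer is terminated by a short packet; if the
			 * previous write was an exact multiple of the endpoint's
			 * maxpacket size, the host cannot tell that it ended, so an
			 * explicit zero-length packet is queued to delimit it.
			 */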
#if 1 //QCT_SBA
			if (prev_len && (prev_len % in->maxpacket == 0)) {
#else
			if (prev_len & (prev_len % in->maxpacket == 0)) {
#endif
				req->length = 0;
				list_del(&req->list);

				spin_unlock(&port->port_lock);
				status = usb_ep_queue(in, req, GFP_ATOMIC);
				spin_lock(&port->port_lock);
				if (!port->port_usb) {
					gs_free_req(in, req);
					break;
				}
				if (status) {
					printk(KERN_ERR "%s: %s err %d\n",
					__func__, "queue", status);
					list_add(&req->list, pool);
				}
				prev_len = 0;
			}
			wake_up_interruptible(&port->drain_wait);
			break;
		}
		do_tty_wake = true;

		req->length = len;
		list_del(&req->list);

		pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
				port->port_num, len, *((u8 *)req->buf),
				*((u8 *)req->buf+1), *((u8 *)req->buf+2));

		/* Drop lock while we call out of driver; completions
		 * could be issued while we do so.  Disconnection may
		 * happen too; maybe immediately before we queue this!
		 *
		 * NOTE that we may keep sending data for a while after
		 * the TTY closed (dev->ioport->port_tty is NULL).
		 */
		spin_unlock(&port->port_lock);
		status = usb_ep_queue(in, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);
		/*
		 * If port_usb is NULL, gserial disconnect is called
		 * while the spinlock is dropped and all requests are
		 * freed. Free the current request here.
		 */
		if (!port->port_usb) {
			do_tty_wake = false;
			gs_free_req(in, req);
			break;
		}
		if (status) {
			pr_debug("%s: %s %s err %d\n",
					__func__, "queue", in->name, status);
			list_add(&req->list, pool);
			break;
		}
		prev_len = req->length;

	}

	if (do_tty_wake && port->port_tty)
		tty_wakeup(port->port_tty);
	return status;
}

/*
 * Context: caller owns port_lock, and port_usb is set
 */
static unsigned gs_start_rx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
	struct list_head	*pool = &port->read_pool;
	struct usb_ep		*out = port->port_usb->out;
	unsigned		started = 0;

	while (!list_empty(pool)) {
		struct usb_request	*req;
		int			status;
		struct tty_struct	*tty;

		/* no more rx if closed */
		tty = port->port_tty;
		if (!tty)
			break;

		req = list_entry(pool->next, struct usb_request, list);
		list_del(&req->list);
		req->length = RX_BUF_SIZE;

		/* drop lock while we call out; the controller driver
		 * may need to call us back (e.g. for disconnect)
		 */
		spin_unlock(&port->port_lock);
		status = usb_ep_queue(out, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);
		/*
		 * If port_usb is NULL, gserial disconnect is called
		 * while the spinlock is dropped and all requests are
		 * freed. Free the current request here.
		 */
		if (!port->port_usb) {
			started = 0;
			gs_free_req(out, req);
			break;
		}
		if (status) {
			pr_debug("%s: %s %s err %d\n",
					__func__, "queue", out->name, status);
			list_add(&req->list, pool);
			break;
		}
		started++;

	}
	return started;
}

/*
 * RX work queue takes data out of the RX queue and hands it up to the TTY
 * layer until it refuses to take any more data (or is throttled back).
 * Then it issues reads for any further data.
 *
 * If the RX queue becomes full enough that no usb_request is queued,
 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
 * can be buffered before the TTY layer's buffers (currently 64 KB).
 */
static void gs_rx_push(struct work_struct *w)
{
	struct gs_port		*port = container_of(w, struct gs_port, push);
	struct tty_struct	*tty;
	struct list_head	*queue = &port->read_queue;
	bool			disconnect = false;
	bool			do_push = false;

	/* hand any queued data to the tty */
	spin_lock_irq(&port->port_lock);
	tty = port->port_tty;
	while (!list_empty(queue)) {
		struct usb_request	*req;

		req = list_first_entry(queue, struct usb_request, list);

		/* discard data if tty was closed */
		if (!tty)
			goto recycle;

		/* leave data queued if tty was rx throttled */
		if (test_bit(TTY_THROTTLED, &tty->flags))
			break;

		switch (req->status) {
		case -ESHUTDOWN:
			disconnect = true;
			pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
			break;

		default:
			/* presumably a transient fault */
			pr_warning(PREFIX "%d: unexpected RX status %d\n",
					port->port_num, req->status);
			/* FALLTHROUGH */
		case 0:
			/* normal completion */
			break;
		}

		/* push data to (open) tty */
		if (req->actual) {
			char		*packet = req->buf;
			unsigned	size = req->actual;
			unsigned	n;
			int		count;

			/* we may have pushed part of this packet already... */
			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(tty, packet, size);
			if (count)
				do_push = true;
			if (count != size) {
				/* stop pushing; TTY layer can't handle more */
				port->n_read += count;
				pr_vdebug(PREFIX "%d: rx block %d/%d\n",
						port->port_num,
						count, req->actual);
				break;
			}
			port->n_read = 0;
		}
recycle:
		list_move(&req->list, &port->read_pool);
	}

	/* Push from tty to ldisc; this is immediate with low_latency, and
	 * may trigger callbacks to this driver ... so drop the spinlock.
	 */
	if (tty && do_push) {
		spin_unlock_irq(&port->port_lock);
		tty_flip_buffer_push(tty);
		wake_up_interruptible(&tty->read_wait);
		spin_lock_irq(&port->port_lock);

		/* tty may have been closed */
		tty = port->port_tty;
	}


	/* We want our data queue to become empty ASAP, keeping data
	 * in the tty and ldisc (not here).  If we couldn't push any
	 * this time around, there may be trouble unless there's an
	 * implicit tty_unthrottle() call on its way...
	 *
	 * REVISIT we should probably add a timer to keep the work queue
	 * from starving ... but it's not clear that case ever happens.
	 */
	if (!list_empty(queue) && tty) {
		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
			if (do_push)
				queue_work(gserial_wq, &port->push);
			else
				pr_warning(PREFIX "%d: RX not scheduled?\n",
					port->port_num);
		}
	}

	/* If we're still connected, refill the USB RX queue. */
	if (!disconnect && port->port_usb)
		gs_start_rx(port);

	spin_unlock_irq(&port->port_lock);
}

static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gs_port	*port = ep->driver_data;
	unsigned long flags;

	/* Queue all received data until the tty layer is ready for it. */
	spin_lock_irqsave(&port->port_lock, flags);
	list_add_tail(&req->list, &port->read_queue);
	queue_work(gserial_wq, &port->push);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gs_port	*port = ep->driver_data;
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock, flags);
	list_add(&req->list, &port->write_pool);

	switch (req->status) {
	default:
		/* presumably a transient fault */
		pr_warning("%s: unexpected %s status %d\n",
				__func__, ep->name, req->status);
		/* FALL THROUGH */
	case 0:
		/* normal completion */
		if (port->port_usb)
			gs_start_tx(port);
		break;

	case -ESHUTDOWN:
		/* disconnect */
		pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
		break;
	}

	spin_unlock_irqrestore(&port->port_lock, flags);
}

static void gs_free_requests(struct usb_ep *ep, struct list_head *head)
{
	struct usb_request	*req;

	while (!list_empty(head)) {
		req = list_entry(head->next, struct usb_request, list);
		list_del(&req->list);
		gs_free_req(ep, req);
	}
}

static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
		int num, int size,
		void (*fn)(struct usb_ep *, struct usb_request *))
{
	int			i;
	struct usb_request	*req;

	/* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
	 * do quite that many this time, don't fail ... we just won't
	 * be as speedy as we might otherwise be.
	 */
	for (i = 0; i < num; i++) {
		req = gs_alloc_req(ep, size, GFP_ATOMIC);
		if (!req)
			return list_empty(head) ? -ENOMEM : 0;
		req->complete = fn;
		list_add_tail(&req->list, head);
	}
	return 0;
}
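gs_alloc_requests() and gs_free_requests() are paired by the port-open path of this driver; a hedged sketch of such a caller, reusing the QUEUE_SIZE and RX_BUF_SIZE constants referenced above (the helper name gs_prepare_rx_pool is hypothetical):

static int gs_prepare_rx_pool(struct gs_port *port)
{
	struct usb_ep	*out = port->port_usb->out;
	int		status;

	/* pre-fill the read pool; a partial allocation is tolerated */
	status = gs_alloc_requests(out, &port->read_pool, QUEUE_SIZE,
			RX_BUF_SIZE, gs_read_complete);
	if (status)
		gs_free_requests(out, &port->read_pool);

	return status;
}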
Example #9
/*
 * Find an eligible tree to time-out
 * A tree is eligible if :-
 *  - it is unused by any user process
 *  - it has been unused for exp_timeout time
 */
struct dentry *autofs4_expire_indirect(struct super_block *sb,
				       struct vfsmount *mnt,
				       struct autofs_sb_info *sbi,
				       int how)
{
	unsigned long timeout;
	struct dentry *root = sb->s_root;
	struct dentry *dentry;
	struct dentry *expired = NULL;
	int do_now = how & AUTOFS_EXP_IMMEDIATE;
	int exp_leaves = how & AUTOFS_EXP_LEAVES;
	struct autofs_info *ino;
	unsigned int ino_count;

	if (!root)
		return NULL;

	now = jiffies;
	timeout = sbi->exp_timeout;

	dentry = NULL;
	while ((dentry = get_next_positive_subdir(dentry, root))) {
		spin_lock(&sbi->fs_lock);
		ino = autofs4_dentry_ino(dentry);
		/* No point expiring a pending mount */
		if (ino->flags & AUTOFS_INF_PENDING)
			goto next;

		/*
		 * Case 1: (i) indirect mount or top level pseudo direct mount
		 *	   (autofs-4.1).
		 *	   (ii) indirect mount with offset mount, check the "/"
		 *	   offset (autofs-5.0+).
		 */
		if (d_mountpoint(dentry)) {
			DPRINTK("checking mountpoint %p %.*s",
				dentry, (int)dentry->d_name.len, dentry->d_name.name);

			/* Path walk currently on this dentry? */
			ino_count = atomic_read(&ino->count) + 2;
			if (dentry->d_count > ino_count)
				goto next;

			/* Can we umount this guy */
			if (autofs4_mount_busy(mnt, dentry))
				goto next;

			/* Can we expire this guy */
			if (autofs4_can_expire(dentry, timeout, do_now)) {
				expired = dentry;
				goto found;
			}
			goto next;
		}

		if (simple_empty(dentry))
			goto next;

		/* Case 2: tree mount, expire iff entire tree is not busy */
		if (!exp_leaves) {
			/* Path walk currently on this dentry? */
			ino_count = atomic_read(&ino->count) + 1;
			if (dentry->d_count > ino_count)
				goto next;

			if (!autofs4_tree_busy(mnt, dentry, timeout, do_now)) {
				expired = dentry;
				goto found;
			}
		/*
		 * Case 3: pseudo direct mount, expire individual leaves
		 *	   (autofs-4.1).
		 */
		} else {
			/* Path walk currently on this dentry? */
			ino_count = atomic_read(&ino->count) + 1;
			if (dentry->d_count > ino_count)
				goto next;

			expired = autofs4_check_leaves(mnt, dentry, timeout, do_now);
			if (expired) {
				dput(dentry);
				goto found;
			}
		}
next:
		spin_unlock(&sbi->fs_lock);
	}
	return NULL;

found:
	DPRINTK("returning %p %.*s",
		expired, (int)expired->d_name.len, expired->d_name.name);
	ino = autofs4_dentry_ino(expired);
	ino->flags |= AUTOFS_INF_EXPIRING;
	init_completion(&ino->expire_complete);
	spin_unlock(&sbi->fs_lock);
	spin_lock(&sbi->lookup_lock);
	spin_lock(&expired->d_parent->d_lock);
	spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
	list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
	spin_unlock(&expired->d_lock);
	spin_unlock(&expired->d_parent->d_lock);
	spin_unlock(&sbi->lookup_lock);
	return expired;
}
Example #10
/**
 * Deactivates a QH. For non-periodic QHs, removes the QH from the active
 * non-periodic schedule. The QH is added to the inactive non-periodic
 * schedule if any QTDs are still attached to the QH.
 *
 * For periodic QHs, the QH is removed from the periodic queued schedule. If
 * there are any QTDs still attached to the QH, the QH is added to either the
 * periodic inactive schedule or the periodic ready schedule and its next
 * scheduled frame is calculated. The QH is placed in the ready schedule if
 * the scheduled frame has been reached already. Otherwise it's placed in the
 * inactive schedule. If there are no QTDs attached to the QH, the QH is
 * completely removed from the periodic schedule.
 */
void dwc_otg_hcd_qh_deactivate(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, int sched_next_periodic_split)
{
	unsigned long flags;
	SPIN_LOCK_IRQSAVE(&hcd->lock, flags);

	if (dwc_qh_is_non_per(qh)) {
		dwc_otg_hcd_qh_remove(hcd, qh);
		if (!list_empty(&qh->qtd_list)) {
			/* Add back to inactive non-periodic schedule. */
			dwc_otg_hcd_qh_add(hcd, qh);
		}
	} else {
		uint16_t frame_number =	dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd));

		if (qh->do_split) {
			/* Schedule the next continuing periodic split transfer */
			if (sched_next_periodic_split) {

				qh->sched_frame = frame_number;
				if (dwc_frame_num_le(frame_number,
						     dwc_frame_num_inc(qh->start_split_frame, 1))) {
					/*
					 * Allow one frame to elapse after start
					 * split microframe before scheduling
					 * complete split, but DONT if we are
					 * doing the next start split in the
					 * same frame for an ISOC out.
					 */
					if ((qh->ep_type != USB_ENDPOINT_XFER_ISOC) || (qh->ep_is_in != 0)) {
						qh->sched_frame = dwc_frame_num_inc(qh->sched_frame, 1);
					}
				}
			} else {
				qh->sched_frame = dwc_frame_num_inc(qh->start_split_frame,
								     qh->interval);
				if (dwc_frame_num_le(qh->sched_frame, frame_number)) {
					qh->sched_frame = frame_number;
				}
				qh->sched_frame |= 0x7;
				qh->start_split_frame = qh->sched_frame;
			}
		} else {
			qh->sched_frame = dwc_frame_num_inc(qh->sched_frame, qh->interval);
			if (dwc_frame_num_le(qh->sched_frame, frame_number)) {
				qh->sched_frame = frame_number;
			}
		}

		if (list_empty(&qh->qtd_list)) {
			dwc_otg_hcd_qh_remove(hcd, qh);
		} else {
			/*
			 * Remove from periodic_sched_queued and move to
			 * appropriate queue.
			 */
			if (qh->sched_frame == frame_number) {
				list_move(&qh->qh_list_entry,
					  &hcd->periodic_sched_ready);
			} else {
				list_move(&qh->qh_list_entry,
					  &hcd->periodic_sched_inactive);
			}
		}
	}

	SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags);
}
Example #11
static void jffs2_erase_block(struct jffs2_sb_info *c,
			      struct jffs2_eraseblock *jeb)
{
	int ret;
	uint32_t bad_offset;
#ifdef __ECOS
       ret = jffs2_flash_erase(c, jeb);
       if (!ret) {
	       jffs2_erase_succeeded(c, jeb);
	       return;
       }
       bad_offset = jeb->offset;
#else /* Linux */
	struct erase_info *instr;

	D1(printk(KERN_DEBUG "jffs2_erase_block(): erase block %#08x (range %#08x-%#08x)\n",
				jeb->offset, jeb->offset, jeb->offset + c->sector_size));
	instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL);
	if (!instr) {
		printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
		down(&c->erase_free_sem);
		spin_lock(&c->erase_completion_lock);
		list_move(&jeb->list, &c->erase_pending_list);
		c->erasing_size -= c->sector_size;
		c->dirty_size += c->sector_size;
		jeb->dirty_size = c->sector_size;
		spin_unlock(&c->erase_completion_lock);
		up(&c->erase_free_sem);
		return;
	}

	memset(instr, 0, sizeof(*instr));

	instr->mtd = c->mtd;
	instr->addr = jeb->offset;
	instr->len = c->sector_size;
	instr->callback = jffs2_erase_callback;
	instr->priv = (unsigned long)(&instr[1]);
	instr->fail_addr = 0xffffffff;

	((struct erase_priv_struct *)instr->priv)->jeb = jeb;
	((struct erase_priv_struct *)instr->priv)->c = c;

	ret = c->mtd->erase(c->mtd, instr);
	if (!ret)
		return;

	bad_offset = instr->fail_addr;
	kfree(instr);
#endif /* __ECOS */

	if (ret == -ENOMEM || ret == -EAGAIN) {
		/* Erase failed immediately. Refile it on the list */
		D1(printk(KERN_DEBUG "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret));
		down(&c->erase_free_sem);
		spin_lock(&c->erase_completion_lock);
		list_move(&jeb->list, &c->erase_pending_list);
		c->erasing_size -= c->sector_size;
		c->dirty_size += c->sector_size;
		jeb->dirty_size = c->sector_size;
		spin_unlock(&c->erase_completion_lock);
		up(&c->erase_free_sem);
		return;
	}

	if (ret == -EROFS)
		printk(KERN_WARNING "Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n", jeb->offset);
	else
		printk(KERN_WARNING "Erase at 0x%08x failed immediately: errno %d\n", jeb->offset, ret);

	jffs2_erase_failed(c, jeb, bad_offset);
}
Example #12
int modify_job_attr(

  job      *pjob,  /* I (modified) */
  svrattrl *plist, /* I */
  int       perm,
  int      *bad)   /* O */

  {
  int            allow_unkn = -1;
  long           i;
  pbs_attribute  newattr[JOB_ATR_LAST];
  pbs_attribute *pattr;
  int            rc;
  char           log_buf[LOCAL_LOG_BUF_SIZE];
  pbs_queue     *pque;

  if ((pque = get_jobs_queue(&pjob)) != NULL)
    {
    mutex_mgr pque_mutex = mutex_mgr(pque->qu_mutex, true);
    if (pque->qu_qs.qu_type != QTYPE_Execution)
      allow_unkn = JOB_ATR_UNKN;

    }
  else if (pjob->ji_parent_job != NULL)
    {
    allow_unkn = JOB_ATR_UNKN;
    }
  else
    {
    log_err(PBSE_JOBNOTFOUND, __func__, "Job lost while acquiring queue 5");
    return(PBSE_JOBNOTFOUND);
    }

  pattr = pjob->ji_wattr;

  /* call attr_atomic_set to decode and set a copy of the attributes */

  rc = attr_atomic_set(
         plist,        /* I */
         pattr,        /* I */
         newattr,      /* O */
         job_attr_def, /* I */
         JOB_ATR_LAST,
         allow_unkn,   /* I */
         perm,         /* I */
         bad);         /* O */

  /* if resource limits are being changed ... */

  if ((rc == 0) &&
      (newattr[JOB_ATR_resource].at_flags & ATR_VFLAG_SET))
    {
    if ((perm & (ATR_DFLAG_MGWR | ATR_DFLAG_OPWR)) == 0)
      {
      /* If job is running, only manager/operator can raise limits */

      if (pjob->ji_qs.ji_state == JOB_STATE_RUNNING)
        {
        long lim = TRUE;
        int comp_resc_lt;
       
        get_svr_attr_l(SRV_ATR_QCQLimits, &lim);
        comp_resc_lt = comp_resc2(&pjob->ji_wattr[JOB_ATR_resource],
                                      &newattr[JOB_ATR_resource],
                                      lim,
                                      NULL,
                                      LESS);

        if (comp_resc_lt != 0)
          {
          rc = PBSE_PERM;
          }
        }

      /* Also check against queue and system limits */

      if (rc == 0)
        {
        if ((pque = get_jobs_queue(&pjob)) != NULL)
          {
          mutex_mgr pque_mutex = mutex_mgr(pque->qu_mutex, true);
          rc = chk_resc_limits( &newattr[JOB_ATR_resource], pque, NULL);
          }
        else if (pjob == NULL)
          {
          log_err(PBSE_JOBNOTFOUND, __func__, "Job lost while acquiring queue 6");
          return(PBSE_JOBNOTFOUND);
          }
        else
          rc = PBSE_QUENOTAVAILABLE;
        }
      }
    }    /* END if ((rc == 0) && ...) */

  /* special check on permissions for hold */

  if ((rc == 0) &&
      (newattr[JOB_ATR_hold].at_flags & ATR_VFLAG_MODIFY))
    {
    i = newattr[JOB_ATR_hold].at_val.at_long ^
        (pattr + JOB_ATR_hold)->at_val.at_long;

    rc = chk_hold_priv(i, perm);
    }

  if (rc == 0)
    {
    for (i = 0;i < JOB_ATR_LAST;i++)
      {
      if (newattr[i].at_flags & ATR_VFLAG_MODIFY)
        {
        if (job_attr_def[i].at_action)
          {
          rc = job_attr_def[i].at_action(
                 &newattr[i],
                 pjob,
                 ATR_ACTION_ALTER);

          if (rc)
            break;
          }
        }
      }    /* END for (i) */

    if ((rc == 0) &&
        ((newattr[JOB_ATR_userlst].at_flags & ATR_VFLAG_MODIFY) ||
         (newattr[JOB_ATR_grouplst].at_flags & ATR_VFLAG_MODIFY)))
      {
      /* need to reset execution uid and gid */

      rc = set_jobexid(pjob, newattr, NULL);
      }

    if ((rc == 0) &&
        (newattr[JOB_ATR_outpath].at_flags & ATR_VFLAG_MODIFY))
      {
      /* need to recheck if JOB_ATR_outpath is a special case of host only */

      if (newattr[JOB_ATR_outpath].at_val.at_str[strlen(newattr[JOB_ATR_outpath].at_val.at_str) - 1] == ':')
        {
        std::string ds = "";
        newattr[JOB_ATR_outpath].at_val.at_str = strdup(prefix_std_file(pjob, ds, (int)'o'));
        }
      /*
       * if the output path was specified and ends with a '/'
       * then append the standard file name
       */
      else if (newattr[JOB_ATR_outpath].at_val.at_str[strlen(newattr[JOB_ATR_outpath].at_val.at_str) - 1] == '/')
        {
        std::string ds = "";
        newattr[JOB_ATR_outpath].at_val.at_str[strlen(newattr[JOB_ATR_outpath].at_val.at_str) - 1] = '\0';
        
        replace_attr_string(&newattr[JOB_ATR_outpath],
          (strdup(add_std_filename(pjob, newattr[JOB_ATR_outpath].at_val.at_str, (int)'o', ds))));
        }
      }

    if ((rc == 0) &&
        (newattr[JOB_ATR_errpath].at_flags & ATR_VFLAG_MODIFY))
      {
      /* need to recheck if JOB_ATR_errpath is a special case of host only */

      if (newattr[JOB_ATR_errpath].at_val.at_str[strlen(newattr[JOB_ATR_errpath].at_val.at_str) - 1] == ':')
        {
        std::string ds = "";
        newattr[JOB_ATR_errpath].at_val.at_str = strdup(prefix_std_file(pjob, ds, (int)'e'));
        }
      /*
       * if the error path was specified and ends with a '/'
       * then append the standard file name
       */
      else if (newattr[JOB_ATR_errpath].at_val.at_str[strlen(newattr[JOB_ATR_errpath].at_val.at_str) - 1] == '/')
        {
        std::string ds = "";
        newattr[JOB_ATR_errpath].at_val.at_str[strlen(newattr[JOB_ATR_errpath].at_val.at_str) - 1] = '\0';
        
        replace_attr_string(&newattr[JOB_ATR_errpath],
          (strdup(add_std_filename(pjob, newattr[JOB_ATR_errpath].at_val.at_str,(int)'e', ds))));
        }
      }

    }  /* END if (rc == 0) */

  if (rc != 0)
    {
    for (i = 0;i < JOB_ATR_LAST;i++)
      job_attr_def[i].at_free(newattr + i);

    /* FAILURE */

    return(rc);
    }  /* END if (rc != 0) */

  /* OK, now copy the new values into the job attribute array */

  for (i = 0;i < JOB_ATR_LAST;i++)
    {
    if (newattr[i].at_flags & ATR_VFLAG_MODIFY)
      {
      if (LOGLEVEL >= 7)
        {
        sprintf(log_buf, "attr %s modified", job_attr_def[i].at_name);

        log_event(PBSEVENT_JOB,PBS_EVENTCLASS_JOB,pjob->ji_qs.ji_jobid,log_buf);
        }

      job_attr_def[i].at_free(pattr + i);

      if ((newattr[i].at_type == ATR_TYPE_LIST) ||
          (newattr[i].at_type == ATR_TYPE_RESC))
        {
        list_move(
          &newattr[i].at_val.at_list,
          &(pattr + i)->at_val.at_list);
        }
      else
        {
        *(pattr + i) = newattr[i];
        }

      (pattr + i)->at_flags = newattr[i].at_flags;
      }
    }    /* END for (i) */

  /* note, the newattr[] attributes are on the stack, they go away automatically */

  pjob->ji_modified = 1;

  return(PBSE_NONE);
  }  /* END modify_job_attr() */
Example #13
static void free_fixup(struct gcfixup *gcfixup)
{
	GCLOCK(&g_fixuplock);
	list_move(&gcfixup->link, &g_fixupvac);
	GCUNLOCK(&g_fixuplock);
}
Example #14
static void free_schedunmap(struct gcschedunmap *gcschedunmap)
{
	GCLOCK(&g_unmaplock);
	list_move(&gcschedunmap->link, &g_unmapvac);
	GCUNLOCK(&g_unmaplock);
}
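Examples #13 and #14 both park a finished entry on a vacated list under a driver lock instead of freeing it on the spot; whoever refills the free pool later typically splices the vacated entries back in one batch. A generic, hypothetical sketch of that reclaim step (names are illustrative, not taken from the gccore driver):

static void reclaim_vacated(struct list_head *vacated,
			    struct list_head *freepool,
			    spinlock_t *lock)
{
	LIST_HEAD(tmp);

	/* grab everything vacated so far while holding the lock */
	spin_lock(lock);
	list_splice_init(vacated, &tmp);
	spin_unlock(lock);

	/* return the whole batch to the caller-owned free pool */
	list_splice_tail(&tmp, freepool);
}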
Example #15
	list_for_each_entry_safe(spawn, n, spawns, list) {
		if ((spawn->alg->cra_flags ^ new_type) & spawn->mask)
			continue;

		list_move(&spawn->list, &top);
	}
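The loop above moves entries while walking the list, which is why list_for_each_entry_safe() is used: it caches the next node before the body can re-link the current one. A minimal generic sketch of the same pattern with a hypothetical item type:

struct item {
	struct list_head	list;
	unsigned long		flags;
};

static void drain_matching(struct list_head *src, struct list_head *dst,
			   unsigned long mask)
{
	struct item *it, *tmp;

	/* tmp always points at the next node, so moving 'it' is safe */
	list_for_each_entry_safe(it, tmp, src, list) {
		if (!(it->flags & mask))
			continue;
		list_move(&it->list, dst);
	}
}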
Example #16
/*
 * Find an eligible tree to time-out
 * A tree is eligible if :-
 *  - it is unused by any user process
 *  - it has been unused for exp_timeout time
 */
struct dentry *autofs4_expire_indirect(struct super_block *sb,
				       struct vfsmount *mnt,
				       struct autofs_sb_info *sbi,
				       int how)
{
	unsigned long timeout;
	struct dentry *root = sb->s_root;
	struct dentry *expired = NULL;
	struct list_head *next;
	int do_now = how & AUTOFS_EXP_IMMEDIATE;
	int exp_leaves = how & AUTOFS_EXP_LEAVES;
	struct autofs_info *ino;
	unsigned int ino_count;

	if (!root)
		return NULL;

	now = jiffies;
	timeout = sbi->exp_timeout;

	spin_lock(&dcache_lock);
	next = root->d_subdirs.next;

	/* On exit from the loop expire is set to a dgot dentry
	 * to expire or it's NULL */
	while ( next != &root->d_subdirs ) {
		struct dentry *dentry = list_entry(next, struct dentry, d_u.d_child);

		/* Negative dentry - give up */
		if (!simple_positive(dentry)) {
			next = next->next;
			continue;
		}

		dentry = dget(dentry);
		spin_unlock(&dcache_lock);

		spin_lock(&sbi->fs_lock);
		ino = autofs4_dentry_ino(dentry);

		/*
		 * Case 1: (i) indirect mount or top level pseudo direct mount
		 *	   (autofs-4.1).
		 *	   (ii) indirect mount with offset mount, check the "/"
		 *	   offset (autofs-5.0+).
		 */
		if (d_mountpoint(dentry)) {
			DPRINTK("checking mountpoint %p %.*s",
				dentry, (int)dentry->d_name.len, dentry->d_name.name);

			/* Path walk currently on this dentry? */
			ino_count = atomic_read(&ino->count) + 2;
			if (atomic_read(&dentry->d_count) > ino_count)
				goto next;

			/* Can we umount this guy */
			if (autofs4_mount_busy(mnt, dentry))
				goto next;

			/* Can we expire this guy */
			if (autofs4_can_expire(dentry, timeout, do_now)) {
				expired = dentry;
				goto found;
			}
			goto next;
		}

		if (simple_empty(dentry))
			goto next;

		/* Case 2: tree mount, expire iff entire tree is not busy */
		if (!exp_leaves) {
			/* Path walk currently on this dentry? */
			ino_count = atomic_read(&ino->count) + 1;
			if (atomic_read(&dentry->d_count) > ino_count)
				goto next;

			if (!autofs4_tree_busy(mnt, dentry, timeout, do_now)) {
				expired = dentry;
				goto found;
			}
		/*
		 * Case 3: pseudo direct mount, expire individual leaves
		 *	   (autofs-4.1).
		 */
		} else {
			/* Path walk currently on this dentry? */
			ino_count = atomic_read(&ino->count) + 1;
			if (atomic_read(&dentry->d_count) > ino_count)
				goto next;

			expired = autofs4_check_leaves(mnt, dentry, timeout, do_now);
			if (expired) {
				dput(dentry);
				goto found;
			}
		}
next:
		spin_unlock(&sbi->fs_lock);
		dput(dentry);
		spin_lock(&dcache_lock);
		next = next->next;
	}
	spin_unlock(&dcache_lock);
	return NULL;

found:
	DPRINTK("returning %p %.*s",
		expired, (int)expired->d_name.len, expired->d_name.name);
	ino = autofs4_dentry_ino(expired);
	ino->flags |= AUTOFS_INF_EXPIRING;
	init_completion(&ino->expire_complete);
	spin_unlock(&sbi->fs_lock);
	spin_lock(&dcache_lock);
	list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
	spin_unlock(&dcache_lock);
	return expired;
}
Example #17
/*
 * RX tasklet takes data out of the RX queue and hands it up to the TTY
 * layer until it refuses to take any more data (or is throttled back).
 * Then it issues reads for any further data.
 *
 * If the RX queue becomes full enough that no usb_request is queued,
 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
 * can be buffered before the TTY layer's buffers (currently 64 KB).
 */
static void gs_rx_push(struct work_struct *w)
{
    struct gs_port		*port = container_of(w, struct gs_port, push);
    struct tty_struct	*tty;
    struct list_head	*queue = &port->read_queue;
    bool			disconnect = false;
    bool			do_push = false;

    /* hand any queued data to the tty */
    spin_lock_irq(&port->port_lock);
    tty = port->port_tty;
    while (!list_empty(queue)) {
        struct usb_request	*req;

        req = list_first_entry(queue, struct usb_request, list);

        /* discard data if tty was closed */
        if (!tty)
            goto recycle;

        /* leave data queued if tty was rx throttled */
        if (test_bit(TTY_THROTTLED, &tty->flags))
            break;

        switch (req->status) {
        case -ESHUTDOWN:
            disconnect = true;
            pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
            break;

        default:
            /* presumably a transient fault */
            pr_warning(PREFIX "%d: unexpected RX status %d\n",
                       port->port_num, req->status);
        /* FALLTHROUGH */
        case 0:
            /* normal completion */
            break;
        }

        /* push data to (open) tty */
        if (req->actual) {
            char		*packet = req->buf;
            unsigned	size = req->actual;
            unsigned	n;
            int		count;

            /* we may have pushed part of this packet already... */
            n = port->n_read;
            if (n) {
                packet += n;
                size -= n;
            }

            count = tty_insert_flip_string(tty, packet, size);
            port->nbytes_to_tty += count;
            if (count)
                do_push = true;
            if (count != size) {
                /* stop pushing; TTY layer can't handle more */
                port->n_read += count;
                pr_vdebug(PREFIX "%d: rx block %d/%d\n",
                          port->port_num,
                          count, req->actual);
                break;
            }
            port->n_read = 0;
        }
recycle:
        list_move(&req->list, &port->read_pool);
        port->read_started--;
    }

    /* Push from tty to ldisc; without low_latency set this is handled by
     * a workqueue, so we won't get callbacks and can hold port_lock
     */
    if (tty && do_push)
        tty_flip_buffer_push(tty);

    /* We want our data queue to become empty ASAP, keeping data
     * in the tty and ldisc (not here).  If we couldn't push any
     * this time around, there may be trouble unless there's an
     * implicit tty_unthrottle() call on its way...
     *
     * REVISIT we should probably add a timer to keep the work queue
     * from starving ... but it's not clear that case ever happens.
     */
    if (!list_empty(queue) && tty) {
        if (!test_bit(TTY_THROTTLED, &tty->flags)) {
            if (do_push)
                queue_work(gserial_wq, &port->push);
            else
                pr_warning(PREFIX "%d: RX not scheduled?\n",
                           port->port_num);
        }
    }

    /* If we're still connected, refill the USB RX queue. */
    if (!disconnect && port->port_usb)
        gs_start_rx(port);

    spin_unlock_irq(&port->port_lock);
}
Example #18
int modify_job_attr(

  job    *pjob,  /* I (modified) */
  svrattrl *plist, /* I */
  int     perm,
  int    *bad)   /* O */

  {
  int      allow_unkn;
  long      i;
  attribute  newattr[(int)JOB_ATR_LAST];
  attribute *pattr;
  int      rc;

  if (pjob->ji_qhdr->qu_qs.qu_type == QTYPE_Execution)
    allow_unkn = -1;
  else
    allow_unkn = (int)JOB_ATR_UNKN;

  pattr = pjob->ji_wattr;

  /* call attr_atomic_set to decode and set a copy of the attributes */

  rc = attr_atomic_set(
         plist,        /* I */
         pattr,        /* I */
         newattr,      /* O */
         job_attr_def, /* I */
         JOB_ATR_LAST,
         allow_unkn,   /* I */
         perm,         /* I */
         bad);         /* O */

  /* if resource limits are being changed ... */

  if ((rc == 0) &&
      (newattr[(int)JOB_ATR_resource].at_flags & ATR_VFLAG_SET))
    {
    if ((perm & (ATR_DFLAG_MGWR | ATR_DFLAG_OPWR)) == 0)
      {
      /* If job is running, only manager/operator can raise limits */

      if (pjob->ji_qs.ji_state == JOB_STATE_RUNNING)
        {
        if ((comp_resc2(
               &pjob->ji_wattr[(int)JOB_ATR_resource],
               &newattr[(int)JOB_ATR_resource],
               server.sv_attr[(int)SRV_ATR_QCQLimits].at_val.at_long,
               NULL) == -1) ||
            (comp_resc_lt != 0))
          {
          rc = PBSE_PERM;
          }
        }

      /* Also check against queue and system limits */

      if (rc == 0)
        {
        rc = chk_resc_limits(
               &newattr[(int)JOB_ATR_resource],
               pjob->ji_qhdr,
               NULL);
        }
      }
    }    /* END if ((rc == 0) && ...) */

  /* special check on permissions for hold */

  if ((rc == 0) &&
      (newattr[(int)JOB_ATR_hold].at_flags & ATR_VFLAG_MODIFY))
    {
    i = newattr[(int)JOB_ATR_hold].at_val.at_long ^
        (pattr + (int)JOB_ATR_hold)->at_val.at_long;

    rc = chk_hold_priv(i, perm);
    }

  if (rc == 0)
    {
    for (i = 0;i < JOB_ATR_LAST;i++)
      {
      if (newattr[i].at_flags & ATR_VFLAG_MODIFY)
        {
        if (job_attr_def[i].at_action)
          {
          rc = job_attr_def[i].at_action(
                 &newattr[i],
                 pjob,
                 ATR_ACTION_ALTER);

          if (rc)
            break;
          }
        }
      }    /* END for (i) */

    if ((rc == 0) &&
        ((newattr[(int)JOB_ATR_userlst].at_flags & ATR_VFLAG_MODIFY) ||
         (newattr[(int)JOB_ATR_grouplst].at_flags & ATR_VFLAG_MODIFY)))
      {
      /* need to reset execution uid and gid */

      rc = set_jobexid(pjob, newattr, NULL);
      }

    if ((rc == 0) &&
        (newattr[(int)JOB_ATR_outpath].at_flags & ATR_VFLAG_MODIFY))
      {
      /* need to recheck if JOB_ATR_outpath is a special case of host only */

      if (newattr[(int)JOB_ATR_outpath].at_val.at_str[strlen(newattr[(int)JOB_ATR_outpath].at_val.at_str) - 1] == ':')
        {
        newattr[(int)JOB_ATR_outpath].at_val.at_str =
          prefix_std_file(pjob, (int)'o');
        }
      /*
       * if the output path was specified and ends with a '/'
       * then append the standard file name
       */
      else if (newattr[(int)JOB_ATR_outpath].at_val.at_str[strlen(newattr[(int)JOB_ATR_outpath].at_val.at_str) - 1] == '/')
        {
          newattr[(int)JOB_ATR_outpath].at_val.at_str[strlen(newattr[(int)JOB_ATR_outpath].at_val.at_str) - 1] = '\0';
          
          replace_attr_string(&newattr[(int)JOB_ATR_outpath],
                            (add_std_filename(pjob,
                            newattr[(int)JOB_ATR_outpath].at_val.at_str,
                            (int)'o')));
        }
      }

    if ((rc == 0) &&
        (newattr[(int)JOB_ATR_errpath].at_flags & ATR_VFLAG_MODIFY))
      {
      /* need to recheck if JOB_ATR_errpath is a special case of host only */

      if (newattr[(int)JOB_ATR_errpath].at_val.at_str[strlen(newattr[(int)JOB_ATR_errpath].at_val.at_str) - 1] == ':')
        {
        newattr[(int)JOB_ATR_errpath].at_val.at_str =
          prefix_std_file(pjob, (int)'e');
        }
      /*
       * if the error path was specified and ends with a '/'
       * then append the standard file name
       */
      else if (newattr[(int)JOB_ATR_errpath].at_val.at_str[strlen(newattr[(int)JOB_ATR_errpath].at_val.at_str) - 1] == '/')
        {
          newattr[(int)JOB_ATR_errpath].at_val.at_str[strlen(newattr[(int)JOB_ATR_errpath].at_val.at_str) - 1] = '\0';
          
          replace_attr_string(&newattr[(int)JOB_ATR_errpath],
                            (add_std_filename(pjob,
                            newattr[(int)JOB_ATR_errpath].at_val.at_str,
                            (int)'e')));
        }
      }

    }  /* END if (rc == 0) */

  if (rc != 0)
    {
    for (i = 0;i < JOB_ATR_LAST;i++)
      job_attr_def[i].at_free(newattr + i);

    /* FAILURE */

    return(rc);
    }  /* END if (rc != 0) */

  /* OK, now copy the new values into the job attribute array */

  for (i = 0;i < JOB_ATR_LAST;i++)
    {
    if (newattr[i].at_flags & ATR_VFLAG_MODIFY)
      {
      if (LOGLEVEL >= 7)
        {
        sprintf(log_buffer, "attr %s modified",
                job_attr_def[i].at_name);

        LOG_EVENT(
          PBSEVENT_JOB,
          PBS_EVENTCLASS_JOB,
          pjob->ji_qs.ji_jobid,
          log_buffer);
        }

      job_attr_def[i].at_free(pattr + i);

      if ((newattr[i].at_type == ATR_TYPE_LIST) ||
          (newattr[i].at_type == ATR_TYPE_RESC))
        {
        list_move(
          &newattr[i].at_val.at_list,
          &(pattr + i)->at_val.at_list);
        }
      else
        {
        *(pattr + i) = newattr[i];
        }

      (pattr + i)->at_flags = newattr[i].at_flags;
      }
    }    /* END for (i) */

  /* note, the newattr[] attributes are on the stack, they go away automatically */

  pjob->ji_modified = 1;

  return(0);
  }  /* END modify_job_attr() */