Example #1
/*
 * Set up the argument/result storage required for the RPC call.
 */
static int nfs_write_rpcsetup(struct nfs_page *req,
		struct nfs_write_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset,
		int how)
{
	struct inode *inode = req->wb_context->path.dentry->d_inode;
	int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	int priority = flush_task_priority(how);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = req->wb_context->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(inode),
		.task = &data->task,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = flags,
		.priority = priority,
	};

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	data->req = req;
	data->inode = inode = req->wb_context->path.dentry->d_inode;
	data->cred = msg.rpc_cred;

	data->args.fh     = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pagevec;
	data->args.count  = count;
	data->args.context = get_nfs_open_context(req->wb_context);
	data->args.stable  = NFS_UNSTABLE;
	if (how & FLUSH_STABLE) {
		data->args.stable = NFS_DATA_SYNC;
		if (!nfs_need_commit(NFS_I(inode)))
			data->args.stable = NFS_FILE_SYNC;
	}

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct.  */
	NFS_PROTO(inode)->write_setup(data, &msg);

	dprintk("NFS: %5u initiated write call "
		"(req %s/%lld, %u bytes @ offset %llu)\n",
		data->task.tk_pid,
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count,
		(unsigned long long)data->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

/* If a nfs_flush_* function fails, it should remove reqs from @head and
 * call this on each, which will prepare them to be retried on next
 * writeback using standard nfs.
 */
static void nfs_redirty_request(struct nfs_page *req)
{
	nfs_mark_request_dirty(req);
	nfs_end_page_writeback(req->wb_page);
	nfs_clear_page_tag_locked(req);
}

/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_write_data *data;
	size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
	unsigned int offset;
	int requests = 0;
	int ret = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = count;
	do {
		size_t len = min(nbytes, wsize);

		data = nfs_writedata_alloc(1);
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while (nbytes != 0);
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	offset = 0;
	nbytes = count;
	do {
		int ret2;

		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes < wsize)
			wsize = nbytes;
		ret2 = nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
				   wsize, offset, how);
		if (ret == 0)
			ret = ret2;
		offset += wsize;
		nbytes -= wsize;
	} while (nbytes != 0);

	return ret;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_writedata_release(data);
	}
	nfs_redirty_request(req);
	return -ENOMEM;
}

/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
{
	struct nfs_page		*req;
	struct page		**pages;
	struct nfs_write_data	*data;

	data = nfs_writedata_alloc(npages);
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		*pages++ = req->wb_page;
	}
	req = nfs_list_entry(data->pages.next);

	/* Set up the argument struct */
	return nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);
 out_bad:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_redirty_request(req);
	}
	return -ENOMEM;
}

static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
				  struct inode *inode, int ioflags)
{
	size_t wsize = NFS_SERVER(inode)->wsize;

	if (wsize < PAGE_CACHE_SIZE)
		nfs_pageio_init(pgio, inode, nfs_flush_multi, wsize, ioflags);
	else
		nfs_pageio_init(pgio, inode, nfs_flush_one, wsize, ioflags);
}
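The chunking arithmetic in nfs_flush_multi() above is easier to see in isolation. Below is a minimal userspace sketch (not NFS code; split_into_chunks() and issue_chunk() are hypothetical names) of the same loop: walk a dirty range in wsize-sized pieces, hand each piece its offset and length, and count the pieces, which is the value nfs_flush_multi() stores in wb_complete before issuing the RPCs.

#include <stdio.h>
#include <stddef.h>

static void issue_chunk(size_t offset, size_t len)
{
	printf("chunk at offset %zu, %zu bytes\n", offset, len);
}

/* Split a dirty range of `count` bytes (count > 0, as in the kernel path)
 * into pieces of at most `wsize` bytes; returns how many pieces were issued. */
static int split_into_chunks(size_t count, size_t wsize)
{
	size_t offset = 0, nbytes = count;
	int requests = 0;

	do {
		size_t len = nbytes < wsize ? nbytes : wsize;

		issue_chunk(offset, len);	/* one RPC per piece in nfs_flush_multi() */
		offset += len;
		nbytes -= len;
		requests++;
	} while (nbytes != 0);

	return requests;
}

int main(void)
{
	/* 10000 bytes with a 4096-byte wsize -> 3 requests (4096 + 4096 + 1808). */
	return split_into_chunks(10000, 4096) == 3 ? 0 : 1;
}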
Example #2
/*
 * the back merge hash support functions
 */
static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
{
    drq->on_hash = 0;
    list_del_init(&drq->hash);
}
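__deadline_del_drq_hash() pairs list_del_init() with clearing on_hash so the entry can later be tested and unhashed again without tripping over stale pointers. The sketch below is a minimal userspace re-implementation of the circular-list idiom (an assumption-level sketch, not <linux/list.h>) showing the property that matters: after list_del_init() the node points back at itself, so list_empty(&node) reliably answers "is this entry still linked anywhere?".

#include <assert.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	INIT_LIST_HEAD(entry);		/* the crucial extra step vs. plain list_del() */
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

int main(void)
{
	struct list_head hash, node;

	INIT_LIST_HEAD(&hash);
	INIT_LIST_HEAD(&node);

	list_add_tail(&node, &hash);
	assert(!list_empty(&hash));

	list_del_init(&node);		/* what __deadline_del_drq_hash() does */
	assert(list_empty(&hash));
	assert(list_empty(&node));	/* safe "am I still hashed?" test afterwards */
	return 0;
}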
Example #3
/**
 * v9fs_unregister_trans - unregister a 9p transport
 * @m: the transport to remove
 *
 */
void v9fs_unregister_trans(struct p9_trans_module *m)
{
    spin_lock(&v9fs_trans_lock);
    list_del_init(&m->list);
    spin_unlock(&v9fs_trans_lock);
}
Example #4
void RxPktPendingTimeout(unsigned long data)
{
	PRX_TS_RECORD	pRxTs = (PRX_TS_RECORD)data;
	struct rtllib_device *ieee = container_of(pRxTs, struct rtllib_device, RxTsRecord[pRxTs->num]);
	
	PRX_REORDER_ENTRY 	pReorderEntry = NULL;

	unsigned long flags = 0;
	struct rtllib_rxb *stats_IndicateArray[REORDER_WIN_SIZE];
	u8 index = 0;
	bool bPktInBuf = false;

	spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
	if(pRxTs->RxTimeoutIndicateSeq != 0xffff)
	{
		while(!list_empty(&pRxTs->RxPendingPktList))
		{
			pReorderEntry = (PRX_REORDER_ENTRY)list_entry(pRxTs->RxPendingPktList.prev,RX_REORDER_ENTRY,List);
			if(index == 0)
				pRxTs->RxIndicateSeq = pReorderEntry->SeqNum;

			if( SN_LESS(pReorderEntry->SeqNum, pRxTs->RxIndicateSeq) || 
				SN_EQUAL(pReorderEntry->SeqNum, pRxTs->RxIndicateSeq)	)
			{
				list_del_init(&pReorderEntry->List);
			
				if(SN_EQUAL(pReorderEntry->SeqNum, pRxTs->RxIndicateSeq))
					pRxTs->RxIndicateSeq = (pRxTs->RxIndicateSeq + 1) % 4096;

				RTLLIB_DEBUG(RTLLIB_DL_REORDER,"%s(): Indicate SeqNum: %d\n",__func__, pReorderEntry->SeqNum);
				stats_IndicateArray[index] = pReorderEntry->prxb;
				index++;
				
				list_add_tail(&pReorderEntry->List, &ieee->RxReorder_Unused_List);
			}
			else
			{
				bPktInBuf = true;
				break;
			}
		}
	}

	if(index>0){
		pRxTs->RxTimeoutIndicateSeq = 0xffff;
	
		if(index > REORDER_WIN_SIZE){
			RTLLIB_DEBUG(RTLLIB_DL_ERR, "%s(): Rx Reorder buffer full!!\n", __func__);
			spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
			return;
		}
		rtllib_indicate_packets(ieee, stats_IndicateArray, index);
		bPktInBuf = false;
	}

	if(bPktInBuf && (pRxTs->RxTimeoutIndicateSeq==0xffff)){
		pRxTs->RxTimeoutIndicateSeq = pRxTs->RxIndicateSeq;
		mod_timer(&pRxTs->RxPktPendingTimer,  jiffies + MSECS(ieee->pHTInfo->RxReorderPendingTime));
	}
	spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
}
Example #5
/*
 * AFS Cache Manager kernel thread
 */
static int kafscmd(void *arg)
{
	DECLARE_WAITQUEUE(myself, current);

	struct rxrpc_call *call;
	_SRXAFSCM_xxxx_t func;
	int die;

	printk("kAFS: Started kafscmd %d\n", current->pid);

	daemonize("kafscmd");

	complete(&kafscmd_alive);

	/* loop around looking for things to attend to */
	do {
		if (list_empty(&kafscmd_attention_list)) {
			set_current_state(TASK_INTERRUPTIBLE);
			add_wait_queue(&kafscmd_sleepq, &myself);

			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (!list_empty(&kafscmd_attention_list) ||
				    signal_pending(current) ||
				    kafscmd_die)
					break;

				schedule();
			}

			remove_wait_queue(&kafscmd_sleepq, &myself);
			set_current_state(TASK_RUNNING);
		}

		die = kafscmd_die;

		/* dequeue the next call requiring attention */
		call = NULL;
		spin_lock(&kafscmd_attention_lock);

		if (!list_empty(&kafscmd_attention_list)) {
			call = list_entry(kafscmd_attention_list.next,
					  struct rxrpc_call,
					  app_attn_link);
			list_del_init(&call->app_attn_link);
			die = 0;
		}

		spin_unlock(&kafscmd_attention_lock);

		if (call) {
			/* act upon it */
			_debug("@@@ Begin Attend Call %p", call);

			func = call->app_user;
			if (func)
				func(call);

			rxrpc_put_call(call);

			_debug("@@@ End Attend Call %p", call);
		}

	} while(!die);
Example #6
static void shadow_lru_isolate(struct list_head *item,
			       spinlock_t *lru_lock)
{
	struct address_space *mapping;
	struct radix_tree_node *node;
	unsigned int i;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the mapping->tree_lock and the
	 * lru_lock.  Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the lru_lock pins any
	 * address_space that has radix tree nodes on the LRU.
	 *
	 * We can then safely transition to the mapping->tree_lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the lru_lock.
	 */

	node = container_of(item, struct radix_tree_node, private_list);
	mapping = node->private_data;

	/* Coming from the list, invert the lock order */
	if (!spin_trylock(&mapping->tree_lock)) {
		spin_unlock(lru_lock);
		goto out;
	}

	list_del_init(item);
	nr_shadow_nodes--;

	spin_unlock(lru_lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */

	BUG_ON(!node->count);
	BUG_ON(node->count & RADIX_TREE_COUNT_MASK);

	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
		if (node->slots[i]) {
			BUG_ON(!radix_tree_exceptional_entry(node->slots[i]));
			node->slots[i] = NULL;
			BUG_ON(node->count < (1U << RADIX_TREE_COUNT_SHIFT));
			node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
			BUG_ON(!mapping->nrshadows);
			mapping->nrshadows--;
		}
	}
	BUG_ON(node->count);
	inc_zone_state(page_zone(virt_to_page(node)), WORKINGSET_NODERECLAIM);
	if (!__radix_tree_delete_node(&mapping->page_tree, node))
		BUG();

	spin_unlock(&mapping->tree_lock);
out:
	local_irq_enable();
	cond_resched();
	local_irq_disable();
	spin_lock(lru_lock);
}
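The comments in shadow_lru_isolate() describe resolving a lock-order inversion with a trylock: the walker arrives holding lru_lock, while mapping->tree_lock is normally taken first, so the function only trylocks the inner lock and backs off (dropping lru_lock) when the trylock fails. A userspace sketch of that shape, using pthreads and hypothetical names, might look like this:

#include <pthread.h>
#include <stdbool.h>

struct object {
	pthread_mutex_t lock;	/* normally taken before lru_lock */
	int on_lru;
};

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

/* Called with lru_lock held; returns with lru_lock held again. */
static bool isolate(struct object *obj)
{
	if (pthread_mutex_trylock(&obj->lock) != 0) {
		/* Blocking here would invert the usual lock order, so back
		 * off instead; the real code does its retry dance here. */
		pthread_mutex_unlock(&lru_lock);
		pthread_mutex_lock(&lru_lock);
		return false;
	}

	obj->on_lru = 0;			/* unlink while both locks are held */
	pthread_mutex_unlock(&lru_lock);

	/* ...slow reclaim work under obj->lock only... */

	pthread_mutex_unlock(&obj->lock);
	pthread_mutex_lock(&lru_lock);		/* restore the caller's locking state */
	return true;
}

int main(void)
{
	struct object obj = { PTHREAD_MUTEX_INITIALIZER, 1 };

	pthread_mutex_lock(&lru_lock);
	bool ok = isolate(&obj);
	pthread_mutex_unlock(&lru_lock);
	return ok ? 0 : 1;
}

Here pthread_mutex_trylock() stands in for spin_trylock(); like the kernel function, the sketch drops lru_lock before doing the slow per-object work and re-takes it before returning to the list walk.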
Example #7
/*
 * Purpose: merge two requests, appending next after rq.
 * Parameters: q, rq, next
 * Return value: none
 * Note: the sectors of rq and next are adjacent.
 */
static void noop_merged_requests(request_queue_t *q, struct request *rq,
				 struct request *next)
{
	list_del_init(&next->queuelist);
}
Example #8
/**
 * Searches the specified queue for the command to abort.
 *
 * @param [in] a              the adapter containing the queue
 * @param [in] abort_request  pointer to the abort request, filled in if one is built
 * @param [in] cmd            the SCSI command being aborted
 * @param [in] queue          the request queue to search
 *
 * @return 0 on failure, 1 if command was not found, 2 if command was found
 */
static int esas2r_check_active_queue(struct esas2r_adapter *a,
				     struct esas2r_request **abort_request,
				     struct scsi_cmnd *cmd,
				     struct list_head *queue)
{
	bool found = false;
	struct esas2r_request *ar = *abort_request;
	struct esas2r_request *rq;
	struct list_head *element, *next;

	list_for_each_safe(element, next, queue) {

		rq = list_entry(element, struct esas2r_request, req_list);

		if (rq->cmd == cmd) {

			/* Found the request.  See what to do with it. */
			if (queue == &a->active_list) {
				/*
				 * We are searching the active queue, which
				 * means that we need to send an abort request
				 * to the firmware.
				 */
				ar = esas2r_alloc_request(a);
				if (ar == NULL) {
					esas2r_log_dev(ESAS2R_LOG_WARN,
						       &(a->host->shost_gendev),
						       "unable to allocate an abort request for cmd %p",
						       cmd);
					return 0; /* Failure */
				}

				/*
				 * Task management request must be formatted
				 * with a lock held.
				 */
				ar->sense_len = 0;
				ar->vrq->scsi.length = 0;
				ar->target_id = rq->target_id;
				ar->vrq->scsi.flags |= cpu_to_le32(
					(u8)le32_to_cpu(rq->vrq->scsi.flags));

				memset(ar->vrq->scsi.cdb, 0,
				       sizeof(ar->vrq->scsi.cdb));

				ar->vrq->scsi.flags |= cpu_to_le32(
					FCP_CMND_TRM);
				ar->vrq->scsi.u.abort_handle =
					rq->vrq->scsi.handle;
			} else {
				/*
				 * The request is pending but not active on
				 * the firmware.  Just free it now and we'll
				 * report the successful abort below.
				 */
				list_del_init(&rq->req_list);
				esas2r_free_request(a, rq);
			}

			found = true;
			break;
		}

	}
Example #9
/* entry of listen service */
NEINT32 common_update_entry(struct listen_contex *listen_info)
{
	NEINT32 ret, sleep = 0;
	struct cm_manager *pmanger;
	struct ne_client_map *client;
	NEUINT16 session_id = 0;
#ifdef USER_UPDATE_LIST
	struct list_head *pos;
	swap_list_t *swap;
#else
//	NEINT32 i, num;
	cmlist_iterator_t cm_iterator;
#endif

	pmanger = ne_listensrv_get_cmmamager(listen_info);

	if (ne_atomic_read(&pmanger->connect_num) <= 0) {
		ne_sleep(100);
		return 0;
	}

#ifdef USER_UPDATE_LIST
	swap = &listen_info->wait_add;
	if (0 == ne_mutex_trylock(&swap->lock)) {
		list_join(&swap->list, &listen_info->conn_list);
		INIT_LIST_HEAD(&swap->list);
		ne_mutex_unlock(&swap->lock);
	}

	pos = listen_info->conn_list.next;
	while (pos != &listen_info->conn_list) {
		client = list_entry(pos, struct ne_client_map, map_list);
		pos = pos->next;

		session_id = client->connect_node.session_id;
		client = pmanger->trylock(pmanger, session_id);
		if (client) {
			++sleep;
			ret = tryto_close_tcpsession((ne_session_handle)client, listen_info->operate_timeout);
			if (ret) {
				if (-1 == ret)
					list_del_init(&client->map_list);
				pmanger->unlock(pmanger, session_id);
				continue;
			}
			ret = ne_do_netmsg(client, &listen_info->tcp);
			if (ret > 0) {
				ne_tcpnode_flush_sendbuf(&(client->connect_node));
			} else if (ret == 0) {
				if (0 == tcp_client_close(client, 1)) {
					list_del_init(&client->map_list);
				}
			}
			pmanger->unlock(pmanger, session_id);
		}
	}
#else
//	num = ne_atomic_read(&pmanger->connect_num);
//	i = 0;
	for (client = pmanger->lock_first(pmanger, &cm_iterator); client; client = pmanger->lock_next(pmanger, &cm_iterator)) {
		if (tryto_close_tcpsession((ne_session_handle)client, listen_info->operate_timeout)) {
			continue;
		}
		ret = ne_do_netmsg(client, &listen_info->tcp);
		if (ret > 0) {
			ne_tcpnode_flush_sendbuf(&(client->connect_node));
			sleep++;
		} else if (0 == ret) {
			tcp_client_close(client, 1);
		}
	}
#endif
	if (!sleep) {
		ne_sleep(100);
	}
	return 0;
}
Example #10
static int osio_dispatch(struct request_queue *q, int force)
{
	struct osio_data *od = q->elevator->elevator_data;
	const unsigned int non_empty[3] = {!list_empty(&od->fifo_head[OSIO_DIR_READ]),
					   !list_empty(&od->fifo_head[OSIO_DIR_SYNC_WRITE]),
					   !list_empty(&od->fifo_head[OSIO_DIR_ASYNC_WRITE]),};
	struct request *rq = NULL;

	osio_dbg("1, od->fifo_dir = %d\n", od->fifo_dir);
	osio_dbg("1, non_empty[0] = %d\n", non_empty[0]);
	osio_dbg("1, non_empty[1] = %d\n", non_empty[1]);
	osio_dbg("1, non_empty[2] = %d\n", non_empty[2]);

	/* dispatch a batch of rq */
	if (od->fifo_dir != OSIO_DIR_UNDEF) {
		if ((od->batching >= od->fifo_batch[od->fifo_dir]) || (!non_empty[od->fifo_dir])) {
			od->fifo_dir = OSIO_DIR_UNDEF;
		} else {
			goto dispatch_request;
		}
	}

	/* redecide the direction */
	if (non_empty[OSIO_DIR_READ]) {
		goto dir_read;
	}

	if (non_empty[OSIO_DIR_SYNC_WRITE]) {
		goto dir_sync_write;
	}

	if (non_empty[OSIO_DIR_ASYNC_WRITE]) {
		goto dir_async_write;
	}

	return 0;

dir_read:
	/* find a starved write rq */
	if ((od->write_starved[OSIO_SYNC] > od->write_starved_line[OSIO_SYNC]) && non_empty[OSIO_DIR_SYNC_WRITE]) {
		goto dir_sync_write;
	} else if ((od->write_starved[OSIO_ASYNC] > od->write_starved_line[OSIO_ASYNC]) && non_empty[OSIO_DIR_ASYNC_WRITE]) {
		goto dir_async_write;
	}

	od->fifo_dir = OSIO_DIR_READ;
	od->batching = 0;
	od->write_starved[OSIO_SYNC] += non_empty[OSIO_DIR_SYNC_WRITE];
	od->write_starved[OSIO_ASYNC] += non_empty[OSIO_DIR_ASYNC_WRITE];
	goto dispatch_request;

dir_sync_write:
	if ((od->write_starved[OSIO_ASYNC] > od->write_starved_line[OSIO_ASYNC]) && non_empty[OSIO_DIR_ASYNC_WRITE]) {
		goto dir_async_write;
	}

	od->fifo_dir = OSIO_DIR_SYNC_WRITE;
	od->batching = 0;
	od->write_starved[OSIO_SYNC] = 0;
	od->write_starved[OSIO_ASYNC] += non_empty[OSIO_DIR_ASYNC_WRITE];
	goto dispatch_request;

dir_async_write:
	od->fifo_dir = OSIO_DIR_ASYNC_WRITE;
	od->batching = 0;
	od->write_starved[OSIO_ASYNC] = 0;
	od->write_starved[OSIO_SYNC] += non_empty[OSIO_DIR_SYNC_WRITE];
	goto dispatch_request;

dispatch_request:
	/* dispatch req */
	osio_dbg("2, od->fifo_dir = %d\n", od->fifo_dir);
	osio_dbg("2, od->batching = %d\n", od->batching);
	rq = rq_entry_fifo(od->fifo_head[od->fifo_dir].next);
	list_del_init(&rq->queuelist);
	elv_dispatch_add_tail(q, rq);
	od->batching ++;
	return 1;
}
Example #11
static ssize_t ksb_fs_read(struct file *fp, char __user *buf,
				size_t count, loff_t *pos)
{
	int ret;
	unsigned long flags;
	struct ks_bridge *ksb = fp->private_data;
	struct data_pkt *pkt;
	size_t space, copied;

read_start:
	if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
		return -ENODEV;

	spin_lock_irqsave(&ksb->lock, flags);
	if (list_empty(&ksb->to_ks_list)) {
		spin_unlock_irqrestore(&ksb->lock, flags);
		ret = wait_event_interruptible(ksb->ks_wait_q,
				!list_empty(&ksb->to_ks_list) ||
				!test_bit(USB_DEV_CONNECTED, &ksb->flags));
		if (ret < 0)
			return ret;

		goto read_start;
	}

	space = count;
	copied = 0;
	while (!list_empty(&ksb->to_ks_list) && space) {
		size_t len;

		pkt = list_first_entry(&ksb->to_ks_list, struct data_pkt, list);
		len = min_t(size_t, space, pkt->len);
		pkt->n_read += len;
		spin_unlock_irqrestore(&ksb->lock, flags);

		ret = copy_to_user(buf + copied, pkt->buf, len);
		if (ret) {
			pr_err("copy_to_user failed err:%d\n", ret);
			ksb_free_data_pkt(pkt);
			return -EFAULT;
		}

		space -= len;
		copied += len;

		spin_lock_irqsave(&ksb->lock, flags);
		if (pkt->n_read == pkt->len) {
			/*
			 * re-init the packet and queue it
			 * for more data.
			 */
			list_del_init(&pkt->list);
			pkt->n_read = 0;
			pkt->len = MAX_DATA_PKT_SIZE;
			spin_unlock_irqrestore(&ksb->lock, flags);
			submit_one_urb(ksb, GFP_KERNEL, pkt);
			spin_lock_irqsave(&ksb->lock, flags);
		}
	}
	spin_unlock_irqrestore(&ksb->lock, flags);

	dbg_log_event(ksb, "KS_READ", copied, 0);

	pr_debug("count:%zu space:%zu copied:%zu", count, space, copied);

	return copied;
}
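ksb_fs_read() drops ksb->lock around copy_to_user(), which can fault and sleep, and re-takes it before the list is touched again. A userspace sketch of that locking shape, with a pthread mutex, hypothetical names, and a single consumer assumed for simplicity:

#include <pthread.h>
#include <string.h>
#include <stdlib.h>
#include <stddef.h>

struct pkt {
	struct pkt *next;
	size_t len, n_read;
	char buf[256];
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pkt *to_reader;	/* filled packets waiting for the reader */

/* Copy up to `space` bytes into `dst`; returns the number of bytes copied. */
static size_t reader_drain(char *dst, size_t space)
{
	size_t copied = 0;

	pthread_mutex_lock(&list_lock);
	while (to_reader && space) {
		struct pkt *pkt = to_reader;
		size_t len = pkt->len - pkt->n_read;

		if (len > space)
			len = space;

		/* Drop the lock for the potentially slow copy, like the
		 * spin_unlock_irqrestore() around copy_to_user() above. */
		pthread_mutex_unlock(&list_lock);
		memcpy(dst + copied, pkt->buf + pkt->n_read, len);
		pthread_mutex_lock(&list_lock);

		pkt->n_read += len;
		copied += len;
		space -= len;
		if (pkt->n_read == pkt->len) {	/* fully consumed: unlink and free */
			to_reader = pkt->next;
			free(pkt);
		}
	}
	pthread_mutex_unlock(&list_lock);
	return copied;
}

int main(void)
{
	struct pkt *p = calloc(1, sizeof(*p));
	char out[64];

	if (!p)
		return 1;
	memcpy(p->buf, "hello", 5);
	p->len = 5;
	to_reader = p;

	return reader_drain(out, sizeof(out)) == 5 ? 0 : 1;
}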
Example #12
/******** functions ********/
static void osio_merged_requests(struct request_queue *q, struct request *rq, struct request *next)
{
	list_del_init(&next->queuelist);
}
Example #13
/* All actions that we need after sending hello on passive conn:
 * 1) Cope with 1st easy case: conn is already linked to a peer
 * 2) Cope with 2nd easy case: remove zombie conn
 * 3) Resolve race:
 *    a) find the peer
 *    b) link the conn to the peer if conn[idx] is empty
 *    c) if the conn[idx] isn't empty and is in READY state,
 *       remove the conn as duplicated
 *    d) if the conn[idx] isn't empty and isn't in READY state,
 *       override conn[idx] with the conn
 */
int
usocklnd_passiveconn_hellosent(usock_conn_t *conn)
{
	usock_conn_t    *conn2;
	usock_peer_t    *peer;
	struct list_head tx_list;
	struct list_head zcack_list;
	int              idx;
	int              rc = 0;

	/* almost nothing to do if conn is already linked to peer hash table */
	if (conn->uc_peer != NULL)
		goto passive_hellosent_done;

	/* conn->uc_peer == NULL, so the conn isn't accessible via
	 * peer hash list, so nobody can touch the conn but us */

	if (conn->uc_ni == NULL) /* remove zombie conn */
		goto passive_hellosent_connkill;

	/* all code below is race resolution, because normally
	 * passive conn is linked to peer just after receiving hello */
	CFS_INIT_LIST_HEAD (&tx_list);
	CFS_INIT_LIST_HEAD (&zcack_list);

	/* conn is passive and isn't linked to any peer,
	   so its tx and zc_ack lists have to be empty */
	LASSERT (list_empty(&conn->uc_tx_list) &&
		 list_empty(&conn->uc_zcack_list) &&
		 conn->uc_sending == 0);

	rc = usocklnd_find_or_create_peer(conn->uc_ni, conn->uc_peerid, &peer);
	if (rc)
		return rc;

	idx = usocklnd_type2idx(conn->uc_type);

	/* try to link conn to peer */
	pthread_mutex_lock(&peer->up_lock);
	if (peer->up_conns[idx] == NULL) {
		usocklnd_link_conn_to_peer(conn, peer, idx);
		usocklnd_conn_addref(conn);
		conn->uc_peer = peer;
		usocklnd_peer_addref(peer);
	} else {
		conn2 = peer->up_conns[idx];
		pthread_mutex_lock(&conn2->uc_lock);

		if (conn2->uc_state == UC_READY) {
			/* conn2 is in READY state, so conn is "duplicated" */
			pthread_mutex_unlock(&conn2->uc_lock);
			pthread_mutex_unlock(&peer->up_lock);
			usocklnd_peer_decref(peer);
			usocklnd_conn_kill(conn2);
			goto passive_hellosent_connkill;
		}

		/* uc_state != UC_READY => switch conn and conn2 */
		/* Relink txs and zc_acks from conn2 to conn.
		 * We're sure that nobody but us can access the conn;
		 * nevertheless we take the mutex (if we're wrong, a
		 * deadlock is easier to spot than a corrupted list). */
		list_add(&tx_list, &conn2->uc_tx_list);
		list_del_init(&conn2->uc_tx_list);
		list_add(&zcack_list, &conn2->uc_zcack_list);
		list_del_init(&conn2->uc_zcack_list);

		pthread_mutex_lock(&conn->uc_lock);
		list_add_tail(&conn->uc_tx_list, &tx_list);
		list_del_init(&tx_list);
		list_add_tail(&conn->uc_zcack_list, &zcack_list);
		list_del_init(&zcack_list);
		conn->uc_peer = peer;
		pthread_mutex_unlock(&conn->uc_lock);

		conn2->uc_peer = NULL; /* make conn2 zombie */
		pthread_mutex_unlock(&conn2->uc_lock);
		usocklnd_conn_decref(conn2);

		usocklnd_link_conn_to_peer(conn, peer, idx);
		usocklnd_conn_addref(conn);
		conn->uc_peer = peer;
	}

	lnet_ni_decref(conn->uc_ni);
	conn->uc_ni = NULL;
	pthread_mutex_unlock(&peer->up_lock);
	usocklnd_peer_decref(peer);

  passive_hellosent_done:
	/* safely transit to UC_READY state */
	/* rc == 0 */
	pthread_mutex_lock(&conn->uc_lock);
	if (conn->uc_state != UC_DEAD) {
		usocklnd_rx_ksmhdr_state_transition(conn);

		/* we're ready to receive incoming packets and may
		   already have something to transmit */
		LASSERT (conn->uc_sending == 0);
		if ( list_empty(&conn->uc_tx_list) &&
		     list_empty(&conn->uc_zcack_list) ) {
			conn->uc_tx_flag = 0;
			rc = usocklnd_add_pollrequest(conn, POLL_SET_REQUEST,
						 POLLIN);
		} else {
			conn->uc_tx_deadline =
				cfs_time_shift(usock_tuns.ut_timeout);
			conn->uc_tx_flag = 1;
			rc = usocklnd_add_pollrequest(conn, POLL_SET_REQUEST,
						      POLLIN | POLLOUT);
		}

		if (rc == 0)
			conn->uc_state = UC_READY;
	}
	pthread_mutex_unlock(&conn->uc_lock);
	return rc;

  passive_hellosent_connkill:
	usocklnd_conn_kill(conn);
	return 0;
}
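The list_add()/list_del_init() pairs on conn2->uc_tx_list and uc_zcack_list above are a hand-rolled way of moving every entry from one list head onto another (mainline <linux/list.h> provides list_splice_init() for the same job). The trick relies on the circular layout: splice the new head into the ring, then remove the old head, and all entries now hang off the new head. A self-contained userspace sketch of the idiom (a minimal re-implementation, not the kernel header):

#include <assert.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void __list_add(struct list_head *new,
		       struct list_head *prev, struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

/* insert right after head */
static void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);
}

static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	INIT_LIST_HEAD(entry);
}

static int list_empty(const struct list_head *h) { return h->next == h; }

int main(void)
{
	struct list_head old_head, new_head, a, b;

	INIT_LIST_HEAD(&old_head);
	INIT_LIST_HEAD(&new_head);
	list_add(&a, &old_head);
	list_add(&b, &old_head);	/* old_head now holds b, a */

	/* the idiom from usocklnd_passiveconn_hellosent(): */
	list_add(&new_head, &old_head);	/* new head joins the ring... */
	list_del_init(&old_head);	/* ...old head leaves: entries moved */

	assert(list_empty(&old_head));
	assert(!list_empty(&new_head));
	assert(new_head.next == &b && new_head.prev == &a);
	return 0;
}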
Example #14
/* All actions that we need after receiving hello on active conn:
 * 1) Schedule removing if we're zombie
 * 2) Restart active conn if we lost the race
 * 3) Else: update RX part to receive KSM header
 */
int
usocklnd_activeconn_hellorecv(usock_conn_t *conn)
{
	int                rc    = 0;
	ksock_hello_msg_t *hello = conn->uc_rx_hello;
	usock_peer_t      *peer  = conn->uc_peer;

	/* Active conn with peer==NULL is zombie.
	 * Don't try to link it to peer because the conn
	 * has already had a chance to proceed at the beginning */
	if (peer == NULL) {
		LASSERT(list_empty(&conn->uc_tx_list) &&
			list_empty(&conn->uc_zcack_list));

		usocklnd_conn_kill(conn);
		return 0;
	}

	peer->up_last_alive = cfs_time_current();

	/* peer says that we lost the race */
	if (hello->kshm_ctype == (__u32)SOCKLND_CONN_NONE) {
		/* Start new active conn, relink txs and zc_acks from
		 * the conn to new conn, schedule removing the conn.
		 * Actually, we're expecting that a passive conn will
		 * make us zombie soon and take care of our txs and
		 * zc_acks */

		struct list_head tx_list, zcack_list;
		usock_conn_t *conn2;
		int idx = usocklnd_type2idx(conn->uc_type);

		CFS_INIT_LIST_HEAD (&tx_list);
		CFS_INIT_LIST_HEAD (&zcack_list);

		/* Block usocklnd_send() to check peer->up_conns[idx]
		 * and to enqueue more txs */
		pthread_mutex_lock(&peer->up_lock);
		pthread_mutex_lock(&conn->uc_lock);

		/* usocklnd_shutdown() could kill us */
		if (conn->uc_state == UC_DEAD) {
			pthread_mutex_unlock(&conn->uc_lock);
			pthread_mutex_unlock(&peer->up_lock);
			return 0;
		}

		LASSERT (peer == conn->uc_peer);
		LASSERT (peer->up_conns[idx] == conn);

		rc = usocklnd_create_active_conn(peer, conn->uc_type, &conn2);
		if (rc) {
			conn->uc_errored = 1;
			pthread_mutex_unlock(&conn->uc_lock);
			pthread_mutex_unlock(&peer->up_lock);
			return rc;
		}

		usocklnd_link_conn_to_peer(conn2, peer, idx);
		conn2->uc_peer = peer;

		/* unlink txs and zcack from the conn */
		list_add(&tx_list, &conn->uc_tx_list);
		list_del_init(&conn->uc_tx_list);
		list_add(&zcack_list, &conn->uc_zcack_list);
		list_del_init(&conn->uc_zcack_list);

		/* link them to conn2 */
		list_add(&conn2->uc_tx_list, &tx_list);
		list_del_init(&tx_list);
		list_add(&conn2->uc_zcack_list, &zcack_list);
		list_del_init(&zcack_list);

		/* make conn zombie */
		conn->uc_peer = NULL;
		usocklnd_peer_decref(peer);

		/* schedule conn2 for processing */
		rc = usocklnd_add_pollrequest(conn2, POLL_ADD_REQUEST, POLLOUT);
		if (rc) {
			peer->up_conns[idx] = NULL;
			usocklnd_conn_decref(conn2); /* should destroy conn */
		} else {
			usocklnd_conn_kill_locked(conn);
		}

		pthread_mutex_unlock(&conn->uc_lock);
		pthread_mutex_unlock(&peer->up_lock);
		usocklnd_conn_decref(conn);

	} else { /* hello->kshm_ctype != SOCKLND_CONN_NONE */
		if (conn->uc_type != usocklnd_invert_type(hello->kshm_ctype))
			return -EPROTO;

		pthread_mutex_lock(&peer->up_lock);
		usocklnd_cleanup_stale_conns(peer, hello->kshm_src_incarnation,
					     conn);
		pthread_mutex_unlock(&peer->up_lock);

		/* safely transit to UC_READY state */
		/* rc == 0 */
		pthread_mutex_lock(&conn->uc_lock);
		if (conn->uc_state != UC_DEAD) {
			usocklnd_rx_ksmhdr_state_transition(conn);

			/* POLLIN is already set because we just
			 * received hello, but maybe we have something
			 * to send? */
			LASSERT (conn->uc_sending == 0);
			if ( !list_empty(&conn->uc_tx_list) ||
			     !list_empty(&conn->uc_zcack_list) ) {

				conn->uc_tx_deadline =
					cfs_time_shift(usock_tuns.ut_timeout);
				conn->uc_tx_flag = 1;
				rc = usocklnd_add_pollrequest(conn,
							      POLL_SET_REQUEST,
							      POLLIN | POLLOUT);
			}

			if (rc == 0)
				conn->uc_state = UC_READY;
		}
		pthread_mutex_unlock(&conn->uc_lock);
	}

	return rc;
}
Example #15
/**
 * isci_terminate_request_core() - This function will terminate the given
 *    request, and wait for it to complete.  This function must only be called
 *    from a thread that can wait.  Note that the request is terminated and
 *    completed (back to the host, if started there).
 * @ihost: This SCU.
 * @idev: The target.
 * @isci_request: The I/O request to be terminated.
 *
 */
static void isci_terminate_request_core(struct isci_host *ihost,
					struct isci_remote_device *idev,
					struct isci_request *isci_request)
{
	enum sci_status status      = SCI_SUCCESS;
	bool was_terminated         = false;
	bool needs_cleanup_handling = false;
	unsigned long     flags;
	unsigned long     termination_completed = 1;
	struct completion *io_request_completion;

	dev_dbg(&ihost->pdev->dev,
		"%s: device = %p; request = %p\n",
		__func__, idev, isci_request);

	spin_lock_irqsave(&ihost->scic_lock, flags);

	io_request_completion = isci_request->io_request_completion;

	/* Note that we are not going to control
	 * the target to abort the request.
	 */
	set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags);

	/* Make sure the request wasn't just sitting around signalling
	 * device condition (if the request handle is NULL, then the
	 * request completed but needed additional handling here).
	 */
	if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
		was_terminated = true;
		needs_cleanup_handling = true;
		status = sci_controller_terminate_request(ihost,
							   idev,
							   isci_request);
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/*
	 * The only time the request to terminate will
	 * fail is when the io request is completed and
	 * being aborted.
	 */
	if (status != SCI_SUCCESS) {
		dev_dbg(&ihost->pdev->dev,
			"%s: sci_controller_terminate_request"
			" returned = 0x%x\n",
			__func__, status);

		isci_request->io_request_completion = NULL;

	} else {
		if (was_terminated) {
			dev_dbg(&ihost->pdev->dev,
				"%s: before completion wait (%p/%p)\n",
				__func__, isci_request, io_request_completion);

			/* Wait here for the request to complete. */
			termination_completed
				= wait_for_completion_timeout(
				   io_request_completion,
				   msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));

			if (!termination_completed) {

				/* The request to terminate has timed out.  */
				spin_lock_irqsave(&ihost->scic_lock, flags);

				/* Check for state changes. */
				if (!test_bit(IREQ_TERMINATED,
					      &isci_request->flags)) {

					/* The best we can do is to have the
					 * request die a silent death if it
					 * ever really completes.
					 */
					isci_request_mark_zombie(ihost,
								 isci_request);
					needs_cleanup_handling = true;
				} else
					termination_completed = 1;

				spin_unlock_irqrestore(&ihost->scic_lock,
						       flags);

				if (!termination_completed) {

					dev_dbg(&ihost->pdev->dev,
						"%s: *** Timeout waiting for "
						"termination(%p/%p)\n",
						__func__, io_request_completion,
						isci_request);

					/* The request can no longer be referenced
					 * safely since it may go away if the
					 * termination ever really does complete.
					 */
					isci_request = NULL;
				}
			}
			if (termination_completed)
				dev_dbg(&ihost->pdev->dev,
					"%s: after completion wait (%p/%p)\n",
					__func__, isci_request, io_request_completion);
		}

		if (termination_completed) {

			isci_request->io_request_completion = NULL;

			/* Peek at the status of the request.  This will tell
			 * us if there was special handling on the request such that it
			 * needs to be detached and freed here.
			 */
			spin_lock_irqsave(&isci_request->state_lock, flags);

			needs_cleanup_handling
				= isci_request_is_dealloc_managed(
					isci_request->status);

			spin_unlock_irqrestore(&isci_request->state_lock, flags);

		}
		if (needs_cleanup_handling) {

			dev_dbg(&ihost->pdev->dev,
				"%s: cleanup isci_device=%p, request=%p\n",
				__func__, idev, isci_request);

			if (isci_request != NULL) {
				spin_lock_irqsave(&ihost->scic_lock, flags);
				isci_free_tag(ihost, isci_request->io_tag);
				isci_request_change_state(isci_request, unallocated);
				list_del_init(&isci_request->dev_node);
				spin_unlock_irqrestore(&ihost->scic_lock, flags);
			}
		}
	}
}
Example #16
static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
				     struct page *pages[],
				     unsigned long num_pages)
{
	struct page *page, *next;
	struct svga_guest_mem_descriptor *page_virtual = NULL;
	struct svga_guest_mem_descriptor *desc_virtual = NULL;
	unsigned int desc_per_page;
	unsigned long prev_pfn;
	unsigned long pfn;
	int ret;

	desc_per_page = PAGE_SIZE /
	    sizeof(struct svga_guest_mem_descriptor) - 1;

	while (likely(num_pages != 0)) {
		page = alloc_page(__GFP_HIGHMEM);
		if (unlikely(page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		list_add_tail(&page->lru, desc_pages);

		/*
		 * Point previous page terminating descriptor to this
		 * page before unmapping it.
		 */

		if (likely(page_virtual != NULL)) {
			desc_virtual->ppn = page_to_pfn(page);
			kunmap_atomic(page_virtual, KM_USER0);
		}

		page_virtual = kmap_atomic(page, KM_USER0);
		desc_virtual = page_virtual - 1;
		prev_pfn = ~(0UL);

		while (likely(num_pages != 0)) {
			pfn = page_to_pfn(*pages);

			if (pfn != prev_pfn + 1) {

				if (desc_virtual - page_virtual ==
				    desc_per_page - 1)
					break;

				(++desc_virtual)->ppn = cpu_to_le32(pfn);
				desc_virtual->num_pages = cpu_to_le32(1);
			} else {
				uint32_t tmp =
				    le32_to_cpu(desc_virtual->num_pages);
				desc_virtual->num_pages = cpu_to_le32(tmp + 1);
			}
			prev_pfn = pfn;
			--num_pages;
			++pages;
		}

		(++desc_virtual)->ppn = cpu_to_le32(0);
		desc_virtual->num_pages = cpu_to_le32(0);
	}

	if (likely(page_virtual != NULL))
		kunmap_atomic(page_virtual, KM_USER0);

	return 0;
out_err:
	list_for_each_entry_safe(page, next, desc_pages, lru) {
		list_del_init(&page->lru);
		__free_page(page);
	}
Example #17
static void hub_events(void)
{
    unsigned long flags;
    struct list_head *tmp;
    struct usb_device *dev;
    struct usb_hub *hub;
    u16 hubstatus;
    u16 hubchange;
    u16 portstatus;
    u16 portchange;
    int i, ret;
    int m=0;
    /*
     *  We restart the list every time to avoid a deadlock with
     * deleting hubs downstream from this one. This should be
     * safe since we delete the hub from the event list.
     * Not the most efficient, but avoids deadlocks.
     */

    while (m<5) {
        m++;
        spin_lock_irqsave(&hub_event_lock, flags);

        if (list_empty(&hub_event_list))
            break;

        /* Grab the next entry from the beginning of the list */
        tmp = hub_event_list.next;

        hub = list_entry(tmp, struct usb_hub, event_list);
        dev = interface_to_usbdev(hub->intf);

        list_del_init(tmp);

        if (unlikely(down_trylock(&hub->khubd_sem)))
            BUG();	/* never blocks, we were on list */

        spin_unlock_irqrestore(&hub_event_lock, flags);

        if (hub->error) {
            dev_dbg (&hub->intf->dev, "resetting for error %d\n",
                     hub->error);

            if (hub_reset(hub)) {
                dev_dbg (&hub->intf->dev,
                         "can't reset; disconnecting\n");
                up(&hub->khubd_sem);
                hub_start_disconnect(dev);
                continue;
            }

            hub->nerrors = 0;
            hub->error = 0;
        }

        for (i = 0; i < hub->descriptor->bNbrPorts; i++) {
            ret = hub_port_status(dev, i, &portstatus, &portchange);
            if (ret < 0) {
                continue;
            }

            if (portchange & USB_PORT_STAT_C_CONNECTION) {
                hub_port_connect_change(hub, i, portstatus, portchange);
            } else if (portchange & USB_PORT_STAT_C_ENABLE) {
                dev_dbg (hubdev (dev),
                         "port %d enable change, status %x\n",
                         i + 1, portstatus);
                clear_port_feature(dev,
                                   i + 1, USB_PORT_FEAT_C_ENABLE);

                /*
                 * EM interference sometimes causes badly
                 * shielded USB devices to be shutdown by
                 * the hub, this hack enables them again.
                 * Works at least with mouse driver.
                 */
                if (!(portstatus & USB_PORT_STAT_ENABLE)
                        && (portstatus & USB_PORT_STAT_CONNECTION)
                        && (dev->children[i])) {
                    dev_err (&hub->intf->dev,
                             "port %i "
                             "disabled by hub (EMI?), "
                             "re-enabling...",
                             i + 1);
                    hub_port_connect_change(hub,
                                            i, portstatus, portchange);
                }
            }

            if (portchange & USB_PORT_STAT_C_SUSPEND) {
                dev_dbg (&hub->intf->dev,
                         "suspend change on port %d\n",
                         i + 1);
                clear_port_feature(dev,
                                   i + 1,  USB_PORT_FEAT_C_SUSPEND);
            }

            if (portchange & USB_PORT_STAT_C_OVERCURRENT) {
                dev_err (&hub->intf->dev,
                         "over-current change on port %d\n",
                         i + 1);
                clear_port_feature(dev,
                                   i + 1, USB_PORT_FEAT_C_OVER_CURRENT);
                hub_power_on(hub);
            }

            if (portchange & USB_PORT_STAT_C_RESET) {
                dev_dbg (&hub->intf->dev,
                         "reset change on port %d\n",
                         i + 1);
                clear_port_feature(dev,
                                   i + 1, USB_PORT_FEAT_C_RESET);
            }
        } /* end for i */

        /* deal with hub status changes */
        if (hub_hub_status(hub, &hubstatus, &hubchange) < 0)
            dev_err (&hub->intf->dev, "get_hub_status failed\n");
        else {
            if (hubchange & HUB_CHANGE_LOCAL_POWER) {
                dev_dbg (&hub->intf->dev, "power change\n");
                clear_hub_feature(dev, C_HUB_LOCAL_POWER);
            }
            if (hubchange & HUB_CHANGE_OVERCURRENT) {
                dev_dbg (&hub->intf->dev, "overcurrent change\n");
                wait_ms(500);	/* Cool down */
                clear_hub_feature(dev, C_HUB_OVER_CURRENT);
                hub_power_on(hub);
            }
        }
        up(&hub->khubd_sem);
    } /* end while (m < 5) */

    spin_unlock_irqrestore(&hub_event_lock, flags);
}
Example #18
/**
 * kthread_create - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run(), kthread_create_on_cpu().
 *
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create(int (*threadfn)(void *data),
				   void *data,
				   const char namefmt[],
				   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);

#ifdef CONFIG_SYS_HAS_CONTROL_CPU
		set_cpus_allowed_ptr(create.result, cpumask_of(0));
#else
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
#endif
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create);
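The doc comment above spells out the create-then-wake contract. A minimal kernel-module-style sketch of that usage follows; demo_threadfn() and the module hooks are hypothetical, and kthread_run() is the usual shorthand that combines kthread_create() with wake_up_process().

#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *demo_task;

static int demo_threadfn(void *data)
{
	/* Loop until kthread_stop() is called on us. */
	while (!kthread_should_stop()) {
		/* ...do one unit of work... */
		msleep_interruptible(1000);
	}
	return 0;			/* becomes kthread_stop()'s return value */
}

static int __init demo_init(void)
{
	demo_task = kthread_create(demo_threadfn, NULL, "demo_thread");
	if (IS_ERR(demo_task))
		return PTR_ERR(demo_task);

	wake_up_process(demo_task);	/* the thread is created stopped */
	return 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(demo_task);	/* wakes the thread and waits for it */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");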

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);
	get_task_struct(k);

	kthread = to_kthread(k);
	barrier(); /* it might have exited */
	if (k->vfork_done != NULL) {
		kthread->should_stop = 1;
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);

	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);

#ifdef CONFIG_SYS_HAS_CONTROL_CPU
	set_cpus_allowed_ptr(tsk, cpu_control_mask);
#else
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
#endif

	set_mems_allowed(node_possible_map);

	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}
Example #19
static inline void _dequeue_task_dummy(struct task_struct *p)
{
  struct sched_dummy_entity *dummy_se = &p->dummy_se;
  list_del_init(&dummy_se->run_list);
}
Example #20
static ssize_t
printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
{
	struct printer_dev		*dev = fd->private_data;
	unsigned long			flags;
	size_t				size;
	size_t				bytes_copied;
	struct usb_request		*req;
	/* This is a pointer to the current USB rx request. */
	struct usb_request		*current_rx_req;
	/* This is the number of bytes in the current rx buffer. */
	size_t				current_rx_bytes;
	/* This is a pointer to the current rx buffer. */
	u8				*current_rx_buf;

	if (len == 0)
		return -EINVAL;

	DBG(dev, "printer_read trying to read %d bytes\n", (int)len);

	mutex_lock(&dev->lock_printer_io);
	spin_lock_irqsave(&dev->lock, flags);

	/* We will use this flag later to check if a printer reset happened
	 * after we turn interrupts back on.
	 */
	dev->reset_printer = 0;

	setup_rx_reqs(dev);

	bytes_copied = 0;
	current_rx_req = dev->current_rx_req;
	current_rx_bytes = dev->current_rx_bytes;
	current_rx_buf = dev->current_rx_buf;
	dev->current_rx_req = NULL;
	dev->current_rx_bytes = 0;
	dev->current_rx_buf = NULL;

	/* Check if there is any data in the read buffers. Please note that
	 * current_rx_bytes is the number of bytes in the current rx buffer.
	 * If it is zero then check if there are any other rx_buffers that
	 * are on the completed list. We are only out of data if all rx
	 * buffers are empty.
	 */
	if ((current_rx_bytes == 0) &&
			(likely(list_empty(&dev->rx_buffers)))) {
		/* Turn interrupts back on before sleeping. */
		spin_unlock_irqrestore(&dev->lock, flags);

		/*
		 * If no data is available check if this is a NON-Blocking
		 * call or not.
		 */
		if (fd->f_flags & (O_NONBLOCK|O_NDELAY)) {
			mutex_unlock(&dev->lock_printer_io);
			return -EAGAIN;
		}

		/* Sleep until data is available */
		wait_event_interruptible(dev->rx_wait,
				(likely(!list_empty(&dev->rx_buffers))));
		spin_lock_irqsave(&dev->lock, flags);
	}

	/* We have data to return, so copy it to the caller's buffer. */
	while ((current_rx_bytes || likely(!list_empty(&dev->rx_buffers)))
			&& len) {
		if (current_rx_bytes == 0) {
			req = container_of(dev->rx_buffers.next,
					struct usb_request, list);
			list_del_init(&req->list);

			if (req->actual && req->buf) {
				current_rx_req = req;
				current_rx_bytes = req->actual;
				current_rx_buf = req->buf;
			} else {
				list_add(&req->list, &dev->rx_reqs);
				continue;
			}
		}

		/* Don't leave irqs off while doing memory copies */
		spin_unlock_irqrestore(&dev->lock, flags);

		if (len > current_rx_bytes)
			size = current_rx_bytes;
		else
			size = len;

		size -= copy_to_user(buf, current_rx_buf, size);
		bytes_copied += size;
		len -= size;
		buf += size;

		spin_lock_irqsave(&dev->lock, flags);

		/* We've disconnected or reset so return. */
		if (dev->reset_printer) {
			list_add(&current_rx_req->list, &dev->rx_reqs);
			spin_unlock_irqrestore(&dev->lock, flags);
			mutex_unlock(&dev->lock_printer_io);
			return -EAGAIN;
		}

		/* If we are not returning all the data left in this RX request
		 * buffer, then adjust the amount of data left in the buffer.
		 * Otherwise, if we are done with this RX request buffer, then
		 * requeue it to get any incoming data from the USB host.
		 */
		if (size < current_rx_bytes) {
			current_rx_bytes -= size;
			current_rx_buf += size;
		} else {
			list_add(&current_rx_req->list, &dev->rx_reqs);
			current_rx_bytes = 0;
			current_rx_buf = NULL;
			current_rx_req = NULL;
		}
	}
Example #21
/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data,
					   int node,
					   const char namefmt[],
					   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	create.node = node;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create_on_node);

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	do_set_cpus_allowed(p, cpumask_of(cpu));
	p->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);
	get_task_struct(k);

	kthread = to_kthread(k);
	barrier(); /* it might have exited */
	if (k->vfork_done != NULL) {
		kthread->should_stop = 1;
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);

	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_HIGH_MEMORY]);

	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}
Example #22
s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
{
	struct hal_data_8188e	*haldata = GET_HAL_DATA(adapt);
	struct xmit_frame *pxmitframe = NULL;
	struct xmit_frame *pfirstframe = NULL;

	/*  aggregate variable */
	struct hw_xmit *phwxmit;
	struct sta_info *psta = NULL;
	struct tx_servq *ptxservq = NULL;

	struct list_head *xmitframe_plist = NULL, *xmitframe_phead = NULL;

	u32 pbuf;	/*  next pkt address */
	u32 pbuf_tail;	/*  last pkt tail */
	u32 len;	/*  packet length, except TXDESC_SIZE and PKT_OFFSET */

	u32 bulksize = haldata->UsbBulkOutSize;
	u8 desc_cnt;
	u32 bulkptr;

	/*  dump frame variable */
	u32 ff_hwaddr;

	RT_TRACE(_module_rtl8192c_xmit_c_, _drv_info_, ("+xmitframe_complete\n"));

	/*  check xmitbuffer is ok */
	if (pxmitbuf == NULL) {
		pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
		if (pxmitbuf == NULL)
			return false;
	}

	/* 3 1. pick up first frame */
	rtw_free_xmitframe(pxmitpriv, pxmitframe);

	pxmitframe = rtw_dequeue_xframe(pxmitpriv, pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
	if (pxmitframe == NULL) {
		/*  no more xmit frame, release xmit buffer */
		rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
		return false;
	}

	pxmitframe->pxmitbuf = pxmitbuf;
	pxmitframe->buf_addr = pxmitbuf->pbuf;
	pxmitbuf->priv_data = pxmitframe;

	pxmitframe->agg_num = 1; /*  alloc xmitframe should assign to 1. */
	pxmitframe->pkt_offset = 1; /*  first frame of aggregation, reserve offset */

	rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);

	/*  always return ndis_packet after rtw_xmitframe_coalesce */
	rtw_os_xmit_complete(adapt, pxmitframe);

	/* 3 2. aggregate same priority and same DA(AP or STA) frames */
	pfirstframe = pxmitframe;
	len = xmitframe_need_length(pfirstframe) + TXDESC_SIZE + (pfirstframe->pkt_offset*PACKET_OFFSET_SZ);
	pbuf_tail = len;
	pbuf = round_up(pbuf_tail, 8);

	/*  check pkt amount in one bulk */
	desc_cnt = 0;
	bulkptr = bulksize;
	if (pbuf < bulkptr) {
		desc_cnt++;
	} else {
		desc_cnt = 0;
		bulkptr = ((pbuf / bulksize) + 1) * bulksize; /*  round to next bulksize */
	}

	/*  dequeue same priority packet from station tx queue */
	psta = pfirstframe->attrib.psta;
	switch (pfirstframe->attrib.priority) {
	case 1:
	case 2:
		ptxservq = &(psta->sta_xmitpriv.bk_q);
		phwxmit = pxmitpriv->hwxmits + 3;
		break;
	case 4:
	case 5:
		ptxservq = &(psta->sta_xmitpriv.vi_q);
		phwxmit = pxmitpriv->hwxmits + 1;
		break;
	case 6:
	case 7:
		ptxservq = &(psta->sta_xmitpriv.vo_q);
		phwxmit = pxmitpriv->hwxmits;
		break;
	case 0:
	case 3:
	default:
		ptxservq = &(psta->sta_xmitpriv.be_q);
		phwxmit = pxmitpriv->hwxmits + 2;
		break;
	}
	spin_lock_bh(&pxmitpriv->lock);

	xmitframe_phead = get_list_head(&ptxservq->sta_pending);
	xmitframe_plist = xmitframe_phead->next;

	while (xmitframe_phead != xmitframe_plist) {
		pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
		xmitframe_plist = xmitframe_plist->next;

		pxmitframe->agg_num = 0; /*  not first frame of aggregation */
		pxmitframe->pkt_offset = 0; /*  not first frame of aggregation, no need to reserve offset */

		len = xmitframe_need_length(pxmitframe) + TXDESC_SIZE + (pxmitframe->pkt_offset*PACKET_OFFSET_SZ);

		if (round_up(pbuf + len, 8) > MAX_XMITBUF_SZ) {
			pxmitframe->agg_num = 1;
			pxmitframe->pkt_offset = 1;
			break;
		}
		list_del_init(&pxmitframe->list);
		ptxservq->qcnt--;
		phwxmit->accnt--;

		pxmitframe->buf_addr = pxmitbuf->pbuf + pbuf;

		rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
		/*  always return ndis_packet after rtw_xmitframe_coalesce */
		rtw_os_xmit_complete(adapt, pxmitframe);

		/*  (len - TXDESC_SIZE) == pxmitframe->attrib.last_txcmdsz */
		update_txdesc(pxmitframe, pxmitframe->buf_addr, pxmitframe->attrib.last_txcmdsz, true);

		/*  don't need xmitframe any more */
		rtw_free_xmitframe(pxmitpriv, pxmitframe);

		/*  handle pointer and stop condition */
		pbuf_tail = pbuf + len;
		pbuf = round_up(pbuf_tail, 8);

		pfirstframe->agg_num++;
		if (MAX_TX_AGG_PACKET_NUMBER == pfirstframe->agg_num)
			break;

		if (pbuf < bulkptr) {
			desc_cnt++;
			if (desc_cnt == haldata->UsbTxAggDescNum)
				break;
		} else {
			desc_cnt = 0;
			bulkptr = ((pbuf / bulksize) + 1) * bulksize;
		}
	} /* end while (aggregate same priority and same DA(AP or STA) frames) */

	if (list_empty(&ptxservq->sta_pending.queue))
		list_del_init(&ptxservq->tx_pending);

	spin_unlock_bh(&pxmitpriv->lock);
	if ((pfirstframe->attrib.ether_type != 0x0806) &&
	    (pfirstframe->attrib.ether_type != 0x888e) &&
	    (pfirstframe->attrib.ether_type != 0x88b4) &&
	    (pfirstframe->attrib.dhcp_pkt != 1))
		rtw_issue_addbareq_cmd(adapt, pfirstframe);
	/* 3 3. update first frame txdesc */
	if ((pbuf_tail % bulksize) == 0) {
		/*  remove pkt_offset */
		pbuf_tail -= PACKET_OFFSET_SZ;
		pfirstframe->buf_addr += PACKET_OFFSET_SZ;
		pfirstframe->pkt_offset--;
	}

	update_txdesc(pfirstframe, pfirstframe->buf_addr, pfirstframe->attrib.last_txcmdsz, true);

	/* 3 4. write xmit buffer to USB FIFO */
	ff_hwaddr = rtw_get_ff_hwaddr(pfirstframe);
	usb_write_port(adapt, ff_hwaddr, pbuf_tail, (u8 *)pxmitbuf);

	/* 3 5. update statistics */
	pbuf_tail -= (pfirstframe->agg_num * TXDESC_SIZE);
	pbuf_tail -= (pfirstframe->pkt_offset * PACKET_OFFSET_SZ);

	rtw_count_tx_stats(adapt, pfirstframe, pbuf_tail);

	rtw_free_xmitframe(pxmitpriv, pfirstframe);

	return true;
}
Example #23
void
schedule(void) {
    bool intr_flag;
    struct proc_struct *next;
#ifndef MT_SUPPORT
    list_entry_t head;
    int lapic_id = pls_read(lapic_id);
#endif

    local_intr_save(intr_flag);
    int lcpu_count = pls_read(lcpu_count);
    {
        current->need_resched = 0;
#ifndef MT_SUPPORT
        if (current->mm) {
            assert(current->mm->lapic == lapic_id);
            current->mm->lapic = -1;
        }
#endif
        if (current->state == PROC_RUNNABLE && current->pid >= lcpu_count) {
            sched_class_enqueue(current);
        }
#ifndef MT_SUPPORT
        list_init(&head);
        while (1) {
            next = sched_class_pick_next();
            if (next != NULL)
                sched_class_dequeue(next);

            if (next && next->mm && next->mm->lapic != -1) {
                list_add(&head, &(next->run_link));
            } else {
                list_entry_t *cur;
                while ((cur = list_next(&head)) != &head) {
                    list_del_init(cur);
                    sched_class_enqueue(le2proc(cur, run_link));
                }

                break;
            }
        }
#else
        next = sched_class_pick_next();
        if (next != NULL)
            sched_class_dequeue(next);
#endif  /* !MT_SUPPORT */
        if (next == NULL) {
            next = idleproc;
        }
        next->runs++;
        /* Collect information here */
        if (sched_collect_info) {
            int lcpu_count = pls_read(lcpu_count);
            int lcpu_idx = pls_read(lcpu_idx);
            int loc = sched_info_head[lcpu_idx];
            int prev = sched_info_pid[loc*lcpu_count + lcpu_idx];
            if (next->pid == prev)
                sched_info_times[loc*lcpu_count + lcpu_idx]++;
            else {
                sched_info_head[lcpu_idx]++;
                if (sched_info_head[lcpu_idx] >= PGSIZE / sizeof(uint16_t) / lcpu_count)
                    sched_info_head[lcpu_idx] = 0;
                loc = sched_info_head[lcpu_idx];
                uint16_t prev_pid = sched_info_pid[loc*lcpu_count + lcpu_idx];
                uint16_t prev_times = sched_info_times[loc*lcpu_count + lcpu_idx];
                if (prev_times > 0 && prev_pid >= lcpu_count + 2)
                    sched_slices[lcpu_idx][prev_pid % SLICEPOOL_SIZE] += prev_times;
                sched_info_pid[loc*lcpu_count + lcpu_idx] = next->pid;
                sched_info_times[loc*lcpu_count + lcpu_idx] = 1;
            }
        }
#ifndef MT_SUPPORT
        assert(!next->mm || next->mm->lapic == -1);
        if (next->mm)
            next->mm->lapic = lapic_id;
#endif
        if (next != current) {
#if 0
            kprintf("N %d to %d\n", current->pid, next->pid);
#endif
            proc_run(next);
        }
    }
    local_intr_restore(intr_flag);
}
Example #24
/**
 * process_cursors - do action on each cursor attached to inode
 * @inode: inode whose cursors are to be processed
 * @act: action to do
 *
 * Finds all cursors of @inode in reiser4's super block radix tree of cursors
 * and performs action specified by @act on each of cursors.
 */
static void process_cursors(struct inode *inode, enum cursor_action act)
{
	oid_t oid;
	dir_cursor *start;
	struct list_head *head;
	reiser4_context *ctx;
	struct d_cursor_info *info;

	/* this can be called by
	 *
	 * kswapd->...->prune_icache->..reiser4_destroy_inode
	 *
	 * without reiser4_context
	 */
	ctx = reiser4_init_context(inode->i_sb);
	if (IS_ERR(ctx)) {
		warning("vs-23", "failed to init context");
		return;
	}

	assert("nikita-3558", inode != NULL);

	info = d_info(inode);
	oid = get_inode_oid(inode);
	spin_lock_inode(inode);
	head = get_readdir_list(inode);
	spin_lock(&d_lock);
	/* find any cursor for this oid: a reference to it hangs off the
	 * radix tree */
	start = lookup(info, (unsigned long)oid);
	if (start != NULL) {
		dir_cursor *scan;
		reiser4_file_fsdata *fsdata;

		/* process circular list of cursors for this oid */
		scan = start;
		do {
			dir_cursor *next;

			next = list_entry(scan->list.next, dir_cursor, list);
			fsdata = scan->fsdata;
			assert("nikita-3557", fsdata != NULL);
			if (scan->key.oid == oid) {
				switch (act) {
				case CURSOR_DISPOSE:
					list_del_init(&fsdata->dir.linkage);
					break;
				case CURSOR_LOAD:
					list_add(&fsdata->dir.linkage, head);
					break;
				case CURSOR_KILL:
					kill_cursor(scan);
					break;
				}
			}
			if (scan == next)
				/* last cursor was just killed */
				break;
			scan = next;
		} while (scan != start);
	}
	spin_unlock(&d_lock);
	/* check that we killed 'em all */
	assert("nikita-3568",
	       ergo(act == CURSOR_KILL,
		    list_empty_careful(get_readdir_list(inode))));
	assert("nikita-3569",
	       ergo(act == CURSOR_KILL, lookup(info, oid) == NULL));
	spin_unlock_inode(inode);
	reiser4_exit_context(ctx);
}
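
A minimal sketch of how this helper is typically consumed: the cursor actions are assumed to be exposed through thin, action-specific wrappers, one per enum value (the wrapper names below are illustrative, not taken from the example above).

/* Illustrative wrappers, one per cursor_action value (assumed names). */
void reiser4_dispose_cursors(struct inode *inode)
{
	process_cursors(inode, CURSOR_DISPOSE);
}

void reiser4_load_cursors(struct inode *inode)
{
	process_cursors(inode, CURSOR_LOAD);
}

void reiser4_kill_cursors(struct inode *inode)
{
	process_cursors(inode, CURSOR_KILL);
}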
Example #25
void drbd_req_destroy(struct kref *kref)
{
	struct drbd_request *req = container_of(kref, struct drbd_request, kref);
	struct drbd_conf *mdev = req->w.mdev;
	const unsigned s = req->rq_state;

	if ((req->master_bio && !(s & RQ_POSTPONED)) ||
		atomic_read(&req->completion_ref) ||
		(s & RQ_LOCAL_PENDING) ||
		((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) {
		dev_err(DEV, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n",
				s, atomic_read(&req->completion_ref));
		return;
	}

	/* remove it from the transfer log.
	 * well, only if it had been there in the first
	 * place... if it had not (local only or conflicting
	 * and never sent), it should still be "empty" as
	 * initialized in drbd_req_new(), so we can list_del() it
	 * here unconditionally */
	list_del_init(&req->tl_requests);

	/* if it was a write, we may have to set the corresponding
	 * bit(s) out-of-sync first. If it had a local part, we need to
	 * release the reference to the activity log. */
	if (s & RQ_WRITE) {
		/* Set out-of-sync unless both OK flags are set
		 * (local only or remote failed).
		 * Other places where we set out-of-sync:
		 * READ with local io-error */

		/* There is a special case:
		 * we may notice late that IO was suspended,
		 * and postpone, or schedule for retry, a write,
		 * before it even was submitted or sent.
		 * In that case we do not want to touch the bitmap at all.
		 */
		if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) {
			if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
				drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);

			if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
				drbd_set_in_sync(mdev, req->i.sector, req->i.size);
		}

		/* one might be tempted to move the drbd_al_complete_io
		 * to the local io completion callback drbd_request_endio.
		 * but, if this was a mirror write, we may only
		 * drbd_al_complete_io after this is RQ_NET_DONE,
		 * otherwise the extent could be dropped from the al
		 * before it has actually been written on the peer.
		 * if we crash before our peer knows about the request,
		 * but after the extent has been dropped from the al,
		 * we would forget to resync the corresponding extent.
		 */
		if (s & RQ_IN_ACT_LOG) {
			if (get_ldev_if_state(mdev, D_FAILED)) {
				drbd_al_complete_io(mdev, &req->i);
				put_ldev(mdev);
			} else if (__ratelimit(&drbd_ratelimit_state)) {
				dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu, %u), "
					 "but my Disk seems to have failed :(\n",
					 (unsigned long long) req->i.sector, req->i.size);
			}
		}
	}

	mempool_free(req, drbd_request_mempool);
}
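
drbd_req_destroy() is written as a kref release callback, so callers never invoke it directly; a minimal sketch of the call-site pattern, assuming only the req->kref field visible above:

/* Sketch: dropping the last reference runs drbd_req_destroy().
 * kref_put() invokes the release function only when the count reaches zero. */
static void drbd_req_put(struct drbd_request *req)
{
	kref_put(&req->kref, drbd_req_destroy);
}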
Example #26
void
wait_queue_del(wait_queue_t *queue, wait_t *wait) {
    assert(!list_empty(&(wait->wait_link)) && wait->wait_queue == queue);
    list_del_init(&(wait->wait_link));
}
Example #27
/**
 * kthread_create - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run(), kthread_create_on_cpu().
 *
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create(int (*threadfn)(void *data),
				   void *data,
				   const char namefmt[],
				   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create);
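
A short usage sketch for the API documented above; my_threadfn, my_data and the name format are placeholders, not part of the source. The thread comes back stopped, so the caller must wake it.

/* Sketch: create a named kernel thread and start it running. */
static struct task_struct *start_my_thread(void *my_data, int id)
{
	struct task_struct *tsk;

	tsk = kthread_create(my_threadfn, my_data, "mydrv/%d", id);
	if (IS_ERR(tsk))
		return tsk;		/* caller checks with IS_ERR() */

	wake_up_process(tsk);		/* begins executing my_threadfn(my_data) */
	return tsk;
}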

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);
	get_task_struct(k);

	kthread = to_kthread(k);
	barrier(); /* it might have exited */
	if (k->vfork_done != NULL) {
		kthread->should_stop = 1;
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);

	return ret;
}
EXPORT_SYMBOL(kthread_stop);
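
The stop handshake only works if the thread function polls kthread_should_stop(); a hedged sketch of both halves (my_threadfn and stop_my_thread are hypothetical):

/* Sketch: a threadfn that exits cleanly once kthread_stop() is called. */
static int my_threadfn(void *data)
{
	while (!kthread_should_stop()) {
		/* do one unit of work, then sleep until there is more */
		schedule_timeout_interruptible(HZ);
	}
	return 0;	/* returned to the caller of kthread_stop() */
}

/* Caller side: kthread_stop() wakes the thread and waits for it to exit. */
static void stop_my_thread(struct task_struct *tsk)
{
	int ret = kthread_stop(tsk);

	pr_debug("my thread exited with %d\n", ret);
}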

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_HIGH_MEMORY]);

	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}


/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker.  The started kthread will process work_list until
 * it is stopped with kthread_stop().  A kthread can also call
 * this function directly after extra initialization.
 *
 * Different kthreads can be used for the same kthread_worker as long
 * as there's only one kthread attached to it at any given time.  A
 * kthread_worker without an attached kthread simply collects queued
 * kthread_works.
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	WARN_ON(worker->task);
	worker->task = current;
repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	goto repeat;
}
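
A usage sketch for the worker model described above, assuming the init_kthread_worker()/init_kthread_work()/queue_kthread_work() helpers of this kernel generation (later kernels renamed them to kthread_init_worker() and friends); my_work_fn and the thread name are placeholders.

/* Sketch: one dedicated thread draining a kthread_worker queue. */
static struct kthread_worker my_worker;
static struct kthread_work my_work;

static void my_work_fn(struct kthread_work *work)
{
	/* executes in the worker thread's context */
}

static int start_my_worker(void)
{
	struct task_struct *tsk;

	init_kthread_worker(&my_worker);
	init_kthread_work(&my_work, my_work_fn);

	tsk = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	queue_kthread_work(&my_worker, &my_work);
	return 0;
}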
Example #28
/*
 * Any time a mark is getting freed we end up here.
 * The caller had better be holding a reference to this mark so we don't actually
 * do the final put under the mark->lock
 */
void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
				  struct fsnotify_group *group)
{
	struct inode *inode = NULL;

	BUG_ON(!mutex_is_locked(&group->mark_mutex));

	spin_lock(&mark->lock);

	/* something else already called this function on this mark */
	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) {
		spin_unlock(&mark->lock);
		return;
	}

	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;

	if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
		inode = mark->inode;
		fsnotify_destroy_inode_mark(mark);
	} else if (mark->flags & FSNOTIFY_MARK_FLAG_VFSMOUNT) {
		fsnotify_destroy_vfsmount_mark(mark);
	} else if (mark->flags & FSNOTIFY_MARK_FLAG_TASK) {
		fsnotify_destroy_task_mark(mark);
	} else {
		BUG();
	}

	list_del_init(&mark->g_list);

	spin_unlock(&mark->lock);

	if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED))
		iput(inode);
	/* release lock temporarily */
	mutex_unlock(&group->mark_mutex);

	spin_lock(&destroy_lock);
	list_add(&mark->g_list, &destroy_list);
	spin_unlock(&destroy_lock);
	wake_up(&destroy_waitq);
	/*
	 * We don't necessarily have a ref on mark from caller so the above destroy
	 * may have actually freed it, unless this group provides a 'freeing_mark'
	 * function which must be holding a reference.
	 */

	/*
	 * Some groups like to know that marks are being freed.  This is a
	 * callback to the group function to let it know that this mark
	 * is being freed.
	 */
	if (group->ops->freeing_mark)
		group->ops->freeing_mark(mark, group);

	/*
	 * __fsnotify_update_child_dentry_flags(inode);
	 *
	 * I really want to call that, but we can't, we have no idea if the inode
	 * still exists the second we drop the mark->lock.
	 *
	 * The next time an event arrives at this inode from one of its children,
	 * __fsnotify_parent will see that the inode doesn't care about its
	 * children and will update all of these flags then.  So really this
	 * is just a lazy update (and could be a perf win...)
	 */

	atomic_dec(&group->num_marks);

	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
}
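
Because this variant expects group->mark_mutex to be held (and re-takes it before returning), the caller-facing entry point is assumed to be a thin wrapper along these lines:

/* Sketch: unlocked wrapper; takes the mutex, destroys, releases it again. */
void fsnotify_destroy_mark(struct fsnotify_mark *mark,
			   struct fsnotify_group *group)
{
	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
	fsnotify_destroy_mark_locked(mark, group);
	mutex_unlock(&group->mark_mutex);
}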
Example #29
/*
 * rrpc_move_valid_pages -- migrate live data off the block
 * @rrpc: the 'rrpc' structure
 * @rblk: the block from which to migrate live pages
 *
 * Description:
 *   GC algorithms may call this function to migrate remaining live
 *   pages off the block prior to erasing it. This function blocks
 *   further execution until the operation is complete.
 */
static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct request_queue *q = dev->q;
	struct rrpc_rev_addr *rev;
	struct nvm_rq *rqd;
	struct bio *bio;
	struct page *page;
	int slot;
	int nr_sec_per_blk = dev->geo.sec_per_blk;
	u64 phys_addr;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
		return 0;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		pr_err("nvm: could not alloc bio to gc\n");
		return -ENOMEM;
	}

	page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
	if (!page) {
		bio_put(bio);
		return -ENOMEM;
	}

	while ((slot = find_first_zero_bit(rblk->invalid_pages,
					    nr_sec_per_blk)) < nr_sec_per_blk) {

		/* Lock laddr */
		phys_addr = rrpc_blk_to_ppa(rrpc, rblk) + slot;

try:
		spin_lock(&rrpc->rev_lock);
		/* Get logical address from physical to logical table */
		rev = &rrpc->rev_trans_map[phys_addr];
		/* already updated by previous regular write */
		if (rev->addr == ADDR_EMPTY) {
			spin_unlock(&rrpc->rev_lock);
			continue;
		}

		rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
		if (IS_ERR_OR_NULL(rqd)) {
			spin_unlock(&rrpc->rev_lock);
			schedule();
			goto try;
		}

		spin_unlock(&rrpc->rev_lock);

		/* Perform read to do GC */
		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio_set_op_attrs(bio,  REQ_OP_READ, 0);
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		/* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc read failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);
		if (bio->bi_error) {
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}

		bio_reset(bio);
		reinit_completion(&wait);

		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		/* turn the command around and write the data back to a new
		 * address
		 */
		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc write failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);

		rrpc_inflight_laddr_release(rrpc, rqd);
		if (bio->bi_error)
			goto finished;

		bio_reset(bio);
	}

finished:
	mempool_free(page, rrpc->page_pool);
	bio_put(bio);

	if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
		pr_err("nvm: failed to garbage collect block\n");
		return -EIO;
	}

	return 0;
}

static void rrpc_block_gc(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct rrpc_lun *rlun = rblk->rlun;
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct ppa_addr ppa;

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' being reclaimed\n",
			rlun->bppa.g.ch, rlun->bppa.g.lun,
			rblk->id);

	if (rrpc_move_valid_pages(rrpc, rblk))
		goto put_back;

	ppa.ppa = 0;
	ppa.g.ch = rlun->bppa.g.ch;
	ppa.g.lun = rlun->bppa.g.lun;
	ppa.g.blk = rblk->id;

	if (nvm_erase_blk(dev, &ppa, 0))
		goto put_back;

	rrpc_put_blk(rrpc, rblk);

	return;

put_back:
	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);
}

/* the block with the highest number of invalid pages will be at the
 * beginning of the list
 */
static struct rrpc_block *rblk_max_invalid(struct rrpc_block *ra,
							struct rrpc_block *rb)
{
	if (ra->nr_invalid_pages == rb->nr_invalid_pages)
		return ra;

	return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
}

/* linearly find the block with the highest number of invalid pages;
 * requires lun->lock to be held
 */
static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
{
	struct list_head *prio_list = &rlun->prio_list;
	struct rrpc_block *rblk, *max;

	BUG_ON(list_empty(prio_list));

	max = list_first_entry(prio_list, struct rrpc_block, prio);
	list_for_each_entry(rblk, prio_list, prio)
		max = rblk_max_invalid(max, rblk);

	return max;
}

static void rrpc_lun_gc(struct work_struct *work)
{
	struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
	struct rrpc *rrpc = rlun->rrpc;
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_block_gc *gcb;
	unsigned int nr_blocks_need;

	nr_blocks_need = dev->geo.blks_per_lun / GC_LIMIT_INVERSE;

	if (nr_blocks_need < rrpc->nr_luns)
		nr_blocks_need = rrpc->nr_luns;

	spin_lock(&rlun->lock);
	while (nr_blocks_need > rlun->nr_free_blocks &&
					!list_empty(&rlun->prio_list)) {
		struct rrpc_block *rblk = block_prio_find_max(rlun);

		if (!rblk->nr_invalid_pages)
			break;

		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
		if (!gcb)
			break;

		list_del_init(&rblk->prio);

		WARN_ON(!block_is_full(rrpc, rblk));

		pr_debug("rrpc: selected block 'ch:%d,lun:%d,blk:%d' for GC\n",
					rlun->bppa.g.ch, rlun->bppa.g.lun,
					rblk->id);

		gcb->rrpc = rrpc;
		gcb->rblk = rblk;
		INIT_WORK(&gcb->ws_gc, rrpc_block_gc);

		queue_work(rrpc->kgc_wq, &gcb->ws_gc);

		nr_blocks_need--;
	}
	spin_unlock(&rlun->lock);

	/* TODO: Hint that request queue can be started again */
}

static void rrpc_gc_queue(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct rrpc_lun *rlun = rblk->rlun;

	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' full, allow GC (sched)\n",
					rlun->bppa.g.ch, rlun->bppa.g.lun,
					rblk->id);
}

static const struct block_device_operations rrpc_fops = {
	.owner		= THIS_MODULE,
};

static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
{
	unsigned int i;
	struct rrpc_lun *rlun, *max_free;

	if (!is_gc)
		return get_next_lun(rrpc);

	/* during GC we don't care about the round-robin order; instead we
	 * want to maintain evenness between the LUNs.
	 */
	max_free = &rrpc->luns[0];
	/* prevent the GC-ing LUN from devouring pages of a LUN with
	 * few free blocks. We don't take the lock as we only need an
	 * estimate.
	 */
	rrpc_for_each_lun(rrpc, rlun, i) {
		if (rlun->nr_free_blocks > max_free->nr_free_blocks)
			max_free = rlun;
	}

	return max_free;
}
Example #30
static void sas_ata_task_done(struct sas_task *task)
{
	struct ata_queued_cmd *qc = task->uldd_task;
	struct domain_device *dev = task->dev;
	struct task_status_struct *stat = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)stat->buf;
	struct sas_ha_struct *sas_ha = dev->port->ha;
	enum ata_completion_errors ac;
	unsigned long flags;
	struct ata_link *link;
	struct ata_port *ap;

	spin_lock_irqsave(&dev->done_lock, flags);
	if (test_bit(SAS_HA_FROZEN, &sas_ha->state))
		task = NULL;
	else if (qc && qc->scsicmd)
		ASSIGN_SAS_TASK(qc->scsicmd, NULL);
	spin_unlock_irqrestore(&dev->done_lock, flags);

	/* check if libsas-eh got to the task before us */
	if (unlikely(!task))
		return;

	if (!qc)
		goto qc_already_gone;

	ap = qc->ap;
	link = &ap->link;

	spin_lock_irqsave(ap->lock, flags);
	/* check if we lost the race with libata/sas_ata_post_internal() */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) {
		spin_unlock_irqrestore(ap->lock, flags);
		if (qc->scsicmd)
			goto qc_already_gone;
		else {
			/* if eh is not involved and the port is frozen then the
			 * ata internal abort process has taken responsibility
			 * for this sas_task
			 */
			return;
		}
	}

	if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
	    ((stat->stat == SAM_STAT_CHECK_CONDITION &&
	      dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
		ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);

		if (!link->sactive) {
			qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
		} else {
			link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.tf.command);
			if (unlikely(link->eh_info.err_mask))
				qc->flags |= ATA_QCFLAG_FAILED;
		}
	} else {
		ac = sas_to_ata_err(stat);
		if (ac) {
			SAS_DPRINTK("%s: SAS error %x\n", __func__,
				    stat->stat);
			/* We saw a SAS error. Send a vague error. */
			if (!link->sactive) {
				qc->err_mask = ac;
			} else {
				link->eh_info.err_mask |= AC_ERR_DEV;
				qc->flags |= ATA_QCFLAG_FAILED;
			}

			dev->sata_dev.tf.feature = 0x04; /* status err */
			dev->sata_dev.tf.command = ATA_ERR;
		}
	}

	qc->lldd_task = NULL;
	ata_qc_complete(qc);
	spin_unlock_irqrestore(ap->lock, flags);

qc_already_gone:
	list_del_init(&task->list);
	sas_free_task(task);
}
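
sas_ata_task_done() is a completion callback rather than a function drivers call directly; a hedged fragment showing how it would be attached when an ATA qc is wrapped in a sas_task (allocation and the rest of the setup are elided, the helper name is hypothetical):

/* Sketch: hook the callback up so task->uldd_task round-trips the qc. */
static void my_attach_ata_done(struct sas_task *task, struct ata_queued_cmd *qc)
{
	task->uldd_task = qc;			/* read back in sas_ata_task_done() */
	task->task_done = sas_ata_task_done;	/* invoked by the LLDD on completion */
}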