Example #1
0
static int
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *xprt = id->context;
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct rpcrdma_ep *ep = &xprt->rx_ep;
#ifdef RPC_DEBUG
	struct sockaddr_in *addr = (struct sockaddr_in *) &ep->rep_remote_addr;
#endif
	struct ib_qp_attr attr;
	struct ib_qp_init_attr iattr;
	int connstate = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EHOSTUNREACH;
		dprintk("RPC:       %s: CM address resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		dprintk("RPC:       %s: CM route resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		connstate = 1;
		ib_query_qp(ia->ri_id->qp, &attr,
			IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC,
			&iattr);
		dprintk("RPC:       %s: %d responder resources"
			" (%d initiator)\n",
			__func__, attr.max_dest_rd_atomic, attr.max_rd_atomic);
		goto connected;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		connstate = -ENOTCONN;
		goto connected;
	case RDMA_CM_EVENT_UNREACHABLE:
		connstate = -ENETDOWN;
		goto connected;
	case RDMA_CM_EVENT_REJECTED:
		connstate = -ECONNREFUSED;
		goto connected;
	case RDMA_CM_EVENT_DISCONNECTED:
		connstate = -ECONNABORTED;
		goto connected;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		connstate = -ENODEV;
connected:
		dprintk("RPC:       %s: %s: %pI4:%u (ep 0x%p event 0x%x)\n",
			__func__,
			(event->event <= 11) ? conn[event->event] :
						"unknown connection error",
			&addr->sin_addr.s_addr,
			ntohs(addr->sin_port),
			ep, event->event);
		atomic_set(&rpcx_to_rdmax(ep->rep_xprt)->rx_buf.rb_credits, 1);
		dprintk("RPC:       %s: %sconnected\n",
					__func__, connstate > 0 ? "" : "dis");
		ep->rep_connected = connstate;
		ep->rep_func(ep);
		wake_up_all(&ep->rep_connect_wait);
		break;
	default:
		dprintk("RPC:       %s: unexpected CM event %d\n",
			__func__, event->event);
		break;
	}

#ifdef RPC_DEBUG
	if (connstate == 1) {
		int ird = attr.max_dest_rd_atomic;
		int tird = ep->rep_remote_cma.responder_resources;
		printk(KERN_INFO "rpcrdma: connection to %pI4:%u "
			"on %s, memreg %d slots %d ird %d%s\n",
			&addr->sin_addr.s_addr,
			ntohs(addr->sin_port),
			ia->ri_id->device->name,
			ia->ri_memreg_strategy,
			xprt->rx_buf.rb_max_requests,
			ird, ird < 4 && ird < tird / 2 ? " (low!)" : "");
	} else if (connstate < 0) {
		printk(KERN_INFO "rpcrdma: connection to %pI4:%u closed (%d)\n",
			&addr->sin_addr.s_addr,
			ntohs(addr->sin_port),
			connstate);
	}
#endif

	return 0;
}
Example #2
0
/*
 * osprd_ioctl(inode, filp, cmd, arg)
 *   Called to perform an ioctl on the named file.
 */
int osprd_ioctl(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	osprd_info_t *d = file2osprd(filp);	// device info
	int r = 0;			// return value: initially 0
	unsigned int my_ticket;
	// is file open for writing?
	int filp_writable = (filp->f_mode & FMODE_WRITE) != 0;

	// This line avoids compiler warnings; you may remove it.
	(void) filp_writable, (void) d;

	// Set 'r' to the ioctl's return value: 0 on success, negative on error

	if (cmd == OSPRDIOCACQUIRE) {

		// EXERCISE: Lock the ramdisk.
		//
		// If *filp is open for writing (filp_writable), then attempt
		// to write-lock the ramdisk; otherwise attempt to read-lock
		// the ramdisk.
		//
		// This lock request must block using 'd->blockq' until:
		// 1) no other process holds a write lock;
		// 2) either the request is for a read lock, or no other process
		//    holds a read lock; and
		// 3) lock requests should be serviced in order, so no process
		//    that blocked earlier is still blocked waiting for the
		//    lock.
		//
		// If a process acquires a lock, mark this fact by setting
		// 'filp->f_flags |= F_OSPRD_LOCKED'.  You also need to
		// keep track of how many read and write locks are held:
		// change the 'osprd_info_t' structure to do this.
		//
		// Also wake up processes waiting on 'd->blockq' as needed.
		//
		// If the lock request would cause a deadlock, return -EDEADLK.
		// If the lock request blocks and is awoken by a signal, then
		// return -ERESTARTSYS.
		// Otherwise, if we can grant the lock request, return 0.

		// 'd->ticket_head' and 'd->ticket_tail' should help you
		// service lock requests in order.  These implement a ticket
		// order: 'ticket_tail' is the next ticket, and 'ticket_head'
		// is the ticket currently being served.  You should set a local
		// variable to 'd->ticket_head' and increment 'd->ticket_head'.
		// Then, block at least until 'd->ticket_tail == local_ticket'.
		// (Some of these operations are in a critical section and must
		// be protected by a spinlock; which ones?)

		// Your code here (instead of the next two lines).
		if(filp_writable)
		{
			if((d->nwriters == 1 && d->writelockPid == current->pid) || findpid(d->readlockPids,current->pid,'f'))
				return -EDEADLK;
			osp_spin_lock(&d->mutex);
			my_ticket = d->ticket_head;
			d->ticket_head++;
			osp_spin_unlock(&d->mutex);
			int stat = wait_event_interruptible(d->blockq, d->ticket_tail == my_ticket && d->nwriters == 0 && d->readlockPids->num == 0);//or filp->f_flags & F_OSPRD_LOCKED == 0
			if(stat == -ERESTARTSYS)
			{
				if(my_ticket == d->ticket_tail)
				{
					//lock mutex
					//increment the ticket tail to the first alive 
					//unlock mutex
					osp_spin_lock(&d->mutex);
					while(findticket(d->exitlist,d->ticket_tail,'f') && d->ticket_tail<d->ticket_head) // increment to first alive process
						d->ticket_tail++;
					osp_spin_unlock(&d->mutex);
				}
				else
				{
					osp_spin_lock(&d->mutex);
					pushticket(d->exitlist,my_ticket); //what if multiple processes get killed at the same time
					osp_spin_unlock(&d->mutex);
				}
				r = stat;
			}
			else
			{
				osp_spin_lock(&d->mutex);
				d->nwriters = 1;
				d->writelockPid = current->pid; 
				filp->f_flags |= F_OSPRD_LOCKED;
				d->ticket_tail++;//writer calls wake up all only when it releases...
				while(findticket(d->exitlist,d->ticket_tail,'f') && d->ticket_tail<d->ticket_head) // increment to first alive process
					d->ticket_tail++;
				osp_spin_unlock(&d->mutex);				
				r = 0;
			}
		}
		else
		{
			if(d->nwriters == 1 && d->writelockPid == current->pid)
				return -EDEADLK;
			osp_spin_lock(&d->mutex);
			my_ticket = d->ticket_head;
			d->ticket_head++;
			osp_spin_unlock(&d->mutex);
			int stat = wait_event_interruptible(d->blockq, d->ticket_tail == my_ticket && d->nwriters == 0);
			if(stat == -ERESTARTSYS)
			{
				if(my_ticket == d->ticket_tail)
				{
					//lock mutex
					//increment the ticket tail to the first alive 
					//unlock mutex
					osp_spin_lock(&d->mutex);
					while(findticket(d->exitlist,d->ticket_tail,'f') && d->ticket_tail<d->ticket_head) // increment to first alive process
						d->ticket_tail++;
					osp_spin_unlock(&d->mutex);
				}
				else
				{
					osp_spin_lock(&d->mutex);
					pushticket(d->exitlist,my_ticket); //what if multiple processes get killed at the same time
					osp_spin_unlock(&d->mutex);
				}
				r = stat;
			}
			else // got the lock
			{	
				osp_spin_lock(&d->mutex); //multiple readers try to push into readlist
				pushpid(d->readlockPids, current->pid);
				d->ticket_tail++;
				while(findticket(d->exitlist,d->ticket_tail,'f') && d->ticket_tail<d->ticket_head) // increment to first alive process
					d->ticket_tail++;
				filp->f_flags |= F_OSPRD_LOCKED;
				osp_spin_unlock(&d->mutex);
				wake_up_all(&d->blockq); // just incrementing ticket_tail doesn't work; must also wake everyone so they reevaluate the condition
				r = 0;
			}
		}
		/*eprintk("Attempting to acquire\n");
		r = -ENOTTY;
		*/
	} else if (cmd == OSPRDIOCTRYACQUIRE) {

		// EXERCISE: ATTEMPT to lock the ramdisk.
		//
		// This is just like OSPRDIOCACQUIRE, except it should never
		// block.  If OSPRDIOCACQUIRE would block or return deadlock,
		// OSPRDIOCTRYACQUIRE should return -EBUSY.
		// Otherwise, if we can grant the lock request, return 0.
		if(filp_writable)
		{
			osp_spin_lock(&d->mutex);
			if(d->nwriters == 0 && d->readlockPids->num == 0)
			{
				d->nwriters = 1;
				d->writelockPid = current->pid;
				filp->f_flags |= F_OSPRD_LOCKED;
				r = 0;
			}
			else r = -EBUSY;
			osp_spin_unlock(&d->mutex);
		}
		else
		{
			osp_spin_lock(&d->mutex);
			if(d->nwriters == 0)
			{
				pushpid(d->readlockPids, current->pid);
				filp->f_flags |= F_OSPRD_LOCKED;
				r = 0;
			}
			else 
				r = -EBUSY;
			osp_spin_unlock(&d->mutex);
		}
		
		// Your code here (instead of the next two lines).
		
		//eprintk("Attempting to try acquire\n");
		//r = -ENOTTY;

	} else if (cmd == OSPRDIOCRELEASE) {

		// EXERCISE: Unlock the ramdisk.
		//
		// If the file hasn't locked the ramdisk, return -EINVAL.
		// Otherwise, clear the lock from filp->f_flags, wake up
		// the wait queue, perform any additional accounting steps
		// you need, and return 0.

		// Your code here (instead of the next line).
		//r = -ENOTTY;
		
		if(!(filp->f_flags & F_OSPRD_LOCKED))
			return -EINVAL;
		char wake = 't';
		osp_spin_lock(&d->mutex);
		if(d->readlockPids->num > 1)
		{
			findpid(d->readlockPids,current->pid,'r');
			wake = 'f';
		}
		else if(d->readlockPids->num == 1)
		{
			findpid(d->readlockPids,current->pid,'r');
			filp->f_flags &= ~F_OSPRD_LOCKED;
		}
		else //must be a writer.....
		{
			d->nwriters = 0;
			filp->f_flags &= ~F_OSPRD_LOCKED;
		}
		osp_spin_unlock(&d->mutex);
		if(wake == 't')
			wake_up_all(&d->blockq);
		r = 0;
		
	} else
		r = -ENOTTY; /* unknown command */
	return r;
}
Example #3
0
enum sci_status sci_remote_node_context_suspend(
			struct sci_remote_node_context *sci_rnc,
			enum sci_remote_node_suspension_reasons suspend_reason,
			u32 suspend_type)
{
	enum scis_sds_remote_node_context_states state
		= sci_rnc->sm.current_state_id;
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	enum sci_status status = SCI_FAILURE_INVALID_STATE;
	enum sci_remote_node_context_destination_state dest_param =
		RNC_DEST_UNSPECIFIED;

	dev_dbg(scirdev_to_dev(idev),
		"%s: current state %s, current suspend_type %x dest state %d,"
			" arg suspend_reason %d, arg suspend_type %x",
		__func__, rnc_state_name(state), sci_rnc->suspend_type,
		sci_rnc->destination_state, suspend_reason,
		suspend_type);

	/* Disable automatic state continuations if explicitly suspending. */
	if ((suspend_reason == SCI_HW_SUSPEND) ||
	    (sci_rnc->destination_state == RNC_DEST_FINAL))
		dest_param = sci_rnc->destination_state;

	switch (state) {
	case SCI_RNC_READY:
		break;
	case SCI_RNC_INVALIDATING:
		if (sci_rnc->destination_state == RNC_DEST_FINAL) {
			dev_warn(scirdev_to_dev(idev),
				 "%s: already destroying %p\n",
				 __func__, sci_rnc);
			return SCI_FAILURE_INVALID_STATE;
		}
		/* Fall through and handle like SCI_RNC_POSTING */
	case SCI_RNC_RESUMING:
		/* Fall through and handle like SCI_RNC_POSTING */
	case SCI_RNC_POSTING:
		/* Set the destination state to AWAIT - this signals the
		 * entry into the SCI_RNC_READY state that a suspension
		 * needs to be done immediately.
		 */
		if (sci_rnc->destination_state != RNC_DEST_FINAL)
			sci_rnc->destination_state = RNC_DEST_SUSPENDED;
		sci_rnc->suspend_type = suspend_type;
		sci_rnc->suspend_reason = suspend_reason;
		return SCI_SUCCESS;

	case SCI_RNC_TX_SUSPENDED:
		if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX)
			status = SCI_SUCCESS;
		break;
	case SCI_RNC_TX_RX_SUSPENDED:
		if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
			status = SCI_SUCCESS;
		break;
	case SCI_RNC_AWAIT_SUSPENSION:
		if ((sci_rnc->suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
		    || (suspend_type == sci_rnc->suspend_type))
			return SCI_SUCCESS;
		break;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
	sci_rnc->destination_state = dest_param;
	sci_rnc->suspend_type = suspend_type;
	sci_rnc->suspend_reason = suspend_reason;

	if (status == SCI_SUCCESS) { /* Already in the destination state? */
		struct isci_host *ihost = idev->owning_port->owning_controller;

		wake_up_all(&ihost->eventq); /* Let observers look. */
		return SCI_SUCCESS;
	}
	if ((suspend_reason == SCI_SW_SUSPEND_NORMAL) ||
	    (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)) {

		if (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)
			isci_dev_set_hang_detection_timeout(idev, 0x00000001);

		sci_remote_device_post_request(
			idev, SCI_SOFTWARE_SUSPEND_CMD);
	}
	if (state != SCI_RNC_AWAIT_SUSPENSION)
		sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);

	return SCI_SUCCESS;
}
Example #4
0
static int
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *xprt = id->context;
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct rpcrdma_ep *ep = &xprt->rx_ep;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	struct sockaddr *sap = (struct sockaddr *)&ep->rep_remote_addr;
#endif
	struct ib_qp_attr *attr = &ia->ri_qp_attr;
	struct ib_qp_init_attr *iattr = &ia->ri_qp_init_attr;
	int connstate = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EHOSTUNREACH;
		dprintk("RPC:       %s: CM address resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		dprintk("RPC:       %s: CM route resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		connstate = 1;
		ib_query_qp(ia->ri_id->qp, attr,
			    IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC,
			    iattr);
		dprintk("RPC:       %s: %d responder resources"
			" (%d initiator)\n",
			__func__, attr->max_dest_rd_atomic,
			attr->max_rd_atomic);
		rpcrdma_update_connect_private(xprt, &event->param.conn);
		goto connected;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		connstate = -ENOTCONN;
		goto connected;
	case RDMA_CM_EVENT_UNREACHABLE:
		connstate = -ENETDOWN;
		goto connected;
	case RDMA_CM_EVENT_REJECTED:
		connstate = -ECONNREFUSED;
		goto connected;
	case RDMA_CM_EVENT_DISCONNECTED:
		connstate = -ECONNABORTED;
		goto connected;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		connstate = -ENODEV;
connected:
		dprintk("RPC:       %s: %sconnected\n",
					__func__, connstate > 0 ? "" : "dis");
		atomic_set(&xprt->rx_buf.rb_credits, 1);
		ep->rep_connected = connstate;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
		/*FALLTHROUGH*/
	default:
		dprintk("RPC:       %s: %pIS:%u (ep 0x%p): %s\n",
			__func__, sap, rpc_get_port(sap), ep,
			rdma_event_msg(event->event));
		break;
	}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (connstate == 1) {
		int ird = attr->max_dest_rd_atomic;
		int tird = ep->rep_remote_cma.responder_resources;

		pr_info("rpcrdma: connection to %pIS:%u on %s, memreg '%s', %d credits, %d responders%s\n",
			sap, rpc_get_port(sap),
			ia->ri_device->name,
			ia->ri_ops->ro_displayname,
			xprt->rx_buf.rb_max_requests,
			ird, ird < 4 && ird < tird / 2 ? " (low!)" : "");
	} else if (connstate < 0) {
		pr_info("rpcrdma: connection to %pIS:%u closed (%d)\n",
			sap, rpc_get_port(sap), connstate);
	}
#endif

	return 0;
}
Example #5
0
static void give_own_timer_callback(unsigned long data)
{
    lte_dev.sdio_thread_kick_own_timer = 1;
    wake_up_all(&lte_dev.sdio_thread_wq);
}
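A hypothetical way this callback might be armed with the classic (pre-4.15) timer API, whose handlers take an unsigned long cookie as above. This is a sketch: the timer field name sdio_kick_timer is invented, not taken from the driver.

/* Sketch: arm give_own_timer_callback() to fire one second from now. */
setup_timer(&lte_dev.sdio_kick_timer, give_own_timer_callback, 0UL);
mod_timer(&lte_dev.sdio_kick_timer, jiffies + HZ);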
Example #6
0
/*
 * osprd_ioctl(inode, filp, cmd, arg)
 *   Called to perform an ioctl on the named file.
 */
int osprd_ioctl(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	osprd_info_t *d = file2osprd(filp);	// device info
	int r = 0;			// return value: initially 0
    DEFINE_WAIT(wait); // using the low-level wait API directly

	// is file open for writing?
	int filp_writable = (filp->f_mode & FMODE_WRITE) != 0;

	// This line avoids compiler warnings; you may remove it.
	(void) filp_writable, (void) d;

	// Set 'r' to the ioctl's return value: 0 on success, negative on error

	if (cmd == OSPRDIOCACQUIRE) {

		// EXERCISE: Lock the ramdisk.
		//
		// If *filp is open for writing (filp_writable), then attempt
		// to write-lock the ramdisk; otherwise attempt to read-lock
		// the ramdisk.
		//
		// This lock request must block using 'd->blockq' until:
		// 1) no other process holds a write lock;
		// 2) either the request is for a read lock, or no other process
		//    holds a read lock; and
		// 3) lock requests should be serviced in order, so no process
		//    that blocked earlier is still blocked waiting for the
		//    lock.
		//
		// If a process acquires a lock, mark this fact by setting
		// 'filp->f_flags |= F_OSPRD_LOCKED'.  You also need to
		// keep track of how many read and write locks are held:
		// change the 'osprd_info_t' structure to do this.
		//
		// Also wake up processes waiting on 'd->blockq' as needed.
		//
		// If the lock request would cause a deadlock, return -EDEADLK.
		// If the lock request blocks and is awoken by a signal, then
		// return -ERESTARTSYS.
		// Otherwise, if we can grant the lock request, return 0.

		// 'd->ticket_head' and 'd->ticket_tail' should help you
		// service lock requests in order.  These implement a ticket
		// order: 'ticket_tail' is the next ticket, and 'ticket_head'
		// is the ticket currently being served.  You should set a local
		// variable to 'd->ticket_head' and increment 'd->ticket_head'.
		// Then, block at least until 'd->ticket_tail == local_ticket'.
		// (Some of these operations are in a critical section and must
		// be protected by a spinlock; which ones?)

		// Your code here (instead of the next two lines).
        
        if (filp_writable) //means we want the write lock.
        {
            osp_spin_lock(&d->mutex);

            if (d->q_size > 0) //if another proc is waiting, give control to "front of line"
            {
                if (!d->write_lock && !d->read_locks) // no locks except us
                    wake_up_all(&d->blockq); 
                d->q_size++; //add to back of queue
                prepare_to_wait_exclusive(&d->blockq, &wait, TASK_INTERRUPTIBLE);
                // add to write queue
                osp_spin_unlock(&d->mutex);
                schedule(); //go to sleep until wake_up_all wakes us

                //wake up
                osp_spin_lock(&d->mutex);
                finish_wait(&d->blockq, &wait); //delete from queue

                d->q_size--;
                //check that wasn't interrupted
                if (signal_pending(current))
                {
                    osp_spin_unlock(&d->mutex);
                    return -ERESTARTSYS;
                }
            }
            // at "front of line." Now check that no readers / writers
            while (d->write_lock || d->read_locks)
            {
                //if the lock is held just go back to back of line.
                prepare_to_wait_exclusive(&d->blockq, &wait, TASK_INTERRUPTIBLE);
                d->q_size++;
                osp_spin_unlock(&d->mutex);
                schedule();

                //wake up
                osp_spin_lock(&d->mutex);
                finish_wait(&d->blockq, &wait);
                d->q_size--;
                if (signal_pending(current))
                {
                    osp_spin_unlock(&d->mutex);
                    return -ERESTARTSYS;
                }
            }
            //when this breaks we can get the lock.
            d->write_lock = 1;
            d->write_lock_owner = current->pid;
                
            filp->f_flags |= F_OSPRD_LOCKED;
            osp_spin_unlock(&d->mutex);
        }
        else //we want a read lock
        {
            osp_spin_lock(&d->mutex);
            if (d->q_size > 0) //if another proc is waiting, give control to "front of line"
            {
                if (!d->write_lock && !d->read_locks) // no locks except us
                    wake_up_all(&d->blockq); 
                d->q_size++; //add to back of queue
                prepare_to_wait_exclusive(&d->blockq, &wait, TASK_INTERRUPTIBLE);
                // add to write queue
                osp_spin_unlock(&d->mutex);
                schedule(); //go to sleep until wake_up_all wakes us

                //wake up
                osp_spin_lock(&d->mutex);
                finish_wait(&d->blockq, &wait); //delete from queue

                d->q_size--;
                //check that wasn't interrupted
                if (signal_pending(current))
                {
                    osp_spin_unlock(&d->mutex);
                    return -ERESTARTSYS;
                }
            }
            // at "front of line." Now check that no writers (readers ok)
            while (d->write_lock)
            {
                //if the lock is held just go back to back of line.
                prepare_to_wait_exclusive(&d->blockq, &wait, TASK_INTERRUPTIBLE);
                d->q_size++;
                osp_spin_unlock(&d->mutex);
                schedule();

                //wake up
                osp_spin_lock(&d->mutex);
                finish_wait(&d->blockq, &wait);
                d->q_size--;
                if (signal_pending(current))
                {
                    osp_spin_unlock(&d->mutex);
                    return -ERESTARTSYS;
                }
            }
            //when this breaks we can get the lock.
            d->read_locks++;
            filp->f_flags |= F_OSPRD_LOCKED;
            osp_spin_unlock(&d->mutex);
        }
	} else if (cmd == OSPRDIOCTRYACQUIRE) {

		// EXERCISE: ATTEMPT to lock the ramdisk.
		//
		// This is just like OSPRDIOCACQUIRE, except it should never
		// block.  If OSPRDIOCACQUIRE would block or return deadlock,
		// OSPRDIOCTRYACQUIRE should return -EBUSY.
		// Otherwise, if we can grant the lock request, return 0.

		// Your code here (instead of the next two lines).
		//r = -ENOTTY;
        if (filp_writable)
        {
            //try to get a write lock
            osp_spin_lock(&d->mutex);
            if (d->write_lock || d->read_locks) //if the file is locked, fail
            {
                osp_spin_unlock(&d->mutex);
                return -EBUSY;
            }
            else //no write lock, no read locks.
            {
                //get the write lock
                d->write_lock = 1;
                d->write_lock_owner = current->pid;
                filp->f_flags |= F_OSPRD_LOCKED;
                osp_spin_unlock(&d->mutex);
            }
        }
        else //read lock
        {
            osp_spin_lock(&d->mutex);
            if (d->write_lock) //locked for writing
            {
                osp_spin_unlock(&d->mutex);
                return -EBUSY;
            }
            else
            {
                d->read_locks++;
                filp->f_flags |= F_OSPRD_LOCKED;
                osp_spin_unlock(&d->mutex);
            }
        }
	} else if (cmd == OSPRDIOCRELEASE) {

		// EXERCISE: Unlock the ramdisk.
		//
		// If the file hasn't locked the ramdisk, return -EINVAL.
		// Otherwise, clear the lock from filp->f_flags, wake up
		// the wait queue, perform any additional accounting steps
		// you need, and return 0.

		// Your code here (instead of the next line).
        if (!(filp->f_flags & F_OSPRD_LOCKED))
            return -EINVAL; // the file isn't even locked
        //else

        filp->f_flags &= ~F_OSPRD_LOCKED; //unlock flag
        osp_spin_lock(&d->mutex);
        if (filp_writable) //had a write lock
        {
            d->write_lock = 0; //release the lock
            d->write_lock_owner = -1;
            wake_up_all(&d->blockq); //wake up the queue and get next
        } 
        else //read lock
        {
            d->read_locks--;
            if (!d->read_locks) //wake up the queue if no more readers
                wake_up_all(&d->blockq);
        }
        osp_spin_unlock(&d->mutex);
	} else
		r = -ENOTTY; /* unknown command */
	return r;
}
Example #7
0
/*
 * Look-up block in cache, and increment usage count.  If not in cache, read
 * and decompress it from disk.
 */
struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb,
        struct squashfs_cache *cache, u64 block, int length)
{
    int i, n;
    struct squashfs_cache_entry *entry;

    spin_lock(&cache->lock);

    while (1) {
        for (i = 0; i < cache->entries; i++)
            if (cache->entry[i].block == block)
                break;

        if (i == cache->entries) {
            /*
             * Block not in cache, if all cache entries are used
             * go to sleep waiting for one to become available.
             */
            if (cache->unused == 0) {
                cache->num_waiters++;
                spin_unlock(&cache->lock);
                wait_event(cache->wait_queue, cache->unused);
                spin_lock(&cache->lock);
                cache->num_waiters--;
                continue;
            }

            /*
             * At least one unused cache entry.  A simple
             * round-robin strategy is used to choose the entry to
             * be evicted from the cache.
             */
            i = cache->next_blk;
            for (n = 0; n < cache->entries; n++) {
                if (cache->entry[i].refcount == 0)
                    break;
                i = (i + 1) % cache->entries;
            }

            cache->next_blk = (i + 1) % cache->entries;
            entry = &cache->entry[i];

            /*
             * Initialise chosen cache entry, and fill it in from
             * disk.
             */
            cache->unused--;
            entry->block = block;
            entry->refcount = 1;
            entry->pending = 1;
            entry->num_waiters = 0;
            entry->error = 0;
            spin_unlock(&cache->lock);

            entry->length = squashfs_read_data(sb, entry->data,
                                               block, length, &entry->next_index,
                                               cache->block_size, cache->pages);

            spin_lock(&cache->lock);

            if (entry->length < 0)
                entry->error = entry->length;

            entry->pending = 0;

            /*
             * While filling this entry one or more other processes
             * have looked it up in the cache, and have slept
             * waiting for it to become available.
             */
            if (entry->num_waiters) {
                spin_unlock(&cache->lock);
                wake_up_all(&entry->wait_queue);
            } else
                spin_unlock(&cache->lock);

            goto out;
        }

        /*
         * Block already in cache.  Increment refcount so it doesn't
         * get reused until we're finished with it, if it was
         * previously unused there's one less cache entry available
         * for reuse.
         */
        entry = &cache->entry[i];
        if (entry->refcount == 0)
            cache->unused--;
        entry->refcount++;

        /*
         * If the entry is currently being filled in by another process
         * go to sleep waiting for it to become available.
         */
        if (entry->pending) {
            entry->num_waiters++;
            spin_unlock(&cache->lock);
            wait_event(entry->wait_queue, !entry->pending);
        } else
            spin_unlock(&cache->lock);

        goto out;
    }

out:
    TRACE("Got %s %d, start block %lld, refcount %d, error %d\n",
          cache->name, i, entry->block, entry->refcount, entry->error);

    if (entry->error)
        ERROR("Unable to read %s cache entry [%llx]\n", cache->name,
              block);
    return entry;
}
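For context, a sketch of the matching release path. The body below is hedged from the fields used above (refcount, unused, num_waiters, wait_queue); the back-pointer entry->cache is an assumption, not shown in this excerpt.

/* Sketch: drop a reference taken by squashfs_cache_get(). */
void squashfs_cache_put(struct squashfs_cache_entry *entry)
{
    struct squashfs_cache *cache = entry->cache; /* assumed back-pointer */

    spin_lock(&cache->lock);
    entry->refcount--;
    if (entry->refcount == 0) {
        cache->unused++;
        /* Wake an allocator sleeping on cache->unused in the loop above. */
        if (cache->num_waiters) {
            spin_unlock(&cache->lock);
            wake_up(&cache->wait_queue);
            return;
        }
    }
    spin_unlock(&cache->lock);
}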
Example #8
0
void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	if (atomic_dec_and_test(&bo->cpu_writers))
		wake_up_all(&bo->event_queue);
}
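A hypothetical waiter for this counter (a sketch using only the names visible above; the function name is invented) blocks on bo->event_queue until every synccpu writer has released:

/* Sketch: wait until all cpu_writers references have been dropped. */
static void example_wait_cpu_writers(struct ttm_buffer_object *bo)
{
	wait_event(bo->event_queue, atomic_read(&bo->cpu_writers) == 0);
}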
Example #9
0
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	spin_lock(&glob->lru_lock);
	while (ret == -EBUSY) {
		if (unlikely(list_empty(&glob->swap_lru))) {
			spin_unlock(&glob->lru_lock);
			return -EBUSY;
		}

		bo = list_first_entry(&glob->swap_lru,
				      struct ttm_buffer_object, swap);
		kref_get(&bo->list_kref);

		if (!list_empty(&bo->ddestroy)) {
			spin_unlock(&glob->lru_lock);
			(void) ttm_bo_cleanup_refs(bo, false, false, false);
			kref_put(&bo->list_kref, ttm_bo_release_list);
			spin_lock(&glob->lru_lock);
			continue;
		}

		/**
		 * Reserve buffer. Since we unlock while sleeping, we need
		 * to re-check that nobody removed us from the swap-list while
		 * we slept.
		 */

		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
		if (unlikely(ret == -EBUSY)) {
			spin_unlock(&glob->lru_lock);
			ttm_bo_wait_unreserved(bo, false);
			kref_put(&bo->list_kref, ttm_bo_release_list);
			spin_lock(&glob->lru_lock);
		}
	}

	BUG_ON(ret != 0);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	ttm_bo_list_ref_sub(bo, put_count, true);

	/**
	 * Wait for GPU, then move to system cached.
	 */

	spin_lock(&bo->bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->bdev->fence_lock);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

	/**
	 *
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}
Example #10
0
/*
 * osprd_ioctl(inode, filp, cmd, arg)
 *   Called to perform an ioctl on the named file.
 */
int osprd_ioctl(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	osprd_info_t *d = file2osprd(filp);	// device info
	int r = 0;			// return value: initially 0
	unsigned local_ticket;
	// is file open for writing?
	int filp_writable = (filp->f_mode & FMODE_WRITE) != 0;

	// This line avoids compiler warnings; you may remove it.
	//(void) filp_writable, (void) d;

	// Set 'r' to the ioctl's return value: 0 on success, negative on error

	if (cmd == OSPRDIOCACQUIRE) {

		// EXERCISE: Lock the ramdisk.
		//
		// If *filp is open for writing (filp_writable), then attempt
		// to write-lock the ramdisk; otherwise attempt to read-lock
		// the ramdisk.
		//
		// This lock request must block using 'd->blockq' until:
		// 1) no other process holds a write lock;
		// 2) either the request is for a read lock, or no other process
		//    holds a read lock; and
		// 3) lock requests should be serviced in order, so no process
		//    that blocked earlier is still blocked waiting for the
		//    lock.
		//
		// If a process acquires a lock, mark this fact by setting
		// 'filp->f_flags |= F_OSPRD_LOCKED'.  You also need to
		// keep track of how many read and write locks are held:
		// change the 'osprd_info_t' structure to do this.
		//
		// Also wake up processes waiting on 'd->blockq' as needed.
		//
		// If the lock request would cause a deadlock, return -EDEADLK.
		// If the lock request blocks and is awoken by a signal, then
		// return -ERESTARTSYS.
		// Otherwise, if we can grant the lock request, return 0.

		// 'd->ticket_head' and 'd->ticket_tail' should help you
		// service lock requests in order.  These implement a ticket
		// order: 'ticket_tail' is the next ticket, and 'ticket_head'
		// is the ticket currently being served.  You should set a local
		// variable to 'd->ticket_head' and increment 'd->ticket_head'.
		// Then, block at least until 'd->ticket_tail == local_ticket'.
		// (Some of these operations are in a critical section and must
		// be protected by a spinlock; which ones?)

		// Your code here (instead of the next two lines).
		//eprintk("Attempting to acquire\n");
		
		// Block
		osp_spin_lock(&(d->mutex));
		local_ticket = d->ticket_head;
		d->ticket_head++;
		osp_spin_unlock(&(d->mutex));
		
		// wait_event_interruptible returns a nonzero value if
		// interrupted by a signal, so return -ERESTARTSYS if it does.	
		for_each_open_file(current, cause_deadlock, d);
		        
		if (d->dead > 1 && (filp->f_flags & F_OSPRD_LOCKED))
			return -EDEADLK;

		if (wait_event_interruptible(d->blockq, d->n_writel == 0
			&& (!filp_writable || d->n_readl == 0)
			&& d->ticket_tail == local_ticket))
			{
				//eprintk ("INTERRUPTED! Head: %u Tail: %u Local: %u\n", d->ticket_head, d->ticket_tail, local_ticket);
				// If this process wasn't the one being served, don't consider the ticket to increment.
				if (d->ticket_tail == local_ticket)
					d->ticket_tail++;
				else d->desync++;
				return -ERESTARTSYS;
			}
		osp_spin_lock(&(d->mutex));
		
		d->dead = 0;
		
		if (d->mutex.lock>0)
			r = 0;
		filp->f_flags |= F_OSPRD_LOCKED;
		if (filp_writable)
			{ d->n_writel++; d->ticket_tail++; }
		else
			// advance the ticket while still holding the mutex so
			// other waiters don't read a stale d->ticket_tail
			{ d->n_readl++; d->ticket_tail++; }

		osp_spin_unlock(&(d->mutex));
		//wake_up_all(&d->blockq);

		r = 0;

	} else if (cmd == OSPRDIOCTRYACQUIRE) {

		// EXERCISE: ATTEMPT to lock the ramdisk.
		//
		// This is just like OSPRDIOCACQUIRE, except it should never
		// block.  If OSPRDIOCACQUIRE would block or return deadlock,
		// OSPRDIOCTRYACQUIRE should return -EBUSY.
		// Otherwise, if we can grant the lock request, return 0.

		// Your code here (instead of the next two lines).
		//eprintk("Attempting to try acquire\n");
		// Check for an existing lock. The whole test-and-set happens
		// under the mutex so two callers can't both see the ramdisk
		// as free.
		osp_spin_lock(&(d->mutex));
		local_ticket = d->ticket_head;
		if (filp->f_flags & F_OSPRD_LOCKED || d->n_writel != 0
			|| (filp_writable && d->n_readl != 0)
			|| d->ticket_tail != local_ticket)
		{
			osp_spin_unlock(&(d->mutex));
			r = -EBUSY; //eprintk("Stopped\n");
		}
		// If *filp is open for writing (filp_writable), then attempt
		// to write-lock the ramdisk; otherwise attempt to read-lock
		// the ramdisk.
		else
		{
			d->ticket_head++;
			filp->f_flags |= F_OSPRD_LOCKED;
			if (filp_writable)
			{ d->n_writel++; }
			else
			{ d->n_readl++; }
			if(d->ticket_tail < d->ticket_head)
				d->ticket_tail++;
			osp_spin_unlock(&(d->mutex));
			r = 0;
			wake_up_all(&d->blockq);
		}

		// Also wake up processes waiting on 'd->blockq' as needed.
		//
		// If the lock request would cause a deadlock, return -EDEADLK.
		// Otherwise, if we can grant the lock request, return 0.

		//r = -ENOTTY;

	} else if (cmd == OSPRDIOCRELEASE) {

		// EXERCISE: Unlock the ramdisk.
		//
		// If the file hasn't locked the ramdisk, return -EINVAL.
		if (!(filp->f_flags & F_OSPRD_LOCKED))
			{r = -EINVAL; }
		
		// Otherwise, clear the lock from filp->f_flags, wake up
		// the wait queue, perform any additional accounting steps
		// you need, and return 0.
		else 
		{
			// Clear lock flag.
			osp_spin_lock(&(d->mutex));
			filp->f_flags &= ~F_OSPRD_LOCKED;

			// Drop only this file's lock; zeroing both counters
			// would release other readers' locks too.
			if (filp_writable)
				d->n_writel = 0;
			else if (d->n_readl > 0)
				d->n_readl--;
			
			osp_spin_unlock(&(d->mutex));
			// Wake queue.
			
			wake_up_all(&d->blockq);		
			
			// Return.
			r = 0;

		}
		

	} else
		r = -ENOTTY; /* unknown command */
	return r;
}
Example #11
0
int osprd_ioctl(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	osprd_info_t *d = file2osprd(filp);	// device info
	int r = 0;			// return value: initially 0
	unsigned cur_ticket;
	// is file open for writing?
	int filp_writable = (filp->f_mode & FMODE_WRITE) != 0;

	// This line avoids compiler warnings; you may remove it.
	(void) filp_writable, (void) d;

	// Set 'r' to the ioctl's return value: 0 on success, negative on error

	eprintk("%d\n", (int)current->pid);
	if (cmd == OSPRDIOCACQUIRE) {

		// EXERCISE: Lock the ramdisk.
		osp_spin_lock(&d->mutex);
		
		if (check_deadlock(d)) 
		{
			//osp_spin_unlock(&d->mutex);
			//return -EDEADLK;
			r = -EDEADLK;
		}
		else

		{
			add_check_deadlock_list(current->pid, d);
			eprintk ("add_check_deadlock_list");
			cur_ticket = d->ticket_head;
			d->ticket_head++;
		}
		osp_spin_unlock(&d->mutex);
		if (r != 0) {
			return r;
		}

		if(filp_writable)
		{
			//osp_spin_lock(&d->mutex);	
			int w = wait_event_interruptible(d->blockq, (d->number_write_lock==0&&d->number_read_lock==0&&cur_ticket==d->ticket_tail));
			// Blocks the current task on a wait queue until the condition becomes true.
			// A request for a write lock on a ramdisk file will block until no other
			// files on that ramdisk hold a read or write lock.
			if(w == -ERESTARTSYS)
			{
				osp_spin_lock(&d->mutex);	
				//if the process is interrupted by signal
				if (cur_ticket == d->ticket_tail)
					// already the next available ticket
					d->ticket_tail++;
				else
					// destroy this ticket
					d->ticket_head--;
				osp_spin_unlock(&d->mutex);	
				return w;
			}
			
			osp_spin_lock(&d->mutex);	
			//Acquire a mutex (lock the mutex)
			d->ticket_tail++;
			d->write_lock_holder = current->pid;
			d->number_write_lock = 1;
			filp->f_flags |= F_OSPRD_LOCKED;
			osp_spin_unlock(&d->mutex);	
			//Release (unlock) the mutex
		}
		else
		{

			int w = wait_event_interruptible(d->blockq, (d->number_write_lock==0&&cur_ticket==d->ticket_tail));
			// Blocks the current task on a wait queue until the condition becomes true.
			// A request for a read lock on a ramdisk file will block until no other
			// files on that ramdisk hold a write lock.
			if(w == -ERESTARTSYS)
			{
				osp_spin_lock(&d->mutex);
				//if the process is interrupted by signal
				if (cur_ticket == d->ticket_tail)
					// already the next available ticket
					d->ticket_tail++;
				else
					// destroy this ticket
					d->ticket_head--;
				osp_spin_unlock(&d->mutex);
				return w;
			}
			
			osp_spin_lock(&d->mutex);	
			//Acquire a mutex (lock the mutex)
			d->ticket_tail++;
			add_read_pid(current->pid,d);
			d->number_read_lock++;
			filp->f_flags |= F_OSPRD_LOCKED;
			osp_spin_unlock(&d->mutex);	
			//Release (unlock) the mutex
		}

	} else if (cmd == OSPRDIOCTRYACQUIRE) {

		// EXERCISE: ATTEMPT to lock the ramdisk.
		//
		// This is just like OSPRDIOCACQUIRE, except it should never
		// block.  If OSPRDIOCACQUIRE would block or return deadlock,
		// OSPRDIOCTRYACQUIRE should return -EBUSY.
		// Otherwise, if we can grant the lock request, return 0.

		// Your code here (instead of the next two lines).
		if (filp_writable)
		{
			//atomically acquire write lock
			osp_spin_lock (&d->mutex);	//atomicity
			if ((d->number_read_lock >0) || (d->number_write_lock>0))
			{
				osp_spin_unlock (&d->mutex);
				return -EBUSY;
			}
			else 	//d->number_read_lock ==0) && (d->number_write_lock==0)
			{
				d->write_lock_holder = current->pid;
				d->number_write_lock ++;
				d->ticket_tail++;
				d->ticket_head++;
				filp->f_flags |= F_OSPRD_LOCKED;
				osp_spin_unlock (&d->mutex);
			}
		}
		else 	//opened for read 
		{
			//atomically acquire read lock
			osp_spin_lock (&d->mutex);
			{
				if (d->number_write_lock>0)		//can't get read lock
				{
					osp_spin_unlock(&d->mutex);
					return -EBUSY;
				}
				else
				{
					add_read_pid (current->pid,d);
					d->number_read_lock++;
					d->ticket_tail++;
					d->ticket_head++;
					filp->f_flags |= F_OSPRD_LOCKED;
					osp_spin_unlock (&d->mutex);
				}
			}
		}

		
		//eprintk("Attempting to try acquire\n");
		//r = -ENOTTY;

	} else if (cmd == OSPRDIOCRELEASE) {

		// EXERCISE: Unlock the ramdisk.
		//
		// If the file hasn't locked the ramdisk, return -EINVAL.
		// Otherwise, clear the lock from filp->f_flags, wake up
		// the wait queue, perform any additional accounting steps
		// you need, and return 0.
		//osp_spin_lock (&d->mutex);
		if ((filp->f_flags & F_OSPRD_LOCKED)==0)
		{
			//osp_spin_unlock (&d->mutex);
			return -EINVAL;
		}
		else
		{
			osp_spin_lock (&d->mutex);	
			d->check_deadlock_list_head = list_remove_element(d->check_deadlock_list_head,current->pid);
			if (filp_writable)		//release the write locker
			{
				d->write_lock_holder = -1;
				d->number_write_lock --;
			}
			else 	//release the read locker
			{
				d->number_read_lock --;
				d->pid_list_head = list_remove_element(d->pid_list_head,current->pid);
				/*if (list_free_all (pid_list_head) == -ENOTTY)
					return -ENOTTY;*/
				if (d->pid_list_head == NULL) {
					// don't return with the mutex held
					osp_spin_unlock (&d->mutex);
					return -ENOTTY;
				}
			}
			filp->f_flags &= ~F_OSPRD_LOCKED; 
			
			osp_spin_unlock (&d->mutex);
			wake_up_all (&d->blockq);

		}
		// Your code here (instead of the next line).
		//r = -ENOTTY;

	}
	else
		r = -ENOTTY; /* unknown command */
	return r;

}
Example #12
0
static void wait_callback(struct kgsl_device *device, void *priv, u32 id,
                          u32 timestamp, u32 type)
{
    struct adreno_context *drawctxt = priv;
    wake_up_all(&drawctxt->waiting);
}
Example #13
0
int vmw_fallback_wait(struct vmw_private *dev_priv,
		      bool lazy,
		      bool fifo_idle,
		      uint32_t seqno,
		      bool interruptible,
		      unsigned long timeout)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;

	uint32_t count = 0;
	uint32_t signal_seq;
	int ret;
	unsigned long end_jiffies = jiffies + timeout;
	bool (*wait_condition)(struct vmw_private *, uint32_t);
	DEFINE_WAIT(__wait);

	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
		&vmw_seqno_passed;

	/**
	 * Block command submission while waiting for idle.
	 */

	if (fifo_idle)
		down_read(&fifo_state->rwsem);
	signal_seq = atomic_read(&dev_priv->marker_seq);
	ret = 0;

	for (;;) {
		prepare_to_wait(&dev_priv->fence_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (wait_condition(dev_priv, seqno))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		if (lazy)
			schedule_timeout(1);
		else if ((++count & 0x0F) == 0) {
			/**
			 * FIXME: Use schedule_hr_timeout here for
			 * newer kernels and lower CPU utilization.
			 */

			__set_current_state(TASK_RUNNING);
			schedule();
			__set_current_state((interruptible) ?
					    TASK_INTERRUPTIBLE :
					    TASK_UNINTERRUPTIBLE);
		}
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fence_queue, &__wait);
	if (ret == 0 && fifo_idle) {
		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
		iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
	}
	wake_up_all(&dev_priv->fence_queue);
	if (fifo_idle)
		up_read(&fifo_state->rwsem);

	return ret;
}
Example #14
0
/*
 * This function transfers the ownership of the PCC to the platform
 * So it must be called while holding write_lock(pcc_lock)
 */
static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
	int ret = -EIO, i;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space
	 */
	if (cmd == CMD_READ) {
		/*
		 * If there are pending cpc_writes, then we stole the channel
		 * before write completion, so first send a WRITE command to
		 * platform
		 */
		if (pcc_ss_data->pending_pcc_write_cmd)
			send_pcc_cmd(pcc_ss_id, CMD_WRITE);

		ret = check_pcc_chan(pcc_ss_id, false);
		if (ret)
			goto end;
	} else /* CMD_WRITE */
		pcc_ss_data->pending_pcc_write_cmd = FALSE;

	/*
	 * Handle the Minimum Request Turnaround Time (MRTT)
	 * "The minimum amount of time that OSPM must wait after the completion
	 * of a command before issuing the next command, in microseconds"
	 */
	if (pcc_ss_data->pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(),
					    pcc_ss_data->last_cmd_cmpl_time);
		if (pcc_ss_data->pcc_mrtt > time_delta)
			udelay(pcc_ss_data->pcc_mrtt - time_delta);
	}

	/*
	 * Handle the non-zero Maximum Periodic Access Rate (MPAR)
	 * "The maximum number of periodic requests that the subspace channel can
	 * support, reported in commands per minute. 0 indicates no limitation."
	 *
	 * This parameter should be ideally zero or large enough so that it can
	 * handle maximum number of requests that all the cores in the system can
	 * collectively generate. If it is not, we will follow the spec and just
	 * not send the request to the platform after hitting the MPAR limit in
	 * any 60s window
	 */
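	/*
	 * Worked example (assumed value): with pcc_mpar == 120, the code
	 * below sends at most 120 commands and then fails further requests
	 * with -EIO until 60 s have elapsed since last_mpar_reset, at which
	 * point the window restarts and mpar_count is refilled.
	 */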
	if (pcc_ss_data->pcc_mpar) {
		if (pcc_ss_data->mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(),
						    pcc_ss_data->last_mpar_reset);
			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
				pr_debug("PCC cmd not sent due to MPAR limit");
				ret = -EIO;
				goto end;
			}
			pcc_ss_data->last_mpar_reset = ktime_get();
			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
		}
		pcc_ss_data->mpar_count--;
	}

	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	pcc_ss_data->platform_owns_pcc = true;

	/* Ring doorbell */
	ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
				cmd, ret);
		goto end;
	}

	/* wait for completion and check for PCC error bit */
	ret = check_pcc_chan(pcc_ss_id, true);

	if (pcc_ss_data->pcc_mrtt)
		pcc_ss_data->last_cmd_cmpl_time = ktime_get();

	if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
		mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
	else
		mbox_client_txdone(pcc_ss_data->pcc_channel, ret);

end:
	if (cmd == CMD_WRITE) {
		if (unlikely(ret)) {
			for_each_possible_cpu(i) {
				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);
				if (!desc)
					continue;

				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
					desc->write_cmd_status = ret;
			}
		}
		pcc_ss_data->pcc_write_cnt++;
		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
	}

	return ret;
}
Example #15
0
asmlinkage int netlock_release(void)
{
	DEFINE_WAIT(wait_queue);
	spin_lock(&lock);
	
	if (list_empty(&wait_queue.task_list))
	{
		
		if( reader_count <= 1)
		{
			reader_count = 0;
			read_lock_available = 1;
			write_lock_available = 1;
		}
		else
		{
			reader_count--;
		}
		spin_unlock(&lock);
	}
	else
	{
		wait_queue_head_t *pos;
		int exclusiveFound = 0;
		wait_queue_head_t temp;

        // Save the head of the list
        
		wait_queue_head_t *head = (wait_queue_head_t *) kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
		head->task_list = wait_queue.task_list; 
		pos = head;

		for (pos->task_list = *(&wait_queue.task_list)->next; \
			(pos->task_list.next != *(&wait_queue.task_list.next)) && (pos->task_list.prev != *(&wait_queue.task_list.prev)); \
			pos->task_list = *(pos->task_list.next))

		{
			if (pos->netlock_flag == 1)		//1 indicates exclusive
			{
				if(exclusiveFound == 0)
				{
					exclusiveFound = 1;
					temp = *pos;
				}
			}
			if (pos->netlock_flag == 0)		// 0 indicates shared
			{
				reader_count++;
			}

		}
		if(exclusiveFound == 1)
		{
			write_lock_available = 0;

			remove_wait_queue(&temp, &wait_queue);
			// 'pos' still points at 'head', which is freed below;
			// freeing it here as well would be a double free
			spin_unlock(&lock);
			wake_up(&temp);
			//prepare_to_wait(&temp, &wait_queue, TASK_INTERRUPTIBLE);
			//finish_wait(&temp, &wait_queue);
		}
		else
		{
			if(reader_count > 0)
			{
				read_lock_available = 0;
				spin_unlock(&lock);
			 	wake_up_all(head);
			}
			else
			{
				spin_unlock(&lock);
			}
		}
		kfree(head);
		
	}

	return 0;
    
}
Example #16
0
void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
{
	ttm_bo_add_to_lru(bo);
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
}
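A sketch of how a non-"_locked" wrapper might use this helper, taking the global LRU lock around the call. Hedged: bo->glob and the wrapper name follow the TTM naming pattern (glob->lru_lock appears in Example #9) but are assumptions here.

/* Sketch: unreserve under glob->lru_lock. */
void example_ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob; /* assumed back-pointer */

	spin_lock(&glob->lru_lock);
	ttm_bo_unreserve_locked(bo);
	spin_unlock(&glob->lru_lock);
}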
Example #17
0
void cxl_context_events_pending(struct cxl_context *ctx,
				unsigned int new_events)
{
	atomic_add(new_events, &ctx->afu_driver_events);
	wake_up_all(&ctx->wq);
}
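A hypothetical consumer of this counter (a sketch using only the names visible above; the function name is invented) sleeps on ctx->wq until at least one event is pending:

/* Sketch: block until an AFU driver event has been posted. */
static void example_wait_for_events(struct cxl_context *ctx)
{
	wait_event(ctx->wq, atomic_read(&ctx->afu_driver_events) != 0);
}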
Example #18
0
/**
 * bus1_active_cleanup() - cleanup drained object
 * @active:	object to release
 * @waitq:	wait-queue linked to @active, or NULL
 * @cleanup:	cleanup callback, or NULL
 * @userdata:	userdata for callback
 *
 * This performs the final object cleanup. The caller must guarantee that the
 * object is drained, by calling bus1_active_drain().
 *
 * This function invokes the passed cleanup callback on the object. However, it
 * guarantees that this is done exactly once. If there are multiple parallel
 * callers, this will pick one randomly and make all others wait until it is
 * done. If you call this after it was already cleaned up, this is a no-op
 * and only serves as barrier.
 *
 * If @waitq is NULL, the wait is skipped and the call returns immediately. In
 * this case, another thread has entered before, but there is no guarantee that
 * they finished executing the cleanup callback, yet.
 *
 * If @waitq is non-NULL, this call behaves like a down_write(), followed by an
 * up_write(), just like bus1_active_drain(). If @waitq is NULL, this rather
 * behaves like a down_write_trylock(), optionally followed by an up_write().
 *
 * Return: True if this is the thread that released it, false otherwise.
 */
bool bus1_active_cleanup(struct bus1_active *active,
			 wait_queue_head_t *waitq,
			 void (*cleanup) (struct bus1_active *, void *),
			 void *userdata)
{
	int v;

	if (BUS1_WARN_ON(!bus1_active_is_drained(active)))
		return false;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * We pretend this is a down_write_interruptible() and all but
	 * the release-context get interrupted. This is required, as we
	 * cannot call lock_acquired() on multiple threads without
	 * synchronization. Hence, only the release-context will do
	 * this, all others just release the lock.
	 */
	lock_acquire_exclusive(&active->dep_map,/* lock */
			       0,		/* subclass */
			       !waitq,		/* try-lock */
			       NULL,		/* nest underneath */
			       _RET_IP_);	/* IP */
#endif

	/* mark object as RELEASE */
	v = atomic_cmpxchg(&active->count,
			   BUS1_ACTIVE_RELEASE_DIRECT, BUS1_ACTIVE_RELEASE);
	if (v != BUS1_ACTIVE_RELEASE_DIRECT)
		v = atomic_cmpxchg(&active->count,
				   BUS1_ACTIVE_BIAS, BUS1_ACTIVE_RELEASE);

	/*
	 * If this is the thread that marked the object as RELEASE, we
	 * perform the actual release. Otherwise, we wait until the
	 * release is done and the node is marked as DRAINED.
	 */
	if (v == BUS1_ACTIVE_BIAS || v == BUS1_ACTIVE_RELEASE_DIRECT) {

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		/* we're the release-context and acquired the lock */
		lock_acquired(&active->dep_map, _RET_IP_);
#endif

		if (cleanup)
			cleanup(active, userdata);

		/* mark as DONE */
		atomic_set(&active->count, BUS1_ACTIVE_DONE);
		if (waitq)
			wake_up_all(waitq);
	} else if (waitq) {

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		/* we're contended against the release context */
		lock_contended(&active->dep_map, _RET_IP_);
#endif

		/* wait until object is DRAINED */
		wait_event(*waitq,
			   atomic_read(&active->count) == BUS1_ACTIVE_DONE);
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * No-one but the release-context acquired the lock. However,
	 * that does not matter as we simply treat this as
	 * 'interrupted'. Everyone releases the lock, but only one
	 * caller really got it.
	 */
	lock_release(&active->dep_map,	/* lock */
		     1,			/* nested (no-op) */
		     _RET_IP_);		/* instruction pointer */
#endif

	/* true if we released it */
	return v == BUS1_ACTIVE_BIAS || v == BUS1_ACTIVE_RELEASE_DIRECT;
}
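A hypothetical call site, following the contract in the comment above (bus1_active_drain() is named there; the callback and userdata names are invented):

/* Sketch: drain, then let exactly one thread run the cleanup callback. */
bus1_active_drain(active, &waitq);
if (bus1_active_cleanup(active, &waitq, my_cleanup_cb, ctx)) {
	/* this thread is the one that executed my_cleanup_cb(active, ctx) */
}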
Example #19
0
static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
				  unsigned cmd, unsigned long arg)
{
	struct usb_stream_config *cfg;
	struct us122l *us122l = hw->private_data;
	struct usb_stream *s;
	unsigned min_period_frames;
	int err = 0;
	bool high_speed;

	if (cmd != SNDRV_USB_STREAM_IOCTL_SET_PARAMS)
		return -ENOTTY;

	cfg = memdup_user((void *)arg, sizeof(*cfg));
	if (IS_ERR(cfg))
		return PTR_ERR(cfg);

	if (cfg->version != USB_STREAM_INTERFACE_VERSION) {
		err = -ENXIO;
		goto free;
	}
	high_speed = us122l->dev->speed == USB_SPEED_HIGH;
	if ((cfg->sample_rate != 44100 && cfg->sample_rate != 48000  &&
	     (!high_speed ||
	      (cfg->sample_rate != 88200 && cfg->sample_rate != 96000))) ||
	    cfg->frame_size != 6 ||
	    cfg->period_frames > 0x3000) {
		err = -EINVAL;
		goto free;
	}
	switch (cfg->sample_rate) {
	case 44100:
		min_period_frames = 48;
		break;
	case 48000:
		min_period_frames = 52;
		break;
	default:
		min_period_frames = 104;
		break;
	}
	if (!high_speed)
		min_period_frames <<= 1;
	if (cfg->period_frames < min_period_frames) {
		err = -EINVAL;
		goto free;
	}

	snd_power_wait(hw->card, SNDRV_CTL_POWER_D0);

	mutex_lock(&us122l->mutex);
	s = us122l->sk.s;
	if (!us122l->master)
		us122l->master = file;
	else if (us122l->master != file) {
		if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg))) {
			err = -EIO;
			goto unlock;
		}
		us122l->slave = file;
	}
	if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg)) ||
	    s->state == usb_stream_xrun) {
		us122l_stop(us122l);
		if (!us122l_start(us122l, cfg->sample_rate, cfg->period_frames))
			err = -EIO;
		else
			err = 1;
	}
unlock:
	mutex_unlock(&us122l->mutex);
free:
	kfree(cfg);
	wake_up_all(&us122l->sk.sleep);
	return err;
}
Example #20
0
static void wait_callback(struct kgsl_device *device,
		struct kgsl_context *context, void *priv, int result)
{
	struct adreno_context *drawctxt = priv;
	wake_up_all(&drawctxt->waiting);
}
Example #21
0
/**
 * radeon_fence_wait_seq - wait for a specific sequence numbers
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics).  Sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number.  Helper function
 * for radeon_fence_wait_*().
 * Returns 0 if the sequence number has passed, error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
				 bool intr)
{
	uint64_t last_seq[RADEON_NUM_RINGS];
	bool signaled;
	int i, r;

	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {

		/* Save current sequence values, used to check for GPU lockups */
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (!target_seq[i])
				continue;

			last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq);
			trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
			radeon_irq_kms_sw_irq_get(rdev, i);
		}

		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue, (
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
				 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
		} else {
			r = wait_event_timeout(rdev->fence_queue, (
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
				 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
		}

		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (!target_seq[i])
				continue;

			radeon_irq_kms_sw_irq_put(rdev, i);
			trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
		}

		if (unlikely(r < 0))
			return r;

		if (unlikely(!signaled)) {
			if (rdev->needs_reset)
				return -EDEADLK;

			/* we were interrupted for some reason and fence
			 * isn't signaled yet, resume waiting */
			if (r)
				continue;

			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
				if (!target_seq[i])
					continue;

				if (last_seq[i] != atomic64_read(&rdev->fence_drv[i].last_seq))
					break;
			}

			if (i != RADEON_NUM_RINGS)
				continue;

			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
				if (!target_seq[i])
					continue;

				if (radeon_ring_is_lockup(rdev, i, &rdev->ring[i]))
					break;
			}

			if (i < RADEON_NUM_RINGS) {
				/* good news: we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for "
					 "0x%016llx last fence id 0x%016llx on"
					 " ring %d)\n",
					 target_seq[i], last_seq[i], i);

				/* remember that we need a reset */
				rdev->needs_reset = true;
				wake_up_all(&rdev->fence_queue);
				return -EDEADLK;
			}
		}
	}
	return 0;
}
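A caller typically zeroes the per-ring array and fills only the slot it cares about. A minimal sketch of such a wrapper follows; the radeon_fence fields used here are assumptions based on how this helper is invoked, not verbatim from this document:

static int radeon_fence_wait_one_sketch(struct radeon_device *rdev,
					struct radeon_fence *fence, bool intr)
{
	u64 seq[RADEON_NUM_RINGS] = {};

	/* only the fence's own ring participates in the wait */
	seq[fence->ring] = fence->seq;

	/* 0 on success, -ERESTARTSYS on signal, -EDEADLK on GPU lockup */
	return radeon_fence_wait_seq(rdev, seq, intr);
}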
Example #22
0
/**
 * adreno_drawctxt_detach(): detach a context from the GPU
 * @context: Generic KGSL context container for the context
 *
 */
int adreno_drawctxt_detach(struct kgsl_context *context)
{
	struct kgsl_device *device;
	struct adreno_device *adreno_dev;
	struct adreno_context *drawctxt;
	struct adreno_ringbuffer *rb;
	int ret;

	if (context == NULL)
		return 0;

	device = context->device;
	adreno_dev = ADRENO_DEVICE(device);
	drawctxt = ADRENO_CONTEXT(context);
	rb = drawctxt->rb;

	/* deactivate context */
	if (rb->drawctxt_active == drawctxt)
		adreno_drawctxt_switch(adreno_dev, rb, NULL, 0);

	mutex_lock(&drawctxt->mutex);

	while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
		struct kgsl_cmdbatch *cmdbatch =
			drawctxt->cmdqueue[drawctxt->cmdqueue_head];

		drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
			ADRENO_CONTEXT_CMDQUEUE_SIZE;

		mutex_unlock(&drawctxt->mutex);

		/*
		 * If the context is detached while we are waiting for
		 * the next command in GFT SKIP CMD, print the context
		 * detached status here.
		 */
		adreno_fault_skipcmd_detached(device, drawctxt, cmdbatch);

		/*
		 * Don't hold the drawctxt mutex while the cmdbatch is being
		 * destroyed because the cmdbatch destroy takes the device
		 * mutex and the world falls in on itself
		 */

		kgsl_cmdbatch_destroy(cmdbatch);
		mutex_lock(&drawctxt->mutex);
	}

	mutex_unlock(&drawctxt->mutex);
	/*
	 * internal_timestamp is set in adreno_ringbuffer_addcmds,
	 * which holds the device mutex. The entire context destroy
	 * process requires the device mutex as well. But let's
	 * make sure we notice if the locking changes.
	 */
	BUG_ON(!mutex_is_locked(&device->mutex));

	/* Wait for the last global timestamp to pass before continuing */
	ret = adreno_drawctxt_wait_global(adreno_dev, context,
		drawctxt->internal_timestamp, 10 * 1000);

	/*
	 * If the wait for global fails then nothing after this point is likely
	 * to work very well - BUG_ON() so we can take advantage of the debug
	 * tools to figure out what the h - e - double hockey sticks happened
	 */

	BUG_ON(ret);

	kgsl_sharedmem_writel(device, &device->memstore,
			KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
			drawctxt->timestamp);

	kgsl_sharedmem_writel(device, &device->memstore,
			KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
			drawctxt->timestamp);

	adreno_profile_process_results(adreno_dev);

	/* wake threads waiting to submit commands from this context */
	wake_up_all(&drawctxt->waiting);
	wake_up_all(&drawctxt->wq);

	return ret;
}
Example #23
0
asmlinkage long submitjob(void *args, int argslen)
{
	struct job *job = NULL;
	struct job *u_args = (struct job *) args;
	int err=0;
	struct queue *q;
	
	if(args == NULL){
		printk("SUBMITJOB: Invalid arguments\n");
		err = -EINVAL;
		goto out;
	}
		
	/* memory allocation for user arguments into kernel space*/
	job = kmalloc(sizeof(struct job), GFP_KERNEL);
	if( job == NULL ){
		printk("SUBMITJOB: Insufficient memory\n");
		err = -ENOMEM;
		goto out;
	}
	
	/* Copying and validation of user space arguments */
	err = copy_from_user(job, u_args, sizeof(struct job));
	if (err != 0){
		printk("SUBMITJOB: copy_from_user failed\n");
		err = -EFAULT;
		goto out;
	}
	
	
	if(job->job_type == ENCRYPT || job->job_type == DECRYPT){
		/* job->key still holds the user-space pointer copied above;
		 * save it before pointing job->key at a kernel buffer */
		char __user *ukey = (char __user *) job->key;

		job->key = kmalloc(MD5_KEY_LENGTH, GFP_KERNEL);
		if( job->key == NULL ){
			printk("SUBMITJOB: Insufficient memory\n");
			err = -ENOMEM;
			goto out;
		}

		/* Copy the key itself from user space into the kernel buffer */
		err = copy_from_user(job->key, ukey, MD5_KEY_LENGTH);
		if (err != 0){
			printk("SUBMITJOB: copy_from_user failed\n");
			err = -EFAULT;
			goto out;
		}
	}
	
	else if(job->job_type==REMOVE){
		delete_job_id(prod_cons_q, argslen);
		kfree(job);
		goto out;
	}
	

	printk("job_type %d\n",job->job_type);
	
	job_id++;
	job->job_id = job_id;
	
	
	
top:
	mutex_lock(&big_mutex);
	
	/* adding job to the queue */
	if(prod_cons_q_len < MAX_LEN){
		q = add_job(prod_cons_q, job);
		if(IS_ERR(q)){
			err = PTR_ERR(q);
			mutex_unlock(&big_mutex);
			goto out;
		}
		else
			prod_cons_q_len++;
	}

	else if(prod_cons_q_len == MAX_LEN){
		
		printk("[sys_submitjob]: Producer going to sleep\n");
		mutex_unlock(&big_mutex);
		wait_event_interruptible(producers, prod_cons_q_len < MAX_LEN);
		goto top;
	}
	
	print_queue(prod_cons_q);
	
	mutex_unlock(&big_mutex);
	
	
	wake_up_all(&consumers);

	
out:
	return err;
}
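This syscall is the producer half of a producer/consumer queue. A hedged sketch of what the matching consumer loop might look like follows; remove_job(), process_job(), and the kthread framing are assumptions for illustration, while consumers, producers, big_mutex, prod_cons_q, and prod_cons_q_len all appear in the example above:

static int consumer_thread(void *data)
{
	struct job *job;

	while (!kthread_should_stop()) {
		/* sleep until submitjob() queues work and wakes 'consumers' */
		wait_event_interruptible(consumers, prod_cons_q_len > 0);

		mutex_lock(&big_mutex);
		job = remove_job(prod_cons_q);	/* hypothetical dequeue helper */
		if (job)
			prod_cons_q_len--;
		mutex_unlock(&big_mutex);

		if (!job)
			continue;

		/* a slot opened up: let any producer sleeping at 'top:' retry */
		wake_up_all(&producers);

		process_job(job);		/* hypothetical worker */
		kfree(job);
	}
	return 0;
}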
Example #24
0
// This function is called when a /dev/osprdX file is finally closed.
// (If the file descriptor was dup2ed, this function is called only when the
// last copy is closed.)
static int osprd_close_last(struct inode *inode, struct file *filp)
{
	if (filp) {
		osprd_info_t *d = file2osprd(filp);
		int filp_writable = filp->f_mode & FMODE_WRITE;
        
		// EXERCISE: If the user closes a ramdisk file that holds
		// a lock, release the lock.  Also wake up blocked processes
		// as appropriate.
        
		// Your code here.
        
        // needs testing
        
        osp_spin_lock(&d->mutex);
        if(filp->f_flags & F_OSPRD_LOCKED)
        {
          
            if(filp_writable)
            {
                d->writelock=0;
                d->write_lock_pid = -1;
            }
            else
            {
                d->readlock--;
				pid_list_t prev = NULL;
				pid_list_t curr = d->read_lock_pids;
				while(curr != NULL)
				{
					if(curr->pid == current->pid)
					{
                        if(prev == NULL)
                            d->read_lock_pids = curr->next;
                        else
                            prev->next = curr->next;
						kfree(curr);
						break;
					}
					else
					{
						prev = curr;
						curr = curr->next;
					}
				}
                 
            }
            
            filp->f_flags  &= ~F_OSPRD_LOCKED;//set to zero
            wake_up_all(&d->blockq);
            osp_spin_unlock(&d->mutex);
        }
        else
        {
            osp_spin_unlock(&d->mutex);
            return 0;
        }
        
        
		// This line avoids compiler warnings; you may remove it.
		(void) filp_writable, (void) d;
        
	}
    
	return 0;
}
Example #25
0
/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from uncommit in the
 * forced-shutdown path.  If that is true and the reference count on
 * the log item is going to drop to zero we need to free the item's
 * descriptor in the transaction.
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	xfs_buf_t	*bp = bip->bli_buf;
	struct xfs_ail	*ailp = lip->li_ailp;
	int		stale = bip->bli_flags & XFS_BLI_STALE;
	int		freed;

	ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	freed = atomic_dec_and_test(&bip->bli_refcount);

	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	if (freed && stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
		ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
		ASSERT(XFS_BUF_ISSTALE(bp));
		ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);

		trace_xfs_buf_item_unpin_stale(bip);

		if (remove) {
			/*
			 * We have to remove the log item from the transaction
			 * as we are about to release our reference to the
			 * buffer.  If we don't, the unlock that occurs later
			 * in xfs_trans_uncommit() will try to reference the
			 * buffer which we no longer have a hold on.
			 */
			xfs_trans_del_item(lip);

			/*
			 * Since the transaction no longer refers to the buffer,
			 * the buffer should no longer refer to the transaction.
			 */
			XFS_BUF_SET_FSPRIVATE2(bp, NULL);
		}

		/*
		 * If we get called here because of an IO error, we may
		 * or may not have the item on the AIL. xfs_trans_ail_delete()
		 * will take care of that situation.
		 * xfs_trans_ail_delete() drops the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_do_callbacks(bp, (xfs_log_item_t *)bip);
			XFS_BUF_SET_FSPRIVATE(bp, NULL);
			XFS_BUF_CLR_IODONE_FUNC(bp);
		} else {
			spin_lock(&ailp->xa_lock);
			xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip);
			xfs_buf_item_relse(bp);
			ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL);
		}
		xfs_buf_relse(bp);
	}
}
Example #26
0
/*
 * osprd_ioctl(inode, filp, cmd, arg)
 *   Called to perform an ioctl on the named file.
 */
int osprd_ioctl(struct inode *inode, struct file *filp,
                unsigned int cmd, unsigned long arg)
{
	osprd_info_t *d = file2osprd(filp);	// device info
	int r = 0;			// return value: initially 0
    
	// is file open for writing?
	int filp_writable = (filp->f_mode & FMODE_WRITE) != 0;
    
	// This line avoids compiler warnings; you may remove it.
	(void) filp_writable, (void) d;
    
	// Set 'r' to the ioctl's return value: 0 on success, negative on error
    
	if (cmd == OSPRDIOCACQUIRE) {
        
		// EXERCISE: Lock the ramdisk.
		//
		// If *filp is open for writing (filp_writable), then attempt
		// to write-lock the ramdisk; otherwise attempt to read-lock
		// the ramdisk.
		//
        // This lock request must block using 'd->blockq' until:
		// 1) no other process holds a write lock;
		// 2) either the request is for a read lock, or no other process
		//    holds a read lock; and
		// 3) lock requests should be serviced in order, so no process
		//    that blocked earlier is still blocked waiting for the
		//    lock.
		//
		// If a process acquires a lock, mark this fact by setting
		// 'filp->f_flags |= F_OSPRD_LOCKED'.  You also need to
		// keep track of how many read and write locks are held:
		// change the 'osprd_info_t' structure to do this.
		//
		// Also wake up processes waiting on 'd->blockq' as needed.
		//
		// If the lock request would cause a deadlock, return -EDEADLK.
		// If the lock request blocks and is awoken by a signal, then
		// return -ERESTARTSYS.
		// Otherwise, if we can grant the lock request, return 0.
        
		// 'd->ticket_head' and 'd->ticket_tail' should help you
		// service lock requests in order.  These implement a ticket
		// order: 'ticket_tail' is the next ticket, and 'ticket_head'
		// is the ticket currently being served.  You should set a local
		// variable to 'd->ticket_head' and increment 'd->ticket_head'.
		// Then, block at least until 'd->ticket_tail == local_ticket'.
		// (Some of these operations are in a critical section and must
		// be protected by a spinlock; which ones?)
        
		// Your code here (instead of the next two lines).
		
        //eprintk("Attempting to acquire\n");
		r = -ENOTTY;
        
        osp_spin_lock(&d->mutex);
        if(current->pid == d->write_lock_pid)
		{
            osp_spin_unlock(&d->mutex);
			return -EDEADLK;
		}
        
        unsigned local_ticket = d->ticket_head;
        d->ticket_head++;
        
        if(filp_writable)
        {
            
			pid_list_t prev = NULL;
			pid_list_t curr = d->read_lock_pids;
			while(curr != NULL)
			{
				if(curr->pid == current->pid)
				{
					osp_spin_unlock(&d->mutex);
					return -EDEADLK;
				}
				else
				{
					prev = curr;
					curr = curr->next;
				}
			}
            while(d->writelock !=0 || d->readlock!=0 || local_ticket!=d->ticket_tail)
            {
                int returnValue;
                osp_spin_unlock(&d->mutex);
                /* actually sleep until a release wakes d->blockq, then recheck */
                returnValue = wait_event_interruptible(d->blockq,
                        d->writelock == 0 && d->readlock == 0 &&
                        local_ticket == d->ticket_tail);
                if(returnValue == -ERESTARTSYS)
                    return returnValue;
                osp_spin_lock(&d->mutex);
            }
            filp->f_flags |= F_OSPRD_LOCKED;
            d->writelock=1;//should be 1
            d->write_lock_pid = current->pid;

        }
        else
        {
          while(d->writelock!=0 || d->ticket_tail != local_ticket)
            {
                int returnValue;
                osp_spin_unlock(&d->mutex);
                /* actually sleep until a release wakes d->blockq, then recheck */
                returnValue = wait_event_interruptible(d->blockq,
                        d->writelock == 0 && local_ticket == d->ticket_tail);
                if(returnValue == -ERESTARTSYS)
                    return returnValue;
                osp_spin_lock(&d->mutex);
            }
            filp->f_flags |= F_OSPRD_LOCKED;
            d->readlock++;
            
            pid_list_t prev = NULL;
            pid_list_t curr = d->read_lock_pids;
            while(curr != NULL)
            {
                prev = curr;
                curr = curr->next;
            }
            if(prev == NULL)
            {
                d->read_lock_pids = kmalloc(sizeof(*d->read_lock_pids), GFP_ATOMIC);
                d->read_lock_pids->pid = current->pid;
                d->read_lock_pids->next = NULL;
            }
            else
            {
                // assign to next
                prev->next = kmalloc(sizeof(*prev->next), GFP_ATOMIC);
                prev->next->pid = current->pid;
                prev->next->next = NULL;
            }

        }
    
        d->ticket_tail++;
        osp_spin_unlock(&d->mutex);
        r=0;
        
	} else if (cmd == OSPRDIOCTRYACQUIRE) {
        
		// EXERCISE: ATTEMPT to lock the ramdisk.
		//
		// This is just like OSPRDIOCACQUIRE, except it should never
		// block.  If OSPRDIOCACQUIRE would block or return deadlock,
		// OSPRDIOCTRYACQUIRE should return -EBUSY.
		// Otherwise, if we can grant the lock request, return 0.
        
        
		// Your code here (instead of the next two lines).
		//eprintk("Attempting to try acquire\n");
		r = -ENOTTY;
        if(filp_writable)
        {
            osp_spin_lock(&d->mutex);//should put it here CS
            if(d->writelock==0 && d->readlock==0 && d->ticket_head==d->ticket_tail)
            {
                //if not deadlock
                if(d->write_lock_pid == current->pid)
				{
					osp_spin_unlock(&d->mutex);
					return -EDEADLK;
				}
                else
                {
                filp->f_flags |= F_OSPRD_LOCKED;
                d->writelock=1;
                d->write_lock_pid = current->pid;
                r=0;
                }
            }
            else
            {
                r=-EBUSY;
            }
            osp_spin_unlock(&d->mutex);
            
        }
        
        else
        {
            osp_spin_lock(&d->mutex);//should put it here CS
            if(d->writelock==0 && d->ticket_head==d->ticket_tail)
            {
                //if not deadlock
                filp->f_flags |= F_OSPRD_LOCKED;
                d->readlock++;
                // Add pid to read lock pid lists
				pid_list_t prev = NULL;
				pid_list_t curr = d->read_lock_pids;
				while(curr != NULL)
				{
					prev = curr;
					curr = curr->next;
				}
				if(prev == NULL)
				{
					d->read_lock_pids = kmalloc(sizeof(*d->read_lock_pids), GFP_ATOMIC);
					d->read_lock_pids->pid = current->pid;
					d->read_lock_pids->next = NULL;
				}
				else
				{
					// assign to next
					prev->next = kmalloc(sizeof(*prev->next), GFP_ATOMIC);
					prev->next->pid = current->pid;
					prev->next->next = NULL;
				}
                r=0;
                
            }
            else if(d->write_lock_pid == current->pid)
			{
				osp_spin_unlock(&d->mutex);
                return -EDEADLK;
			}
            else
            {
                r = -EBUSY;
            }
            osp_spin_unlock(&d->mutex);
            
        }
	} else if (cmd == OSPRDIOCRELEASE) {
        
		// EXERCISE: Unlock the ramdisk.
		//
		// If the file hasn't locked the ramdisk, return -EINVAL.
		// Otherwise, clear the lock from filp->f_flags, wake up
		// the wait queue, perform any additional accounting steps
		// you need, and return 0.
        
		// Your code here (instead of the next line).
		r = -ENOTTY;
        
        if((filp->f_flags & F_OSPRD_LOCKED) == 0)
        {
            r = -EINVAL;
        }
        else
        {
            osp_spin_lock(&d->mutex);//should put it here CS
            
            if(filp_writable)  // how to know it is read or write lock
            {
                d->writelock=0;
                d->write_lock_pid = -1;
            }
            else
            {
                d->readlock--;
                // Clear this PID from the read lock list
				pid_list_t prev = NULL;
				pid_list_t curr = d->read_lock_pids;
				while(curr != NULL)
				{
					if(curr->pid == current->pid)
					{
                        if(prev == NULL)
                            d->read_lock_pids = curr->next;
                        else
                            prev->next = curr->next;
						kfree(curr);
						break;
					}
					else
					{
						prev = curr;
						curr = curr->next;
					}
				}
                

            }
            filp->f_flags  &= ~F_OSPRD_LOCKED;//set to zero
            wake_up_all(&d->blockq);
            r=0;
            
            osp_spin_unlock(&d->mutex);
        }
        
	} else
		r = -ENOTTY; /* unknown command */
	return r;
}
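The read-lock bookkeeping above repeats the same tail-append walk in two branches. A hedged sketch of a helper both could call follows; the pid_list_t layout is taken from the example, while the helper name and error handling are assumptions:

static int add_read_lock_pid(osprd_info_t *d, pid_t pid)
{
	/* allocate a node sized for the struct, not for the pointer typedef */
	pid_list_t node = kmalloc(sizeof(*node), GFP_ATOMIC);
	if (node == NULL)
		return -ENOMEM;
	node->pid = pid;
	node->next = NULL;

	if (d->read_lock_pids == NULL) {
		d->read_lock_pids = node;
	} else {
		pid_list_t curr = d->read_lock_pids;
		while (curr->next != NULL)	/* walk to the tail */
			curr = curr->next;
		curr->next = node;
	}
	return 0;
}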
Example #27
0
/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from uncommit in the
 * forced-shutdown path.  If that is true and the reference count on
 * the log item is going to drop to zero we need to free the item's
 * descriptor in the transaction.
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	xfs_buf_t	*bp = bip->bli_buf;
	struct xfs_ail	*ailp = lip->li_ailp;
	int		stale = bip->bli_flags & XFS_BLI_STALE;
	int		freed;

	ASSERT(bp->b_fspriv == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	freed = atomic_dec_and_test(&bip->bli_refcount);

	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	if (freed && stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(XFS_BUF_ISSTALE(bp));
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);

		trace_xfs_buf_item_unpin_stale(bip);

		if (remove) {
			/*
			 * If we are in a transaction context, we have to
			 * remove the log item from the transaction as we are
			 * about to release our reference to the buffer.  If we
			 * don't, the unlock that occurs later in
			 * xfs_trans_uncommit() will try to reference the
			 * buffer which we no longer have a hold on.
			 */
			if (lip->li_desc)
				xfs_trans_del_item(lip);

			/*
			 * Since the transaction no longer refers to the buffer,
			 * the buffer should no longer refer to the transaction.
			 */
			bp->b_transp = NULL;
		}

		/*
		 * If we get called here because of an IO error, we may
		 * or may not have the item on the AIL. xfs_trans_ail_delete()
		 * will take care of that situation.
		 * xfs_trans_ail_delete() drops the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_do_callbacks(bp);
			bp->b_fspriv = NULL;
			bp->b_iodone = NULL;
		} else {
			spin_lock(&ailp->xa_lock);
			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
			xfs_buf_item_relse(bp);
			ASSERT(bp->b_fspriv == NULL);
		}
		xfs_buf_relse(bp);
	} else if (freed && remove) {
		/*
		 * There are currently two references to the buffer - the active
		 * LRU reference and the buf log item. What we are about to do
		 * here - simulate a failed IO completion - requires 3
		 * references.
		 *
		 * The LRU reference is removed by the xfs_buf_stale() call. The
		 * buf item reference is removed by the xfs_buf_iodone()
		 * callback that is run by xfs_buf_do_callbacks() during ioend
		 * processing (via the bp->b_iodone callback), and then finally
		 * the ioend processing will drop the IO reference if the buffer
		 * is marked XBF_ASYNC.
		 *
		 * Hence we need to take an additional reference here so that IO
		 * completion processing doesn't free the buffer prematurely.
		 */
		xfs_buf_lock(bp);
		xfs_buf_hold(bp);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_ioerror(bp, EIO);
		XFS_BUF_UNDONE(bp);
		xfs_buf_stale(bp);
		xfs_buf_ioend(bp, 0);
	}
}
Example #28
0
/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and the xchg of the current process.
	 *
	 * Moreover, for this to become an infinite loop, new fences would
	 * need to be continuously signaled, i.e. radeon_fence_read needs
	 * to return a different value each time for both the currently
	 * polling process and the other process that updates last_seq
	 * between the atomic read and xchg of the current process. And
	 * the value the other process sets as last_seq must be higher
	 * than the seq value we just read, which means the current
	 * process needs to be interrupted after radeon_fence_read and
	 * before the atomic xchg.
	 *
	 * To be even safer we count the number of times we loop and bail
	 * after 10 iterations, accepting the fact that we might have
	 * temporarily set last_seq not to the true last seq but to an
	 * older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop again we don't want to return without
		 * checking whether a fence is signaled, as it means the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped too many times; bail out, accepting
			 * that we might have set an older fence seq than
			 * the current real last seq as signaled by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (wake) {
		rdev->fence_drv[ring].last_activity = jiffies;
		wake_up_all(&rdev->fence_queue);
	}
}
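To make the masking in the loop concrete, here is a worked illustration with invented values (not from the document):

/*
 * Suppose last_seq == 0x00000001fffffff0, last_emitted == 0x0000000200000008,
 * and the 32-bit hardware register has wrapped around to 0x00000005:
 *
 *   seq  = 0x0000000000000005;                  // radeon_fence_read()
 *   seq |= last_seq & 0xffffffff00000000LL;     // -> 0x0000000100000005
 *   // seq < last_seq, so a wrap occurred; take the upper half from
 *   // last_emitted instead:
 *   seq = (seq & 0xffffffff) | (last_emitted & 0xffffffff00000000LL);
 *   // -> 0x0000000200000005, which is > last_seq and <= last_emitted,
 *   // so the loop treats it as a newly signaled fence.
 */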
Example #29
0
/*
 * osprd_ioctl(inode, filp, cmd, arg)
 *   Called to perform an ioctl on the named file.
 */
int osprd_ioctl(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	osprd_info_t *d = file2osprd(filp);	// device info
	int r = 0;			// return value: initially 0

	// is file open for writing?
	int filp_writable = (filp->f_mode & FMODE_WRITE) != 0;

	// This line avoids compiler warnings; you may remove it.
	(void) filp_writable, (void) d;

	// Set 'r' to the ioctl's return value: 0 on success, negative on error

	if (cmd == OSPRDIOCACQUIRE) {

		// EXERCISE: Lock the ramdisk.
		//
		// If *filp is open for writing (filp_writable), then attempt
		// to write-lock the ramdisk; otherwise attempt to read-lock
		// the ramdisk.
		//
                // This lock request must block using 'd->blockq' until:
		// 1) no other process holds a write lock;
		// 2) either the request is for a read lock, or no other process
		//    holds a read lock; and
		// 3) lock requests should be serviced in order, so no process
		//    that blocked earlier is still blocked waiting for the
		//    lock.
		//
		// If a process acquires a lock, mark this fact by setting
		// 'filp->f_flags |= F_OSPRD_LOCKED'.  You also need to
		// keep track of how many read and write locks are held:
		// change the 'osprd_info_t' structure to do this.
		//
		// Also wake up processes waiting on 'd->blockq' as needed.
		//
		// If the lock request would cause a deadlock, return -EDEADLK.
		// If the lock request blocks and is awoken by a signal, then
		// return -ERESTARTSYS.
		// Otherwise, if we can grant the lock request, return 0.

		// 'd->ticket_head' and 'd->ticket_tail' should help you
		// service lock requests in order.  These implement a ticket
		// order: 'ticket_tail' is the next ticket, and 'ticket_head'
		// is the ticket currently being served.  You should set a local
		// variable to 'd->ticket_head' and increment 'd->ticket_head'.
		// Then, block at least until 'd->ticket_tail == local_ticket'.
		// (Some of these operations are in a critical section and must
		// be protected by a spinlock; which ones?)

		// Your code here (instead of the next two lines).
			/*eprintk("Attempting to acquire\n");
			r = -ENOTTY;*/		
		osp_spin_lock(&(d->mutex));
		if (current->pid == d->current_pid) {
			osp_spin_unlock(&(d->mutex));
			return 0;
		}
		unsigned ticket = d->ticket_head;
		d->ticket_head++;
		osp_spin_unlock(&(d->mutex));

		if (filp_writable) {
			if (wait_event_interruptible(d->blockq, d->ticket_tail == ticket && d->reader_cnt == 0 && d->writer_cnt == 0)
				== -ERESTARTSYS) {
				osp_spin_lock(&(d->mutex));
				if (d->ticket_tail == ticket) {
					d->ticket_tail++;
				}
				else{
					d->exited_tickets[d->exited_tickets_cnt++] = ticket;
				}
				osp_spin_unlock(&(d->mutex));
				return -ERESTARTSYS;
			}
			osp_spin_lock(&(d->mutex));
			d->writer_cnt++;
			d->current_pid = current->pid;
			filp->f_flags |= F_OSPRD_LOCKED;
			osp_spin_unlock(&(d->mutex));
		}
		else {
			if (wait_event_interruptible(d->blockq, d->ticket_tail == ticket && d->writer_cnt == 0)	== -ERESTARTSYS) {
				osp_spin_lock(&(d->mutex));
				if (d->ticket_tail == ticket) {
					d->ticket_tail++;
				}
				else{
					d->exited_tickets[d->exited_tickets_cnt++] = ticket;
				}
				osp_spin_unlock(&(d->mutex));
				return -ERESTARTSYS;
			}
			osp_spin_lock(&(d->mutex));
			d->reader_cnt++;
			d->current_pid = current->pid;
			filp->f_flags |= F_OSPRD_LOCKED;
			osp_spin_unlock(&(d->mutex));
		}

		// Check if next ticket is still valid (the process is still alive)
		osp_spin_lock(&(d->mutex));
		d->ticket_tail++;
		int i = 0;
		while (i < d->exited_tickets_cnt) {
			if (d->exited_tickets[i] == d->ticket_tail) {
				d->ticket_tail++;
				d->exited_tickets[i] = d->exited_tickets[--d->exited_tickets_cnt];
				i = 0;
				continue;
			}
			i++;
		}
		osp_spin_unlock(&(d->mutex));
		r = 0;

	} else if (cmd == OSPRDIOCTRYACQUIRE) {

		// EXERCISE: ATTEMPT to lock the ramdisk.
		//
		// This is just like OSPRDIOCACQUIRE, except it should never
		// block.  If OSPRDIOCACQUIRE would block or return deadlock,
		// OSPRDIOCTRYACQUIRE should return -EBUSY.
		// Otherwise, if we can grant the lock request, return 0.

		// Your code here (instead of the next two lines).
			/*eprintk("Attempting to try acquire\n");
			r = -ENOTTY;*/
		osp_spin_lock(&(d->mutex));
		if (current->pid == d->current_pid) {
			osp_spin_unlock(&(d->mutex));
			return 0;
		}
		if (filp_writable) {
			if (d->reader_cnt == 0 && d->writer_cnt == 0){
				d->writer_cnt++;
				filp->f_flags |= F_OSPRD_LOCKED;
				d->current_pid = current->pid;
			} else {
				osp_spin_unlock(&(d->mutex));
				return -EBUSY;
			}
		} else {
			if (d->writer_cnt == 0) {
				d->reader_cnt++;
				filp->f_flags |= F_OSPRD_LOCKED;
				d->current_pid = current->pid;
			} else {
				osp_spin_unlock(&(d->mutex));
				return -EBUSY;
			}
		}
		osp_spin_unlock(&(d->mutex));
		r = 0;

	} else if (cmd == OSPRDIOCRELEASE) {

		// EXERCISE: Unlock the ramdisk.
		//
		// If the file hasn't locked the ramdisk, return -EINVAL.
		// Otherwise, clear the lock from filp->f_flags, wake up
		// the wait queue, perform any additional accounting steps
		// you need, and return 0.

		// Your code here (instead of the next line).
			//r = -ENOTTY;
		osp_spin_lock(&(d->mutex));
		if (!(filp->f_flags & F_OSPRD_LOCKED)) {
			osp_spin_unlock(&(d->mutex));
			return -EINVAL;
		}

		if (filp_writable) {
			d->writer_cnt--;
			filp->f_flags &= ~F_OSPRD_LOCKED;
		}
		else {
			d->reader_cnt--;
			if (d->reader_cnt == 0)
				filp->f_flags &= ~F_OSPRD_LOCKED;
		}
		wake_up_all(&(d->blockq));
		osp_spin_unlock(&(d->mutex));
		r = 0;

	} else
		r = -ENOTTY; /* unknown command */
	return r;
}
Example #30
0
static inline void __set_state(struct hvsi_struct *hp, int state)
{
	hp->state = state;
	print_state(hp);
	wake_up_all(&hp->stateq);
}
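A hedged sketch of the waiting side this helper unblocks; the function name and the one-second timeout are assumptions for illustration, while hp->state, hp->stateq, and __set_state() come from the example:

static int hvsi_wait_for_state_sketch(struct hvsi_struct *hp, int state)
{
	/* sleeps until __set_state() stores the target state and calls
	 * wake_up_all() on hp->stateq, or until one second elapses */
	if (!wait_event_timeout(hp->stateq, hp->state == state, HZ))
		return -ETIMEDOUT;
	return 0;
}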