int __sched
__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
                   int (*action)(void *), unsigned mode)
{
    do {
        int ret;

        prepare_to_wait_exclusive(wq, &q->wait, mode);
        if (!test_bit(q->key.bit_nr, q->key.flags))
            continue;
        ret = action(q->key.flags);
        if (!ret)
            continue;
        abort_exclusive_wait(wq, &q->wait, mode, &q->key);
        return ret;
    } while (test_and_set_bit(q->key.bit_nr, q->key.flags));
    finish_wait(wq, &q->wait);
    return 0;
}
static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
{
    int error;
    DEFINE_WAIT_FUNC(wait, receiver_wake_function);

    prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

    error = sock_error(sk);
    if (error)
        goto out_err;

    if (!skb_queue_empty(&sk->sk_receive_queue))
        goto out;

    if (sk->sk_shutdown & RCV_SHUTDOWN)
        goto out_noerr;

    error = -ENOTCONN;
    if (connection_based(sk) &&
        !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
        goto out_err;

    if (signal_pending(current))
        goto interrupted;

    error = 0;
    *timeo_p = schedule_timeout(*timeo_p);
out:
    finish_wait(sk_sleep(sk), &wait);
    return error;
interrupted:
    error = sock_intr_errno(*timeo_p);
out_err:
    *err = error;
    goto out;
out_noerr:
    *err = 0;
    error = 1;
    goto out;
}
static void
cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state, int io)
{
    DEFINE_WAIT(wait);
    SENTRY;

    ASSERT(cvp);
    ASSERT(mp);
    ASSERT(cvp->cv_magic == CV_MAGIC);
    ASSERT(mutex_owned(mp));
    atomic_inc(&cvp->cv_refs);

    if (cvp->cv_mutex == NULL)
        cvp->cv_mutex = mp;

    /* Ensure the same mutex is used by all callers */
    ASSERT(cvp->cv_mutex == mp);

    prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
    atomic_inc(&cvp->cv_waiters);

    /* Mutex should be dropped after prepare_to_wait() this
     * ensures we're linked in to the waiters list and avoids the
     * race where 'cvp->cv_waiters > 0' but the list is empty. */
    mutex_exit(mp);
    if (io)
        io_schedule();
    else
        schedule();
    mutex_enter(mp);

    /* No more waiters a different mutex could be used */
    if (atomic_dec_and_test(&cvp->cv_waiters)) {
        cvp->cv_mutex = NULL;
        wake_up(&cvp->cv_destroy);
    }

    finish_wait(&cvp->cv_event, &wait);
    atomic_dec(&cvp->cv_refs);

    SEXIT;
}
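The examples so far all follow the same basic shape: queue the task as an exclusive waiter, re-check the condition, sleep, and tear the wait entry down afterwards. A minimal, self-contained sketch of that pattern follows; struct my_dev, dev->wq, dev->data_ready and my_dev_wait() are hypothetical names invented for illustration, not taken from any snippet above, and only the wait-queue calls are real kernel API.

/* Minimal sketch of the exclusive-wait pattern used in these examples.
 * Assumes dev->wq was initialised with init_waitqueue_head() and that
 * dev->data_ready is set by a producer elsewhere.  On newer kernels
 * signal_pending() may also need <linux/sched/signal.h>. */
#include <linux/wait.h>
#include <linux/sched.h>

struct my_dev {
    wait_queue_head_t wq;   /* waiters block here */
    int data_ready;         /* condition, updated by the producer */
};

static int my_dev_wait(struct my_dev *dev)
{
    DEFINE_WAIT(wait);
    int ret = 0;

    for (;;) {
        /* Queue ourselves as an exclusive waiter, then re-check the
         * condition so a wake-up between the check and the sleep is
         * not lost. */
        prepare_to_wait_exclusive(&dev->wq, &wait, TASK_INTERRUPTIBLE);
        if (dev->data_ready)
            break;
        if (signal_pending(current)) {
            ret = -ERESTARTSYS;
            break;
        }
        schedule();
    }
    finish_wait(&dev->wq, &wait);
    return ret;
}

/* The producer side would set dev->data_ready and call wake_up(&dev->wq);
 * with exclusive waiters, wake_up() wakes only one of them, while
 * wake_up_all() wakes every waiter. */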
static int scull_read(struct file *filp, char __user *buf, size_t size,
                      loff_t *f_off)
{
    struct scull_dev *tmp1;
    int tmp2, tmp3, tmp6, tmp7, tmp8, result;
    struct quantum *tmp4;
    void *tmp5;

    tmp1 = filp->private_data;
    tmp2 = *f_off / QUANTUM_SIZE;   /* quantum index */
    tmp3 = *f_off % QUANTUM_SIZE;   /* offset within that quantum */
    DBPRINTI(size);
    DBPRINTI((int)*f_off);

    if (down_interruptible(&tmp1->m))
        return -ERESTARTSYS;

    while (*f_off >= tmp1->sum_len) {
        DEFINE_WAIT(tmp9);

        DBPRINT("Wait queue");
        up(&tmp1->m);
        if (filp->f_flags & O_NONBLOCK)     /* test the flag, not the whole field */
            return -EAGAIN;
        prepare_to_wait_exclusive(&tmp1->q, &tmp9, TASK_INTERRUPTIBLE);
        DBPRINT("waiting");
        if (*f_off >= tmp1->sum_len)
            schedule();
        finish_wait(&tmp1->q, &tmp9);
        if (signal_pending(current))
            return -ERESTARTSYS;
        if (down_interruptible(&tmp1->m))
            return -ERESTARTSYS;
    }

    tmp4 = follow_quantums(&tmp1->head, tmp2);
    tmp5 = tmp4->data;
    tmp5 += tmp3;
    tmp6 = tmp4->len - tmp3;                /* bytes left in this quantum */
    tmp8 = size > tmp6 ? tmp6 : size;       /* bytes to attempt to copy */
    tmp7 = copy_to_user(buf, tmp5, tmp8);   /* returns bytes NOT copied */
    result = tmp8 - tmp7;                   /* bytes actually copied */
    *f_off += result;
    DBPRINTI(tmp8);

    up(&tmp1->m);
    return result;
}
/*
 * returns with the extent buffer spinlocked.
 *
 * This will spin and/or wait as required to take the lock, and then
 * return with the spinlock held.
 *
 * After this call, scheduling is not safe without first calling
 * btrfs_set_lock_blocking()
 */
int btrfs_tree_lock(struct extent_buffer *eb)
{
    DEFINE_WAIT(wait);

    wait.func = btrfs_wake_function;

    if (!btrfs_spin_on_block(eb))
        goto sleep;

    while (1) {
        spin_nested(eb);

        /* nobody is blocking, exit with the spinlock held */
        if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
            return 0;

        /*
         * we have the spinlock, but the real owner is blocking.
         * wait for them
         */
        spin_unlock(&eb->lock);

        /*
         * spin for a bit, and if the blocking flag goes away,
         * loop around
         */
        cpu_relax();
        if (btrfs_spin_on_block(eb))
            continue;
sleep:
        prepare_to_wait_exclusive(&eb->lock_wq, &wait,
                                  TASK_UNINTERRUPTIBLE);

        if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
            schedule();

        finish_wait(&eb->lock_wq, &wait);
    }
    return 0;
}
static int pn_socket_connect(struct socket *sock, struct sockaddr *addr,
                             int len, int flags)
{
    struct sock *sk = sock->sk;
    struct pn_sock *pn = pn_sk(sk);
    struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
    struct task_struct *tsk = current;
    long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
    int err;

    if (pn_socket_autobind(sock))
        return -ENOBUFS;
    if (len < sizeof(struct sockaddr_pn))
        return -EINVAL;
    if (spn->spn_family != AF_PHONET)
        return -EAFNOSUPPORT;

    lock_sock(sk);

    switch (sock->state) {
    case SS_UNCONNECTED:
        if (sk->sk_state != TCP_CLOSE) {
            err = -EISCONN;
            goto out;
        }
        break;
    case SS_CONNECTING:
        err = -EALREADY;
        goto out;
    default:
        err = -EISCONN;
        goto out;
    }

    pn->dobject = pn_sockaddr_get_object(spn);
    pn->resource = pn_sockaddr_get_resource(spn);
    sock->state = SS_CONNECTING;

    err = sk->sk_prot->connect(sk, addr, len);
    if (err) {
        sock->state = SS_UNCONNECTED;
        pn->dobject = 0;
        goto out;
    }

    while (sk->sk_state == TCP_SYN_SENT) {
        DEFINE_WAIT(wait);

        if (!timeo) {
            err = -EINPROGRESS;
            goto out;
        }
        if (signal_pending(tsk)) {
            err = sock_intr_errno(timeo);
            goto out;
        }

        prepare_to_wait_exclusive(sk_sleep(sk), &wait,
                                  TASK_INTERRUPTIBLE);
        release_sock(sk);
        timeo = schedule_timeout(timeo);
        lock_sock(sk);
        finish_wait(sk_sleep(sk), &wait);
    }

    if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
        err = 0;
    else if (sk->sk_state == TCP_CLOSE_WAIT)
        err = -ECONNRESET;
    else
        err = -ECONNREFUSED;
    sock->state = err ? SS_UNCONNECTED : SS_CONNECTED;
out:
    release_sock(sk);
    return err;
}
/*
 * receive a message from an RxRPC socket
 * - we need to be careful about two or more threads calling recvmsg
 *   simultaneously
 */
int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
                  struct msghdr *msg, size_t len, int flags)
{
    struct rxrpc_skb_priv *sp;
    struct rxrpc_call *call = NULL, *continue_call = NULL;
    struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
    struct sk_buff *skb;
    long timeo;
    int copy, ret, ullen, offset, copied = 0;
    u32 abort_code;

    DEFINE_WAIT(wait);

    _enter(",,,%zu,%d", len, flags);

    if (flags & (MSG_OOB | MSG_TRUNC))
        return -EOPNOTSUPP;

    ullen = msg->msg_flags & MSG_CMSG_COMPAT ? 4 : sizeof(unsigned long);

    timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
    msg->msg_flags |= MSG_MORE;

    lock_sock(&rx->sk);

    for (;;) {
        /* return immediately if a client socket has no outstanding
         * calls */
        if (RB_EMPTY_ROOT(&rx->calls)) {
            if (copied)
                goto out;
            if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
                release_sock(&rx->sk);
                if (continue_call)
                    rxrpc_put_call(continue_call);
                return -ENODATA;
            }
        }

        /* get the next message on the Rx queue */
        skb = skb_peek(&rx->sk.sk_receive_queue);
        if (!skb) {
            /* nothing remains on the queue */
            if (copied &&
                (msg->msg_flags & MSG_PEEK || timeo == 0))
                goto out;

            /* wait for a message to turn up */
            release_sock(&rx->sk);
            prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
                                      TASK_INTERRUPTIBLE);
            ret = sock_error(&rx->sk);
            if (ret)
                goto wait_error;

            if (skb_queue_empty(&rx->sk.sk_receive_queue)) {
                if (signal_pending(current))
                    goto wait_interrupted;
                timeo = schedule_timeout(timeo);
            }
            finish_wait(sk_sleep(&rx->sk), &wait);
            lock_sock(&rx->sk);
            continue;
        }

    peek_next_packet:
        sp = rxrpc_skb(skb);
        call = sp->call;
        ASSERT(call != NULL);

        _debug("next pkt %s", rxrpc_pkts[sp->hdr.type]);

        /* make sure we wait for the state to be updated in this call */
        spin_lock_bh(&call->lock);
        spin_unlock_bh(&call->lock);

        if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
            _debug("packet from released call");
            if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
                BUG();
            rxrpc_free_skb(skb);
            continue;
        }

        /* determine whether to continue last data receive */
        if (continue_call) {
            _debug("maybe cont");
            if (call != continue_call ||
                skb->mark != RXRPC_SKB_MARK_DATA) {
                release_sock(&rx->sk);
                rxrpc_put_call(continue_call);
                _leave(" = %d [noncont]", copied);
                return copied;
            }
        }

        rxrpc_get_call(call);

        /* copy the peer address and timestamp */
        if (!continue_call) {
            if (msg->msg_name) {
                size_t len = sizeof(call->conn->trans->peer->srx);

                memcpy(msg->msg_name,
                       &call->conn->trans->peer->srx, len);
                msg->msg_namelen = len;
            }
            sock_recv_ts_and_drops(msg, &rx->sk, skb);
        }

        /* receive the message */
        if (skb->mark != RXRPC_SKB_MARK_DATA)
            goto receive_non_data_message;

        _debug("recvmsg DATA #%u { %d, %d }",
               ntohl(sp->hdr.seq), skb->len, sp->offset);

        if (!continue_call) {
            /* only set the control data once per recvmsg() */
            ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
                           ullen, &call->user_call_ID);
            if (ret < 0)
                goto copy_error;
            ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
        }

        ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv);
        ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1);
        call->rx_data_recv = ntohl(sp->hdr.seq);

        ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten);

        offset = sp->offset;
        copy = skb->len - offset;
        if (copy > len - copied)
            copy = len - copied;

        ret = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copy);

        if (ret < 0)
            goto copy_error;

        /* handle piecemeal consumption of data packets */
        _debug("copied %d+%d", copy, copied);

        offset += copy;
        copied += copy;

        if (!(flags & MSG_PEEK))
            sp->offset = offset;

        if (sp->offset < skb->len) {
            _debug("buffer full");
            ASSERTCMP(copied, ==, len);
            break;
        }

        /* we transferred the whole data packet */
        if (sp->hdr.flags & RXRPC_LAST_PACKET) {
            _debug("last");
            if (call->conn->out_clientflag) {
                /* last byte of reply received */
                ret = copied;
                goto terminal_message;
            }

            /* last bit of request received */
            if (!(flags & MSG_PEEK)) {
                _debug("eat packet");
                if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
                    BUG();
                rxrpc_free_skb(skb);
            }
            msg->msg_flags &= ~MSG_MORE;
            break;
        }

        /* move on to the next data message */
        _debug("next");
        if (!continue_call)
            continue_call = sp->call;
        else
            rxrpc_put_call(call);
        call = NULL;

        if (flags & MSG_PEEK) {
            _debug("peek next");
            skb = skb->next;
            if (skb == (struct sk_buff *)&rx->sk.sk_receive_queue)
                break;
            goto peek_next_packet;
        }

        _debug("eat packet");
        if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
            BUG();
        rxrpc_free_skb(skb);
    }
int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
                  struct msghdr *msg, size_t len, int flags)
{
    struct rxrpc_skb_priv *sp;
    struct rxrpc_call *call = NULL, *continue_call = NULL;
    struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
    struct sk_buff *skb;
    long timeo;
    int copy, ret, ullen, offset, copied = 0;
    u32 abort_code;

    DEFINE_WAIT(wait);

    _enter(",,,%zu,%d", len, flags);

    if (flags & (MSG_OOB | MSG_TRUNC))
        return -EOPNOTSUPP;

    ullen = msg->msg_flags & MSG_CMSG_COMPAT ? 4 : sizeof(unsigned long);

    timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
    msg->msg_flags |= MSG_MORE;

    lock_sock(&rx->sk);

    for (;;) {
        if (RB_EMPTY_ROOT(&rx->calls)) {
            if (copied)
                goto out;
            if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
                release_sock(&rx->sk);
                if (continue_call)
                    rxrpc_put_call(continue_call);
                return -ENODATA;
            }
        }

        skb = skb_peek(&rx->sk.sk_receive_queue);
        if (!skb) {
            if (copied &&
                (msg->msg_flags & MSG_PEEK || timeo == 0))
                goto out;

            release_sock(&rx->sk);
            prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
                                      TASK_INTERRUPTIBLE);
            ret = sock_error(&rx->sk);
            if (ret)
                goto wait_error;

            if (skb_queue_empty(&rx->sk.sk_receive_queue)) {
                if (signal_pending(current))
                    goto wait_interrupted;
                timeo = schedule_timeout(timeo);
            }
            finish_wait(sk_sleep(&rx->sk), &wait);
            lock_sock(&rx->sk);
            continue;
        }

    peek_next_packet:
        sp = rxrpc_skb(skb);
        call = sp->call;
        ASSERT(call != NULL);

        _debug("next pkt %s", rxrpc_pkts[sp->hdr.type]);

        spin_lock_bh(&call->lock);
        spin_unlock_bh(&call->lock);

        if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
            _debug("packet from released call");
            if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
                BUG();
            rxrpc_free_skb(skb);
            continue;
        }

        if (continue_call) {
            _debug("maybe cont");
            if (call != continue_call ||
                skb->mark != RXRPC_SKB_MARK_DATA) {
                release_sock(&rx->sk);
                rxrpc_put_call(continue_call);
                _leave(" = %d [noncont]", copied);
                return copied;
            }
        }

        rxrpc_get_call(call);

        if (!continue_call) {
            if (msg->msg_name && msg->msg_namelen > 0)
                memcpy(msg->msg_name,
                       &call->conn->trans->peer->srx,
                       sizeof(call->conn->trans->peer->srx));
            sock_recv_ts_and_drops(msg, &rx->sk, skb);
        }

        if (skb->mark != RXRPC_SKB_MARK_DATA)
            goto receive_non_data_message;

        _debug("recvmsg DATA #%u { %d, %d }",
               ntohl(sp->hdr.seq), skb->len, sp->offset);

        if (!continue_call) {
            ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
                           ullen, &call->user_call_ID);
            if (ret < 0)
                goto copy_error;
            ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
        }

        ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv);
        ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1);
        call->rx_data_recv = ntohl(sp->hdr.seq);

        ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten);

        offset = sp->offset;
        copy = skb->len - offset;
        if (copy > len - copied)
            copy = len - copied;

        if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
            ret = skb_copy_datagram_iovec(skb, offset,
                                          msg->msg_iov, copy);
        } else {
            ret = skb_copy_and_csum_datagram_iovec(skb, offset,
                                                   msg->msg_iov);
            if (ret == -EINVAL)
                goto csum_copy_error;
        }

        if (ret < 0)
            goto copy_error;

        _debug("copied %d+%d", copy, copied);

        offset += copy;
        copied += copy;

        if (!(flags & MSG_PEEK))
            sp->offset = offset;

        if (sp->offset < skb->len) {
            _debug("buffer full");
            ASSERTCMP(copied, ==, len);
            break;
        }

        if (sp->hdr.flags & RXRPC_LAST_PACKET) {
            _debug("last");
            if (call->conn->out_clientflag) {
                ret = copied;
                goto terminal_message;
            }

            if (!(flags & MSG_PEEK)) {
                _debug("eat packet");
                if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
                    BUG();
                rxrpc_free_skb(skb);
            }
            msg->msg_flags &= ~MSG_MORE;
            break;
        }

        _debug("next");
        if (!continue_call)
            continue_call = sp->call;
        else
            rxrpc_put_call(call);
        call = NULL;

        if (flags & MSG_PEEK) {
            _debug("peek next");
            skb = skb->next;
            if (skb == (struct sk_buff *)&rx->sk.sk_receive_queue)
                break;
            goto peek_next_packet;
        }

        _debug("eat packet");
        if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
            BUG();
        rxrpc_free_skb(skb);
    }
/*
 * Receive a message from an RxRPC socket
 * - we need to be careful about two or more threads calling recvmsg
 *   simultaneously
 */
int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                  int flags)
{
    struct rxrpc_call *call;
    struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
    struct list_head *l;
    size_t copied = 0;
    long timeo;
    int ret;

    DEFINE_WAIT(wait);

    trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_enter, 0, 0, 0, 0);

    if (flags & (MSG_OOB | MSG_TRUNC))
        return -EOPNOTSUPP;

    timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);

try_again:
    lock_sock(&rx->sk);

    /* Return immediately if a client socket has no outstanding calls */
    if (RB_EMPTY_ROOT(&rx->calls) &&
        list_empty(&rx->recvmsg_q) &&
        rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
        release_sock(&rx->sk);
        return -ENODATA;
    }

    if (list_empty(&rx->recvmsg_q)) {
        ret = -EWOULDBLOCK;
        if (timeo == 0) {
            call = NULL;
            goto error_no_call;
        }

        release_sock(&rx->sk);

        /* Wait for something to happen */
        prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
                                  TASK_INTERRUPTIBLE);
        ret = sock_error(&rx->sk);
        if (ret)
            goto wait_error;

        if (list_empty(&rx->recvmsg_q)) {
            if (signal_pending(current))
                goto wait_interrupted;
            trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_wait,
                                0, 0, 0, 0);
            timeo = schedule_timeout(timeo);
        }
        finish_wait(sk_sleep(&rx->sk), &wait);
        goto try_again;
    }

    /* Find the next call and dequeue it if we're not just peeking.  If we
     * do dequeue it, that comes with a ref that we will need to release.
     */
    write_lock_bh(&rx->recvmsg_lock);
    l = rx->recvmsg_q.next;
    call = list_entry(l, struct rxrpc_call, recvmsg_link);
    if (!(flags & MSG_PEEK))
        list_del_init(&call->recvmsg_link);
    else
        rxrpc_get_call(call, rxrpc_call_got);
    write_unlock_bh(&rx->recvmsg_lock);

    trace_rxrpc_recvmsg(call, rxrpc_recvmsg_dequeue, 0, 0, 0, 0);

    if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
        BUG();

    if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
        if (flags & MSG_CMSG_COMPAT) {
            unsigned int id32 = call->user_call_ID;

            ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
                           sizeof(unsigned int), &id32);
        } else {
            ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
                           sizeof(unsigned long),
                           &call->user_call_ID);
        }
        if (ret < 0)
            goto error;
    }

    if (msg->msg_name) {
        size_t len = sizeof(call->conn->params.peer->srx);

        memcpy(msg->msg_name, &call->conn->params.peer->srx, len);
        msg->msg_namelen = len;
    }

    switch (call->state) {
    case RXRPC_CALL_SERVER_ACCEPTING:
        ret = rxrpc_recvmsg_new_call(rx, call, msg, flags);
        break;
    case RXRPC_CALL_CLIENT_RECV_REPLY:
    case RXRPC_CALL_SERVER_RECV_REQUEST:
    case RXRPC_CALL_SERVER_ACK_REQUEST:
        ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len,
                                 flags, &copied);
        if (ret == -EAGAIN)
            ret = 0;

        if (after(call->rx_top, call->rx_hard_ack) &&
            call->rxtx_buffer[(call->rx_hard_ack + 1) &
                              RXRPC_RXTX_BUFF_MASK])
            rxrpc_notify_socket(call);
        break;
    default:
        ret = 0;
        break;
    }

    if (ret < 0)
        goto error;

    if (call->state == RXRPC_CALL_COMPLETE) {
        ret = rxrpc_recvmsg_term(call, msg);
        if (ret < 0)
            goto error;
        if (!(flags & MSG_PEEK))
            rxrpc_release_call(rx, call);
        msg->msg_flags |= MSG_EOR;
        ret = 1;
    }

    if (ret == 0)
        msg->msg_flags |= MSG_MORE;
    else
        msg->msg_flags &= ~MSG_MORE;
    ret = copied;

error:
    rxrpc_put_call(call, rxrpc_call_put);
error_no_call:
    release_sock(&rx->sk);
    trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
    return ret;

wait_interrupted:
    ret = sock_intr_errno(timeo);
wait_error:
    finish_wait(sk_sleep(&rx->sk), &wait);
    call = NULL;
    goto error_no_call;
}
/*
 * osprd_ioctl(inode, filp, cmd, arg)
 *   Called to perform an ioctl on the named file.
 */
int osprd_ioctl(struct inode *inode, struct file *filp,
                unsigned int cmd, unsigned long arg)
{
    osprd_info_t *d = file2osprd(filp); // device info
    int r = 0;                          // return value: initially 0
    DEFINE_WAIT(wait);                  // using the low level stuff

    // is file open for writing?
    int filp_writable = (filp->f_mode & FMODE_WRITE) != 0;

    // This line avoids compiler warnings; you may remove it.
    (void) filp_writable, (void) d;

    // Set 'r' to the ioctl's return value: 0 on success, negative on error

    if (cmd == OSPRDIOCACQUIRE) {

        // EXERCISE: Lock the ramdisk.
        //
        // If *filp is open for writing (filp_writable), then attempt
        // to write-lock the ramdisk; otherwise attempt to read-lock
        // the ramdisk.
        //
        // This lock request must block using 'd->blockq' until:
        // 1) no other process holds a write lock;
        // 2) either the request is for a read lock, or no other process
        //    holds a read lock; and
        // 3) lock requests should be serviced in order, so no process
        //    that blocked earlier is still blocked waiting for the
        //    lock.
        //
        // If a process acquires a lock, mark this fact by setting
        // 'filp->f_flags |= F_OSPRD_LOCKED'.  You also need to
        // keep track of how many read and write locks are held:
        // change the 'osprd_info_t' structure to do this.
        //
        // Also wake up processes waiting on 'd->blockq' as needed.
        //
        // If the lock request would cause a deadlock, return -EDEADLK.
        // If the lock request blocks and is awoken by a signal, then
        // return -ERESTARTSYS.
        // Otherwise, if we can grant the lock request, return 0.
        //
        // 'd->ticket_head' and 'd->ticket_tail' should help you
        // service lock requests in order.  These implement a ticket
        // order: 'ticket_tail' is the next ticket, and 'ticket_head'
        // is the ticket currently being served.  You should set a local
        // variable to 'd->ticket_head' and increment 'd->ticket_head'.
        // Then, block at least until 'd->ticket_tail == local_ticket'.
        // (Some of these operations are in a critical section and must
        // be protected by a spinlock; which ones?)
        //
        // Your code here (instead of the next two lines).

        if (filp_writable) {    // means we want the write lock.
            osp_spin_lock(&d->mutex);
            if (d->q_size > 0) {    // if another proc is waiting, give control to "front of line"
                if (!d->write_lock && !d->read_locks)   // no locks except us
                    wake_up_all(&d->blockq);
                d->q_size++;    // add to back of queue
                prepare_to_wait_exclusive(&d->blockq, &wait,
                                          TASK_INTERRUPTIBLE);  // add to write queue
                osp_spin_unlock(&d->mutex);
                schedule();     // go to sleep until wake_up_all wakes us

                // wake up
                osp_spin_lock(&d->mutex);
                finish_wait(&d->blockq, &wait); // delete from queue
                d->q_size--;

                // check that wasn't interrupted
                if (signal_pending(current)) {
                    osp_spin_unlock(&d->mutex);
                    return -ERESTARTSYS;
                }
            }

            // at "front of line."  Now check that no readers / writers
            while (d->write_lock || d->read_locks) {
                // if the lock is held just go back to back of line.
                prepare_to_wait_exclusive(&d->blockq, &wait,
                                          TASK_INTERRUPTIBLE);
                d->q_size++;
                osp_spin_unlock(&d->mutex);
                schedule();

                // wake up
                osp_spin_lock(&d->mutex);
                finish_wait(&d->blockq, &wait);
                d->q_size--;
                if (signal_pending(current)) {
                    osp_spin_unlock(&d->mutex);
                    return -ERESTARTSYS;
                }
            }

            // when this breaks we can get the lock.
            d->write_lock = 1;
            d->write_lock_owner = current->pid;
            filp->f_flags |= F_OSPRD_LOCKED;
            osp_spin_unlock(&d->mutex);
        } else {    // we want a read lock
            osp_spin_lock(&d->mutex);
            if (d->q_size > 0) {    // if another proc is waiting, give control to "front of line"
                if (!d->write_lock && !d->read_locks)   // no locks except us
                    wake_up_all(&d->blockq);
                d->q_size++;    // add to back of queue
                prepare_to_wait_exclusive(&d->blockq, &wait,
                                          TASK_INTERRUPTIBLE);  // add to write queue
                osp_spin_unlock(&d->mutex);
                schedule();     // go to sleep until wake_up_all wakes us

                // wake up
                osp_spin_lock(&d->mutex);
                finish_wait(&d->blockq, &wait); // delete from queue
                d->q_size--;

                // check that wasn't interrupted
                if (signal_pending(current)) {
                    osp_spin_unlock(&d->mutex);
                    return -ERESTARTSYS;
                }
            }

            // at "front of line."  Now check that no writers (readers ok)
            while (d->write_lock) {
                // if the lock is held just go back to back of line.
                prepare_to_wait_exclusive(&d->blockq, &wait,
                                          TASK_INTERRUPTIBLE);
                d->q_size++;
                osp_spin_unlock(&d->mutex);
                schedule();

                // wake up
                osp_spin_lock(&d->mutex);
                finish_wait(&d->blockq, &wait);
                d->q_size--;
                if (signal_pending(current)) {
                    osp_spin_unlock(&d->mutex);
                    return -ERESTARTSYS;
                }
            }

            // when this breaks we can get the lock.
            d->read_locks++;
            filp->f_flags |= F_OSPRD_LOCKED;
            osp_spin_unlock(&d->mutex);
        }

    } else if (cmd == OSPRDIOCTRYACQUIRE) {

        // EXERCISE: ATTEMPT to lock the ramdisk.
        //
        // This is just like OSPRDIOCACQUIRE, except it should never
        // block.  If OSPRDIOCACQUIRE would block or return deadlock,
        // OSPRDIOCTRYACQUIRE should return -EBUSY.
        // Otherwise, if we can grant the lock request, return 0.
        //
        // Your code here (instead of the next two lines).
        //r = -ENOTTY;

        if (filp_writable) {
            // try to get a write lock
            osp_spin_lock(&d->mutex);
            if (d->write_lock || d->read_locks) {   // if the file is locked, fail
                osp_spin_unlock(&d->mutex);
                return -EBUSY;
            } else {    // no write lock, no read locks.
                // get the write lock
                d->write_lock = 1;
                d->write_lock_owner = current->pid;
                filp->f_flags |= F_OSPRD_LOCKED;
                osp_spin_unlock(&d->mutex);
            }
        } else {    // read lock
            osp_spin_lock(&d->mutex);
            if (d->write_lock) {    // locked for writing
                osp_spin_unlock(&d->mutex);
                return -EBUSY;
            } else {
                d->read_locks++;
                filp->f_flags |= F_OSPRD_LOCKED;
                osp_spin_unlock(&d->mutex);
            }
        }

    } else if (cmd == OSPRDIOCRELEASE) {

        // EXERCISE: Unlock the ramdisk.
        //
        // If the file hasn't locked the ramdisk, return -EINVAL.
        // Otherwise, clear the lock from filp->f_flags, wake up
        // the wait queue, perform any additional accounting steps
        // you need, and return 0.
        //
        // Your code here (instead of the next line).
        if (!(filp->f_flags & F_OSPRD_LOCKED))
            return -EINVAL; // the file isn't even locked yadingus

        //else
        filp->f_flags &= ~F_OSPRD_LOCKED;   // unlock flag
        osp_spin_lock(&d->mutex);
        if (filp_writable) {    // had a write lock
            d->write_lock = 0;  // release the lock
            d->write_lock_owner = -1;
            wake_up_all(&d->blockq);    // wake up the queue and get next
        } else {    // read lock
            d->read_locks--;
            if (!d->read_locks) // wake up the queue if no more readers
                wake_up_all(&d->blockq);
        }
        osp_spin_unlock(&d->mutex);

    } else
        r = -ENOTTY;    /* unknown command */

    return r;
}
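The OSPRDIOCACQUIRE comment above describes a ticket discipline (d->ticket_head / d->ticket_tail) that the code itself never uses. A hedged sketch of that idea follows; osprd_wait_turn() is a hypothetical helper, and the convention here is the one the comment spells out (take and increment ticket_head, block until ticket_tail catches up, advance ticket_tail on release), so treat both field roles and the helper as illustrative rather than part of the original driver.

/* Sketch of the ticket ordering described in the exercise comment.
 * Assumes d->ticket_head and d->ticket_tail are unsigned counters in
 * osprd_info_t, both starting at 0. */
static int osprd_wait_turn(osprd_info_t *d)
{
    DEFINE_WAIT(wait);
    unsigned local_ticket;

    osp_spin_lock(&d->mutex);
    local_ticket = d->ticket_head++;    /* take the next ticket */

    while (d->ticket_tail != local_ticket) {
        /* queue first, then drop the spinlock, so a wake-up between
         * the check and the sleep is not lost */
        prepare_to_wait_exclusive(&d->blockq, &wait, TASK_INTERRUPTIBLE);
        osp_spin_unlock(&d->mutex);
        schedule();
        osp_spin_lock(&d->mutex);
        finish_wait(&d->blockq, &wait);

        if (signal_pending(current)) {
            osp_spin_unlock(&d->mutex);
            /* NOTE: a full implementation must also account for the
             * abandoned ticket so later waiters still make progress. */
            return -ERESTARTSYS;
        }
    }
    osp_spin_unlock(&d->mutex);
    return 0;
}

/* The matching release path would, under d->mutex, do:
 *     d->ticket_tail++;
 *     wake_up_all(&d->blockq);
 * so the waiter holding the next ticket re-checks and proceeds. */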
/*
 * osprd_ioctl(inode, filp, cmd, arg)
 *   Called to perform an ioctl on the named file.
 */
int osprd_ioctl(struct inode *inode, struct file *filp,
                unsigned int cmd, unsigned long arg)
{
    int r = 0;

    // is file open for writing?
    int filp_writable = (filp->f_mode & FMODE_WRITE) != 0;
    osprd_info_t *d = file2osprd(filp); // device info
    DEFINE_WAIT(wait);                  // wait queue entry in case we block
    wait.func = &default_wake_function;

    // This line avoids compiler warnings; you may remove it.
    (void) filp_writable, (void) d;

    // Set 'r' to the ioctl's return value: 0 on success, negative on error

    if (cmd == OSPRDIOCACQUIRE) {

        // EXERCISE: Lock the ramdisk.
        //
        // If *filp is a writable file, then attempt to write-lock
        // the ramdisk; otherwise attempt to read-lock the ramdisk.
        //
        // This lock request must block using 'd->blockq' until:
        // 1) no other process holds a write lock;
        // 2) either the request is for a read lock, or no other process
        //    holds a read lock; and
        // 3) lock requests should be serviced in order, so no process
        //    that blocked earlier is still blocked waiting for the
        //    lock.
        //
        // If a process acquires a lock, mark this fact by setting
        // 'filp->f_flags |= F_OSPRD_LOCKED'.  You may also need to
        // keep track of how many read and write locks are held:
        // change the 'osprd_info_t' structure to do this.
        //
        // Also wake up processes waiting on 'd->blockq' as needed.
        //
        // If the lock request would cause a deadlock, return -EDEADLK.
        // If the lock request blocks and is awoken by a signal, then
        // return -ERESTARTSYS.
        // Otherwise, if we can grant the lock request, return 0.
        //
        // Your code here (instead of the next two lines).

        if (filp_writable) {    // Attempt to take write lock
            if (d->num_ramdisks_open) {
                d->num_ramdisks_open = 0;
                r = -EDEADLK;
                return r;
            }

            if (waitqueue_active(&d->blockq) || d->write_lock_count ||
                d->read_lock_count || (filp->f_flags & F_OSPRD_LOCKED)) {
                /* Enqueue writer process and call scheduler if
                 *  i.   Wait queue is not empty
                 *  ii.  No. of readers > 0
                 *  iii. No. of writers > 0
                 *  iv.  Ramdisk has been locked
                 */
                osp_spin_lock(&d->mutex);
                prepare_to_wait_exclusive(&d->blockq, &wait,
                                          TASK_INTERRUPTIBLE);
                osp_spin_unlock(&d->mutex);
                do {
                    schedule();
                    /* if a signal has occurred, return ERESTARTSYS to caller */
                    if (signal_pending(current)) {
                        /* remove ourselves from the wait queue before
                         * bailing out */
                        finish_wait(&d->blockq, &wait);
                        r = -ERESTARTSYS;
                        return r;
                    }
                } while (d->write_lock_count || d->read_lock_count ||
                         (filp->f_flags & F_OSPRD_LOCKED));

                /* All conditions for locking satisfied; unblock (dequeue) */
                finish_wait(&d->blockq, &wait);
            }

            /* Acquire write lock */
            osp_spin_lock(&d->mutex);
            filp->f_flags |= F_OSPRD_LOCKED;
            d->write_lock_count++;
            osp_spin_unlock(&d->mutex);
        } else {    // Attempt to take read lock
            /* Enqueue reader process and call scheduler if
             *  i.   Wait queue is not empty
             *  ii.  No. of writers > 0
             *  iii. Ramdisk has been locked
             */
            if (waitqueue_active(&d->blockq) || d->write_lock_count ||
                (filp->f_flags & F_OSPRD_LOCKED)) {
                osp_spin_lock(&d->mutex);
                prepare_to_wait_exclusive(&d->blockq, &wait,
                                          TASK_INTERRUPTIBLE);
                osp_spin_unlock(&d->mutex);
                do {
                    schedule();
                    /* if a signal has occurred, return ERESTARTSYS to caller */
                    if (signal_pending(current)) {
                        /* remove ourselves from the wait queue before
                         * bailing out */
                        finish_wait(&d->blockq, &wait);
                        r = -ERESTARTSYS;
                        return r;
                    }
                } while (d->write_lock_count ||
                         (filp->f_flags & F_OSPRD_LOCKED));

                /* All conditions for locking satisfied; unblock (dequeue) */
                finish_wait(&d->blockq, &wait);
            }

            /* Acquire read lock */
            osp_spin_lock(&d->mutex);
            filp->f_flags |= F_OSPRD_LOCKED;
            d->read_lock_count++;
#if 0
            /* Wake up next reader in the queue to ensure that
             * - when a writer dequeues, all subsequent readers in the queue
             *   till the first writer, are woken up.
             * - the writer reaches the head of the queue to be called next
             *
             * This causes TEST CASE 15 to fail. So I have commented it.
             */
            if (waitqueue_active(&d->blockq))
                wake_up(&d->blockq);
#endif
            osp_spin_unlock(&d->mutex);
        }
#if 0
        eprintk("Attempting to acquire\n");
        r = -ENOTTY;
#endif

    } else if (cmd == OSPRDIOCTRYACQUIRE) {

        // EXERCISE: ATTEMPT to lock the ramdisk.
        //
        // This is just like OSPRDIOCACQUIRE, except it should never
        // block.  If OSPRDIOCACQUIRE would block or return deadlock,
        // OSPRDIOCTRYACQUIRE should return -EBUSY.
        // Otherwise, if we can grant the lock request, return 0.
        //
        // Your code here (instead of the next two lines).

        if (filp_writable) {    // Attempt to take write lock
            /* Refuse the write lock if
             *  i.   Wait queue is not empty
             *  ii.  No. of readers > 0
             *  iii. No. of writers > 0
             *  iv.  Ramdisk has been locked
             */
            if (waitqueue_active(&d->blockq) || d->write_lock_count ||
                d->read_lock_count || (filp->f_flags & F_OSPRD_LOCKED)) {
                /* Not able to acquire write lock; return EBUSY */
                r = -EBUSY;
                return r;
            }

            /* Acquire write lock */
            osp_spin_lock(&d->mutex);
            filp->f_flags |= F_OSPRD_LOCKED;
            d->write_lock_count++;
            osp_spin_unlock(&d->mutex);
        } else {
            /* Refuse the read lock if
             *  i.   Wait queue is not empty
             *  ii.  No. of writers > 0
             *  iii. Ramdisk has been locked
             */
            if (waitqueue_active(&d->blockq) || d->write_lock_count ||
                (filp->f_flags & F_OSPRD_LOCKED)) {
                /* Not able to acquire read lock; return EBUSY */
                r = -EBUSY;
                return r;
            }

            /* Acquire read lock */
            osp_spin_lock(&d->mutex);
            filp->f_flags |= F_OSPRD_LOCKED;
            d->read_lock_count++;
            osp_spin_unlock(&d->mutex);
        }
#if 0
        eprintk("Attempting to try acquire\n");
        r = -ENOTTY;
#endif

    } else if (cmd == OSPRDIOCRELEASE) {

        // EXERCISE: Unlock the ramdisk.
        //
        // If the file hasn't locked the ramdisk, return -EINVAL.
        // Otherwise, clear the lock from filp->f_flags, wake up
        // the wait queue, perform any additional accounting steps
        // you need, and return 0.
        //
        // Your code here (instead of the next line).

        if (!(filp->f_flags & F_OSPRD_LOCKED)) {
            /* you should not be here */
            r = -EINVAL;
        } else {
            /* Release read or write lock as appropriate */
            osp_spin_lock(&d->mutex);
            filp->f_flags &= ~F_OSPRD_LOCKED;
            if (filp_writable)
                d->write_lock_count = 0;
            else
                d->read_lock_count--;
            if (waitqueue_active(&d->blockq))
                wake_up(&d->blockq);
            //d->num_ramdisks_open--;
            osp_spin_unlock(&d->mutex);
        }
        // r = -ENOTTY;

    } else
        r = -ENOTTY;    /* unknown command */

    return r;
}
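One detail worth noting about the blocking paths in the last example: prepare_to_wait_exclusive() sets the task state, but that state is consumed by the first schedule(); looping on schedule() without re-arming the wait entry degrades into a busy yield loop. A hedged sketch of the same blocking write acquire, re-arming the wait on every iteration, is shown below; osprd_acquire_write() is a hypothetical helper, while d->mutex, d->blockq and the lock counters are the fields used in the example above.

/* Sketch only: re-arm the wait entry each time around the loop so the
 * task is in TASK_INTERRUPTIBLE whenever schedule() is called. */
static int osprd_acquire_write(osprd_info_t *d, struct file *filp)
{
    DEFINE_WAIT(wait);
    int r = 0;

    osp_spin_lock(&d->mutex);
    for (;;) {
        prepare_to_wait_exclusive(&d->blockq, &wait, TASK_INTERRUPTIBLE);
        /* re-check the condition while still holding the spinlock */
        if (!d->write_lock_count && !d->read_lock_count)
            break;
        if (signal_pending(current)) {
            r = -ERESTARTSYS;
            break;
        }
        osp_spin_unlock(&d->mutex);
        schedule();
        osp_spin_lock(&d->mutex);
    }
    finish_wait(&d->blockq, &wait);
    if (r == 0) {
        filp->f_flags |= F_OSPRD_LOCKED;
        d->write_lock_count++;
    }
    osp_spin_unlock(&d->mutex);
    return r;
}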