static ssize_t mycdrv_read(struct file *file, char __user *buf, size_t lbuf,
			   loff_t *ppos)
{
	printk(KERN_INFO "process %i (%s) going to sleep\n",
	       current->pid, current->comm);
	wait_event_killable(wq, (atomic_read(&data_ready)));
	printk(KERN_INFO "process %i (%s) awakening\n",
	       current->pid, current->comm);
	atomic_set(&data_ready, 0);
	return mycdrv_generic_read(file, buf, lbuf, ppos);
}
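The read method above only sleeps; the wake-up has to come from whichever path produces the data. A minimal sketch of a matching write method, assuming the same module-level wq and data_ready objects (the mycdrv_generic_write helper is hypothetical):

static ssize_t mycdrv_write(struct file *file, const char __user *buf,
			    size_t lbuf, loff_t *ppos)
{
	/* hypothetical copy-in helper, mirroring mycdrv_generic_read() above */
	ssize_t nbytes = mycdrv_generic_write(file, buf, lbuf, ppos);

	printk(KERN_INFO "process %i (%s) waking the readers\n",
	       current->pid, current->comm);
	/* publish the data, then wake anyone blocked in wait_event_killable() */
	atomic_set(&data_ready, 1);
	wake_up(&wq);
	return nbytes;
}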
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_killable(server->response_q,
				    midQ->midState != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}
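The wait condition here is a helper that re-reads the credit count; since wait_event_killable() evaluates its condition with no locks held, the helper has to take req_lock itself. A sketch of what such a helper looks like under that assumption (the actual cifs implementation may differ):

static int has_credits(struct TCP_Server_Info *server, int *credits)
{
	int num;

	/* sample the shared counter under the same lock the callers use */
	spin_lock(&server->req_lock);
	num = *credits;
	spin_unlock(&server->req_lock);
	return num > 0;
}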
static int wait_for_commit(struct sb *sb, unsigned delta)
{
	return wait_event_killable(sb->delta_event_wq,
				   try_flush_pending_until_delta(sb, delta));
}
/* Advance delta transition until specified delta */
static int wait_for_transition(struct sb *sb, unsigned delta)
{
	return wait_event_killable(sb->delta_event_wq,
				   try_delta_transition_until_delta(sb, delta));
}
int show_framebuffer_on_crtc(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			     bool page_flip,
			     struct drm_pending_vblank_event *event)
{
	struct pl111_gem_bo *bo;
	struct pl111_drm_flip_resource *flip_res;
	int flips_in_flight;
	int old_flips_in_flight;

	crtc->fb = fb;

	bo = PL111_BO_FROM_FRAMEBUFFER(fb);
	if (bo == NULL) {
		DRM_DEBUG_KMS("Failed to get pl111_gem_bo object\n");
		return -EINVAL;
	}

	/* If this is a full modeset, wait for all outstanding flips to complete
	 * before continuing. This avoids unnecessary complication from being
	 * able to queue up multiple modesets and queues of mixed modesets and
	 * page flips.
	 *
	 * Modesets should be uncommon and will not be performant anyway, so
	 * making them synchronous should have negligible performance impact.
	 */
	if (!page_flip) {
		int ret = wait_event_killable(priv.wait_for_flips,
				atomic_read(&priv.nr_flips_in_flight) == 0);
		if (ret)
			return ret;
	}

	/*
	 * There can be more 'early display' flips in flight than there are
	 * buffers, and there is (currently) no explicit bound on the number of
	 * flips. Hence, we need a new allocation for each one.
	 *
	 * Note: this could be optimized down if we knew a bound on the flips,
	 * since an application can only have so many buffers in flight to be
	 * useful/not hog all the memory
	 */
	flip_res = kmem_cache_alloc(priv.page_flip_slab, GFP_KERNEL);
	if (flip_res == NULL) {
		pr_err("kmem_cache_alloc failed to alloc - flip ignored\n");
		return -ENOMEM;
	}

	/*
	 * increment flips in flight, whilst blocking when we reach
	 * NR_FLIPS_IN_FLIGHT_THRESHOLD
	 */
	do {
		/*
		 * Note: use of assign-and-then-compare in the condition to set
		 * flips_in_flight
		 */
		int ret = wait_event_killable(priv.wait_for_flips,
				(flips_in_flight =
					atomic_read(&priv.nr_flips_in_flight))
				< NR_FLIPS_IN_FLIGHT_THRESHOLD);
		if (ret != 0) {
			kmem_cache_free(priv.page_flip_slab, flip_res);
			return ret;
		}

		old_flips_in_flight =
			atomic_cmpxchg(&priv.nr_flips_in_flight,
				       flips_in_flight, flips_in_flight + 1);
	} while (old_flips_in_flight != flips_in_flight);

	flip_res->fb = fb;
	flip_res->crtc = crtc;
	flip_res->page_flip = page_flip;
	flip_res->event = event;
	INIT_LIST_HEAD(&flip_res->link);
	DRM_DEBUG_KMS("DRM alloc flip_res=%p\n", flip_res);
#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
	if (bo->gem_object.export_dma_buf != NULL) {
		struct dma_buf *buf = bo->gem_object.export_dma_buf;
		unsigned long shared[1] = { 0 };
		struct kds_resource *resource_list[1] = {
			get_dma_buf_kds_resource(buf) };
		int err;

		get_dma_buf(buf);
		DRM_DEBUG_KMS("Got dma_buf %p\n", buf);

		/* Wait for the KDS resource associated with this buffer */
		err = kds_async_waitall(&flip_res->kds_res_set, &priv.kds_cb,
					flip_res, fb, 1, shared, resource_list);
		BUG_ON(err);
	} else {
		struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(crtc);

		DRM_DEBUG_KMS("No dma_buf for this flip\n");

		/* No dma-buf attached so just call the callback directly */
		flip_res->kds_res_set = NULL;
		pl111_crtc->show_framebuffer_cb(flip_res, fb);
	}
#else
	if (bo->gem_object.export_dma_buf != NULL) {
		struct dma_buf *buf = bo->gem_object.export_dma_buf;

		get_dma_buf(buf);
		DRM_DEBUG_KMS("Got dma_buf %p\n", buf);
	} else {
		DRM_DEBUG_KMS("No dma_buf for this flip\n");
	}

	/* No dma-buf attached to this so just call the callback directly */
	{
		struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(crtc);
		pl111_crtc->show_framebuffer_cb(flip_res, fb);
	}
#endif

	/* For the same reasons as the wait at the start of this function,
	 * wait for the modeset to complete before continuing: re-read the
	 * atomic counter in the condition rather than the stale local
	 * snapshot, which wait_event_killable() would never see change.
	 */
	if (!page_flip) {
		int ret = wait_event_killable(priv.wait_for_flips,
				atomic_read(&priv.nr_flips_in_flight) == 0);
		if (ret)
			return ret;
	}

	return 0;
}
static int tda998x_edid_delay_wait(struct tda998x_priv *priv)
{
	return wait_event_killable(priv->edid_delay_waitq,
				   !priv->edid_delay_active);
}
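For the wait above to finish, whatever ends the EDID delay must clear edid_delay_active and wake edid_delay_waitq. A minimal sketch of that completion path (function name and calling context assumed):

static void tda998x_edid_delay_done(struct tda998x_priv *priv)
{
	/* clear the flag before waking the waiter, so the re-checked
	 * condition in tda998x_edid_delay_wait() evaluates true */
	priv->edid_delay_active = false;
	wake_up(&priv->edid_delay_waitq);
}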
int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
		   int cmd, struct file_lock *fl)
{
	struct dlm_ls *ls;
	struct plock_op *op;
	struct plock_xop *xop;
	int rv;

	ls = dlm_find_lockspace_local(lockspace);
	if (!ls)
		return -EINVAL;

	xop = kzalloc(sizeof(*xop), GFP_NOFS);
	if (!xop) {
		rv = -ENOMEM;
		goto out;
	}

	op = &xop->xop;
	op->info.optype		= DLM_PLOCK_OP_LOCK;
	op->info.pid		= fl->fl_pid;
	op->info.ex		= (fl->fl_type == F_WRLCK);
	op->info.wait		= IS_SETLKW(cmd);
	op->info.fsid		= ls->ls_global_id;
	op->info.number		= number;
	op->info.start		= fl->fl_start;
	op->info.end		= fl->fl_end;
	if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
		/* an asynchronous grant callback is supplied: defer the
		   lock and identify the owner by pid */
		op->info.owner	= (__u64) fl->fl_pid;
		xop->callback	= fl->fl_lmops->lm_grant;
		locks_init_lock(&xop->flc);
		locks_copy_lock(&xop->flc, fl);
		xop->fl		= fl;
		xop->file	= file;
	} else {
		op->info.owner	= (__u64)(long) fl->fl_owner;
		xop->callback	= NULL;
	}

	send_op(op);

	if (xop->callback == NULL) {
		rv = wait_event_killable(recv_wq, (op->done != 0));
		if (rv == -ERESTARTSYS) {
			log_debug(ls, "dlm_posix_lock: wait killed %llx",
				  (unsigned long long)number);
			spin_lock(&ops_lock);
			list_del(&op->list);
			spin_unlock(&ops_lock);
			kfree(xop);
			do_unlock_close(ls, number, file, fl);
			goto out;
		}
	} else {
		rv = FILE_LOCK_DEFERRED;
		goto out;
	}

	spin_lock(&ops_lock);
	if (!list_empty(&op->list)) {
		log_error(ls, "dlm_posix_lock: op on list %llx",
			  (unsigned long long)number);
		list_del(&op->list);
	}
	spin_unlock(&ops_lock);

	rv = op->info.rv;

	if (!rv) {
		if (posix_lock_file_wait(file, fl) < 0)
			log_error(ls, "dlm_posix_lock: vfs lock error %llx",
				  (unsigned long long)number);
	}

	kfree(xop);
out:
	dlm_put_lockspace(ls);
	return rv;
}
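The wait_event_killable() in the synchronous path returns once op->done becomes nonzero, which happens when the result for this op arrives back from the plock helper. A sketch of the shape of that completion step (everything except op->done, op->info.rv and recv_wq is assumed):

static void plock_op_done(struct plock_op *op, int rv)
{
	/* record the result, mark the op complete, wake dlm_posix_lock() */
	op->info.rv = rv;
	op->done = 1;
	wake_up(&recv_wq);
}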
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server) && server->smbd_conn) {
		rc = smbd_send(server, rqst);
		goto smbd_done;
	}
	if (ssocket == NULL)
		return -ENOTSOCK;

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}

static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(&tr_hdr, 0, sizeof(tr_hdr));

	iov.iov_base = &tr_hdr;
	iov.iov_len = sizeof(tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	if (!server->ops->init_transform_rq) {
		cifs_dbg(VFS, "Encryption requested but transform callback "
			 "is missing\n");
		return -EIO;
	}

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		return rc;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
	return rc;
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
		      const int optype)
{
	int *val;

	val = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*val <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;
	return wait_for_free_credits(server, timeout, val);
}
static int
smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct kvec *iov = rqst->rq_iov;
	int n_vec = rqst->rq_nvec;
	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
	unsigned long send_length;
	unsigned int i;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;

	if (ssocket == NULL)
		return -ENOTSOCK;

	/* sanity check send length */
	send_length = rqst_len(rqst);
	if (send_length != smb_buf_length + 4) {
		WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n",
			send_length, smb_buf_length);
		return -EIO;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
	dump_smb(iov[0].iov_base, iov[0].iov_len);

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	size = 0;
	for (i = 0; i < n_vec; i++)
		size += iov[i].iov_len;

	iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, iov, n_vec, size);

	rc = smb_send_kvec(server, &smb_msg, &sent);
	if (rc < 0)
		goto uncork;

	total_len += sent;

	/* now walk the page array and send each page in it */
	for (i = 0; i < rqst->rq_npages; i++) {
		size_t len = i == rqst->rq_npages - 1
				? rqst->rq_tailsz
				: rqst->rq_pagesz;
		struct bio_vec bvec = {
			.bv_page = rqst->rq_pages[i],
			.bv_len = len
		};
		iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
			      &bvec, 1, len);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			break;

		total_len += sent;
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 smb_buf_length + 4, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
	}

	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else
		rc = 0;

	return rc;
}

static int
smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
{
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = n_vec };

	return smb_send_rqst(server, &rqst);
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov;

	iov.iov_base = smb_buffer;
	iov.iov_len = smb_buf_length + 4;

	return smb_sendv(server, &iov, 1);
}

static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}