/*
 * Frontend .sleep callback: put the frontend to sleep and power the device
 * down.  Unless a suspend/resume cycle is in progress, first signal sleep
 * via ADAP_SLEEP and wait for streaming to stop (ADAP_STREAMING cleared),
 * and on exit mark no frontend active and wake anyone waiting on ADAP_SLEEP.
 */
static int dvb_usb_fe_sleep(struct dvb_frontend *fe)
{
	int ret;
	struct dvb_usb_adapter *adap = fe->dvb->priv;
	struct dvb_usb_device *d = adap_to_d(adap);

	dev_dbg(&d->udev->dev, "%s: adap=%d fe=%d\n", __func__, adap->id,
			fe->id);

	if (!adap->suspend_resume_active) {
		/* announce sleep, then block until streaming has stopped */
		set_bit(ADAP_SLEEP, &adap->state_bits);
		wait_on_bit(&adap->state_bits, ADAP_STREAMING, wait_schedule,
				TASK_UNINTERRUPTIBLE);
	}

	/* per-frontend sleep hook (saved original .sleep op, if any) */
	if (adap->fe_sleep[fe->id]) {
		ret = adap->fe_sleep[fe->id](fe);
		if (ret < 0)
			goto err;
	}

	/* device-level frontend disable hook */
	if (d->props->frontend_ctrl) {
		ret = d->props->frontend_ctrl(fe, 0);
		if (ret < 0)
			goto err;
	}

	ret = dvb_usbv2_device_power_ctrl(d, 0);
	if (ret < 0)
		goto err;	/* redundant: execution falls into err: anyway */
err:
	if (!adap->suspend_resume_active) {
		adap->active_fe = -1;
		clear_bit(ADAP_SLEEP, &adap->state_bits);
		/* order the clear before waking waiters on ADAP_SLEEP */
		smp_mb__after_clear_bit();
		wake_up_bit(&adap->state_bits, ADAP_SLEEP);
	}

	dev_dbg(&d->udev->dev, "%s: ret=%d\n", __func__, ret);
	return ret;
}
/*
 * Demux .start_feed callback: on the first feed, resolve the streaming
 * parameters, pick the TS-type-specific URB completion handler, submit the
 * USB streaming URBs, enable the HW PID filter and ask the device to start
 * streaming.  For every feed (first or not) the feed's PID is added to the
 * HW PID filter when PID filtering is enabled.
 */
static int dvb_usb_start_feed(struct dvb_demux_feed *dvbdmxfeed)
{
	struct dvb_usb_adapter *adap = dvbdmxfeed->demux->priv;
	struct dvb_usb_device *d = adap_to_d(adap);
	int ret = 0;
	struct usb_data_stream_properties stream_props;

	dev_dbg(&d->udev->dev,
			"%s: adap=%d active_fe=%d feed_type=%d setting pid [%s]: %04x (%04d) at index %d\n",
			__func__, adap->id, adap->active_fe, dvbdmxfeed->type,
			adap->pid_filtering ? "yes" : "no", dvbdmxfeed->pid,
			dvbdmxfeed->pid, dvbdmxfeed->index);

	/* wait init is done (uninterruptible, so no return value to check) */
	wait_on_bit(&adap->state_bits, ADAP_INIT, TASK_UNINTERRUPTIBLE);

	if (adap->active_fe == -1)
		return -EINVAL;

	/* skip feed setup if we are already feeding */
	if (adap->feed_count++ > 0)
		goto skip_feed_start;

	/* set 'streaming' status bit */
	set_bit(ADAP_STREAMING, &adap->state_bits);

	/* resolve input and output streaming parameters; start from the
	 * static defaults and let the device override them */
	if (d->props->get_stream_config) {
		memcpy(&stream_props, &adap->props->stream,
				sizeof(struct usb_data_stream_properties));
		ret = d->props->get_stream_config(adap->fe[adap->active_fe],
				&adap->ts_type, &stream_props);
		if (ret)
			dev_err(&d->udev->dev,
					"%s: get_stream_config() failed=%d\n",
					KBUILD_MODNAME, ret);
	} else {
		stream_props = adap->props->stream;
	}

	/* choose the URB completion handler matching the TS packet format */
	switch (adap->ts_type) {
	case DVB_USB_FE_TS_TYPE_204:
		adap->stream.complete = dvb_usb_data_complete_204;
		break;
	case DVB_USB_FE_TS_TYPE_RAW:
		adap->stream.complete = dvb_usb_data_complete_raw;
		break;
	case DVB_USB_FE_TS_TYPE_188:
	default:
		adap->stream.complete = dvb_usb_data_complete;
		break;
	}

	/* submit USB streaming packets
	 * NOTE(review): return value is ignored here — a submit failure is
	 * only noticed later; confirm whether it should be propagated */
	usb_urb_submitv2(&adap->stream, &stream_props);

	/* enable HW PID filter */
	if (adap->pid_filtering && adap->props->pid_filter_ctrl) {
		ret = adap->props->pid_filter_ctrl(adap, 1);
		if (ret)
			dev_err(&d->udev->dev,
					"%s: pid_filter_ctrl() failed=%d\n",
					KBUILD_MODNAME, ret);
	}

	/* ask device to start streaming */
	if (d->props->streaming_ctrl) {
		ret = d->props->streaming_ctrl(adap->fe[adap->active_fe], 1);
		if (ret)
			dev_err(&d->udev->dev,
					"%s: streaming_ctrl() failed=%d\n",
					KBUILD_MODNAME, ret);
	}
skip_feed_start:
	/* add PID to device HW PID filter */
	if (adap->pid_filtering && adap->props->pid_filter) {
		ret = adap->props->pid_filter(adap, dvbdmxfeed->index,
				dvbdmxfeed->pid, 1);
		if (ret)
			dev_err(&d->udev->dev, "%s: pid_filter() failed=%d\n",
					KBUILD_MODNAME, ret);
	}

	if (ret)
		dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
	return ret;
}
/*
 * Map a uid/gid to a SID via the cifs.idmap upcall, caching the result in
 * the per-type rb-tree.  A cached, already-mapped entry is returned at once;
 * otherwise the SID_ID_PENDING bit elects exactly one upcaller while other
 * threads wait on that bit.  Returns 0 and fills *ssid on success, negative
 * errno otherwise.
 */
static int id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
{
	int rc = 0;
	struct key *sidkey;
	const struct cred *saved_cred;
	struct cifs_sid *lsid;
	struct cifs_sid_id *psidid, *npsidid;
	struct rb_root *cidtree;
	spinlock_t *cidlock;

	/* pick the lock/tree pair for the id type */
	if (sidtype == SIDOWNER) {
		cidlock = &siduidlock;
		cidtree = &uidtree;
	} else if (sidtype == SIDGROUP) {
		cidlock = &sidgidlock;
		cidtree = &gidtree;
	} else
		return -EINVAL;

	spin_lock(cidlock);
	psidid = sid_rb_search(cidtree, cid);

	if (!psidid) { /* node does not exist, allocate one & attempt adding */
		spin_unlock(cidlock);
		npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
		if (!npsidid)
			return -ENOMEM;

		npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
		if (!npsidid->sidstr) {
			kfree(npsidid);
			return -ENOMEM;
		}

		/* re-check under the lock: someone may have raced us */
		spin_lock(cidlock);
		psidid = sid_rb_search(cidtree, cid);
		if (psidid) { /* node got inserted meanwhile; drop ours */
			++psidid->refcount;
			spin_unlock(cidlock);
			kfree(npsidid->sidstr);
			kfree(npsidid);
		} else {
			psidid = npsidid;
			sid_rb_insert(cidtree, cid, &psidid,
					sidtype == SIDOWNER ? "oi:" : "gi:");
			++psidid->refcount;
			spin_unlock(cidlock);
		}
	} else {
		++psidid->refcount;
		spin_unlock(cidlock);
	}

	/* from here on the refcount taken above keeps psidid alive */
	if (test_bit(SID_ID_MAPPED, &psidid->state)) {
		memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
		psidid->time = jiffies; /* update ts for accessing */
		goto id_sid_out;
	}

	/* a recent upcall failed; don't retry until SID_MAP_RETRY elapses */
	if (time_after(psidid->time + SID_MAP_RETRY, jiffies)) {
		rc = -EINVAL;
		goto id_sid_out;
	}

	if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
		/* we won the race: do the upcall with root credentials */
		saved_cred = override_creds(root_cred);
		sidkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
		if (IS_ERR(sidkey)) {
			rc = -EINVAL;
			cFYI(1, "%s: Can't map and id to a SID", __func__);
		} else {
			/* copy at most sizeof(struct cifs_sid) of payload */
			lsid = (struct cifs_sid *)sidkey->payload.data;
			memcpy(&psidid->sid, lsid,
				sidkey->datalen < sizeof(struct cifs_sid) ?
				sidkey->datalen : sizeof(struct cifs_sid));
			memcpy(ssid, &psidid->sid,
				sidkey->datalen < sizeof(struct cifs_sid) ?
				sidkey->datalen : sizeof(struct cifs_sid));
			set_bit(SID_ID_MAPPED, &psidid->state);
			key_put(sidkey);
			kfree(psidid->sidstr);
		}
		psidid->time = jiffies; /* record attempt time (retry window) */
		revert_creds(saved_cred);
		clear_bit(SID_ID_PENDING, &psidid->state);
		wake_up_bit(&psidid->state, SID_ID_PENDING);
	} else {
		/* another thread is upcalling; wait for it to finish */
		rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
				sidid_pending_wait, TASK_INTERRUPTIBLE);
		if (rc) {
			cFYI(1, "%s: sidid_pending_wait interrupted %d",
					__func__, rc);
			--psidid->refcount;
			return rc;
		}
		if (test_bit(SID_ID_MAPPED, &psidid->state))
			memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
		else
			rc = -EINVAL;
	}
id_sid_out:
	--psidid->refcount;
	return rc;
}
/*
 * Map a SID to a uid/gid via the cifs.idmap upcall, caching the result in
 * the per-type rb-tree.  Falls back to the mount's default uid/gid when the
 * mapping is unavailable.  SID_ID_PENDING elects one upcaller; other threads
 * wait on the bit.  Always returns 0 except when allocation fails, the
 * sidtype is invalid, or the wait is interrupted.
 */
static int sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
		struct cifs_fattr *fattr, uint sidtype)
{
	int rc;
	unsigned long cid;
	struct key *idkey;
	const struct cred *saved_cred;
	struct cifs_sid_id *psidid, *npsidid;
	struct rb_root *cidtree;
	spinlock_t *cidlock;

	/* pick the lock/tree pair; cid defaults to the mount id in case
	 * the upcall fails */
	if (sidtype == SIDOWNER) {
		cid = cifs_sb->mnt_uid;
		cidlock = &siduidlock;
		cidtree = &uidtree;
	} else if (sidtype == SIDGROUP) {
		cid = cifs_sb->mnt_gid;
		cidlock = &sidgidlock;
		cidtree = &gidtree;
	} else
		return -ENOENT;

	spin_lock(cidlock);
	psidid = id_rb_search(cidtree, psid);

	if (!psidid) { /* node does not exist, allocate one & attempt adding */
		spin_unlock(cidlock);
		npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
		if (!npsidid)
			return -ENOMEM;

		npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
		if (!npsidid->sidstr) {
			kfree(npsidid);
			return -ENOMEM;
		}

		/* re-check under the lock: someone may have raced us */
		spin_lock(cidlock);
		psidid = id_rb_search(cidtree, psid);
		if (psidid) { /* node got inserted meanwhile; drop ours */
			++psidid->refcount;
			spin_unlock(cidlock);
			kfree(npsidid->sidstr);
			kfree(npsidid);
		} else {
			psidid = npsidid;
			id_rb_insert(cidtree, psid, &psidid,
					sidtype == SIDOWNER ? "os:" : "gs:");
			++psidid->refcount;
			spin_unlock(cidlock);
		}
	} else {
		++psidid->refcount;
		spin_unlock(cidlock);
	}

	/* from here on the refcount taken above keeps psidid alive */
	if (test_bit(SID_ID_MAPPED, &psidid->state)) {
		cid = psidid->id;
		psidid->time = jiffies; /* update ts for accessing */
		goto sid_to_id_out;
	}

	/* a recent upcall failed; keep the default id until retry window */
	if (time_after(psidid->time + SID_MAP_RETRY, jiffies))
		goto sid_to_id_out;

	if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
		/* we won the race: do the upcall with root credentials */
		saved_cred = override_creds(root_cred);
		idkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
		if (IS_ERR(idkey))
			cFYI(1, "%s: Can't map SID to an id", __func__);
		else {
			cid = *(unsigned long *)idkey->payload.value;
			psidid->id = cid;
			set_bit(SID_ID_MAPPED, &psidid->state);
			key_put(idkey);
			kfree(psidid->sidstr);
		}
		revert_creds(saved_cred);
		psidid->time = jiffies; /* record attempt time (retry window) */
		clear_bit(SID_ID_PENDING, &psidid->state);
		wake_up_bit(&psidid->state, SID_ID_PENDING);
	} else {
		/* another thread is upcalling; wait for it to finish */
		rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
				sidid_pending_wait, TASK_INTERRUPTIBLE);
		if (rc) {
			cFYI(1, "%s: sidid_pending_wait interrupted %d",
					__func__, rc);
			--psidid->refcount;
			return rc;
		}
		if (test_bit(SID_ID_MAPPED, &psidid->state))
			cid = psidid->id;
		/* else: leave cid at the mount default */
	}

sid_to_id_out:
	--psidid->refcount;
	if (sidtype == SIDOWNER)
		fattr->cf_uid = cid;
	else
		fattr->cf_gid = cid;
	return 0;
}
/*
 * afs_lookup_cell - Look up or create a cell record.
 * @net: The network namespace
 * @name: The name of the cell.
 * @namesz: The strlen of the cell name.
 * @vllist: A colon/comma separated list of numeric IP addresses or NULL.
 * @excl: T if an error should be given if the cell name already exists.
 *
 * Look up a cell record by name and query the DNS for VL server addresses if
 * needed.  Note that that actual DNS query is punted off to the manager thread
 * so that this function can return immediately if interrupted whilst allowing
 * cell records to be shared even if not yet fully constructed.
 */
struct afs_cell *afs_lookup_cell(struct afs_net *net,
				 const char *name, unsigned int namesz,
				 const char *vllist, bool excl)
{
	struct afs_cell *cell, *candidate, *cursor;
	struct rb_node *parent, **pp;
	int ret, n;

	_enter("%s,%s", name, vllist);

	/* fast path: a shared lookup may find an existing record */
	if (!excl) {
		rcu_read_lock();
		cell = afs_lookup_cell_rcu(net, name, namesz);
		rcu_read_unlock();
		if (!IS_ERR(cell))
			goto wait_for_cell;
	}

	/* Assume we're probably going to create a cell and preallocate and
	 * mostly set up a candidate record.  We can then use this to stash the
	 * name, the net namespace and VL server addresses.
	 *
	 * We also want to do this before we hold any locks as it may involve
	 * upcalling to userspace to make DNS queries.
	 */
	candidate = afs_alloc_cell(net, name, namesz, vllist);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return candidate;
	}

	/* Find the insertion point and check to see if someone else added a
	 * cell whilst we were allocating.
	 */
	write_seqlock(&net->cells_lock);

	pp = &net->cells.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		cursor = rb_entry(parent, struct afs_cell, net_node);

		/* order by case-insensitive name, then by length */
		n = strncasecmp(cursor->name, name,
				min_t(size_t, cursor->name_len, namesz));
		if (n == 0)
			n = cursor->name_len - namesz;
		if (n < 0)
			pp = &(*pp)->rb_left;
		else if (n > 0)
			pp = &(*pp)->rb_right;
		else
			goto cell_already_exists;
	}

	cell = candidate;
	candidate = NULL;
	rb_link_node_rcu(&cell->net_node, parent, pp);
	rb_insert_color(&cell->net_node, &net->cells);
	atomic_inc(&net->cells_outstanding);
	write_sequnlock(&net->cells_lock);

	/* the manager work item performs the DNS query / construction */
	queue_work(afs_wq, &cell->manager);

wait_for_cell:
	_debug("wait_for_cell");
	/* NOTE(review): if this interruptible wait returns non-zero
	 * (interrupted), the code still inspects cell->state below rather
	 * than bailing out — confirm that is intended */
	ret = wait_on_bit(&cell->flags, AFS_CELL_FL_NOT_READY,
			  TASK_INTERRUPTIBLE);
	smp_rmb(); /* read state after the NOT_READY flag */

	switch (READ_ONCE(cell->state)) {
	case AFS_CELL_FAILED:
		ret = cell->error;
		goto error;
	default:
		_debug("weird %u %d", cell->state, cell->error);
		goto error;
	case AFS_CELL_ACTIVE:
		break;
	}

	_leave(" = %p [cell]", cell);
	return cell;

cell_already_exists:
	_debug("cell exists");
	cell = cursor;
	if (excl) {
		ret = -EEXIST;
	} else {
		afs_get_cell(cursor);
		ret = 0;
	}
	write_sequnlock(&net->cells_lock);
	kfree(candidate);
	if (ret == 0)
		goto wait_for_cell;
	goto error_noput;

error:
	afs_put_cell(net, cell);
error_noput:
	_leave(" = %d [error]", ret);
	return ERR_PTR(ret);
}
/**
 * request_key_and_link - Request a key and cache it in a keyring.
 * @type: The type of key we want.
 * @description: The searchable description of the key.
 * @callout_info: The data to pass to the instantiation upcall (or NULL).
 * @callout_len: The length of callout_info.
 * @aux: Auxiliary data for the upcall.
 * @dest_keyring: Where to cache the key.
 * @flags: Flags to key_alloc().
 *
 * A key matching the specified criteria is searched for in the process's
 * keyrings and returned with its usage count incremented if found.  Otherwise,
 * if callout_info is not NULL, a key will be allocated and some service
 * (probably in userspace) will be asked to instantiate it.
 *
 * If successfully found or created, the key will be linked to the destination
 * keyring if one is provided.
 *
 * Returns a pointer to the key if successful; -EACCES, -ENOKEY, -EKEYREVOKED
 * or -EKEYEXPIRED if an inaccessible, negative, revoked or expired key was
 * found; -ENOKEY if no key was found and no @callout_info was given; -EDQUOT
 * if insufficient key quota was available to create a new key; or -ENOMEM if
 * insufficient memory was available.
 *
 * If the returned key was created, then it may still be under construction,
 * and wait_for_key_construction() should be used to wait for that to complete.
 */
struct key *request_key_and_link(struct key_type *type,
				 const char *description,
				 const void *callout_info,
				 size_t callout_len,
				 void *aux,
				 struct key *dest_keyring,
				 unsigned long flags)
{
	struct keyring_search_context ctx = {
		.index_key.type		= type,
		.index_key.description	= description,
		.cred			= current_cred(),
		.match_data.cmp		= key_default_cmp,
		.match_data.raw_data	= description,
		.match_data.lookup_type	= KEYRING_SEARCH_LOOKUP_DIRECT,
		.flags			= (KEYRING_SEARCH_DO_STATE_CHECK |
					   KEYRING_SEARCH_SKIP_EXPIRED),
	};
	struct key *key;
	key_ref_t key_ref;
	int ret;

	kenter("%s,%s,%p,%zu,%p,%p,%lx",
	       ctx.index_key.type->name, ctx.index_key.description,
	       callout_info, callout_len, aux, dest_keyring, flags);

	/* let the key type pre-process the match data; a failure here
	 * must not run match_free, hence the separate 'error' label */
	if (type->match_preparse) {
		ret = type->match_preparse(&ctx.match_data);
		if (ret < 0) {
			key = ERR_PTR(ret);
			goto error;
		}
	}

	/* search all the process keyrings for a key */
	key_ref = search_process_keyrings(&ctx);

	if (!IS_ERR(key_ref)) {
		/* found: optionally cache it in the destination keyring */
		key = key_ref_to_ptr(key_ref);
		if (dest_keyring) {
			construct_get_dest_keyring(&dest_keyring);
			ret = key_link(dest_keyring, key);
			key_put(dest_keyring);
			if (ret < 0) {
				key_put(key);
				key = ERR_PTR(ret);
				goto error_free;
			}
		}
	} else if (PTR_ERR(key_ref) != -EAGAIN) {
		key = ERR_CAST(key_ref);
	} else {
		/* the search failed, but the keyrings were searchable, so we
		 * should consult userspace if we can */
		key = ERR_PTR(-ENOKEY);
		if (!callout_info)
			goto error_free;

		key = construct_key_and_link(&ctx, callout_info, callout_len,
					     aux, dest_keyring, flags);
	}

error_free:
	if (type->match_free)
		type->match_free(&ctx.match_data);
error:
	kleave(" = %p", key);
	return key;
}

/**
 * wait_for_key_construction - Wait for construction of a key to complete
 * @key: The key being waited for.
 * @intr: Whether to wait interruptibly.
 *
 * Wait for a key to finish being constructed.
 *
 * Returns 0 if successful; -ERESTARTSYS if the wait was interrupted; -ENOKEY
 * if the key was negated; or -EKEYREVOKED or -EKEYEXPIRED if the key was
 * revoked or expired.
 */
int wait_for_key_construction(struct key *key, bool intr)
{
	int ret;

	ret = wait_on_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT,
			  intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
	if (ret)
		return -ERESTARTSYS;
	if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
		/* read the rejection error only after seeing NEGATIVE set */
		smp_rmb();
		return key->reject_error;
	}
	return key_validate(key);
}
/*
 * Map a SID to a uid/gid via the cifs.idmap upcall, caching the result in
 * the per-type rb-tree.  Falls back to the mount's default uid/gid when the
 * mapping is unavailable.  SID_ID_PENDING elects exactly one upcaller while
 * other threads wait on that bit.
 */
static int sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
		struct cifs_fattr *fattr, uint sidtype)
{
	int rc;
	unsigned long cid;
	struct key *idkey;
	const struct cred *saved_cred;
	struct cifs_sid_id *psidid, *npsidid;
	struct rb_root *cidtree;
	spinlock_t *cidlock;

	if (sidtype == SIDOWNER) {
		cid = cifs_sb->mnt_uid; /* default uid, in case upcall fails */
		cidlock = &siduidlock;
		cidtree = &uidtree;
	} else if (sidtype == SIDGROUP) {
		cid = cifs_sb->mnt_gid; /* default gid, in case upcall fails */
		cidlock = &sidgidlock;
		cidtree = &gidtree;
	} else
		return -ENOENT;

	spin_lock(cidlock);
	psidid = id_rb_search(cidtree, psid);

	if (!psidid) { /* node does not exist, allocate one & attempt adding */
		spin_unlock(cidlock);
		npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
		if (!npsidid)
			return -ENOMEM;

		npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
		if (!npsidid->sidstr) {
			kfree(npsidid);
			return -ENOMEM;
		}

		/* re-check under the lock: someone may have raced us */
		spin_lock(cidlock);
		psidid = id_rb_search(cidtree, psid);
		if (psidid) { /* node happened to get inserted meanwhile */
			++psidid->refcount;
			spin_unlock(cidlock);
			kfree(npsidid->sidstr);
			kfree(npsidid);
		} else {
			psidid = npsidid;
			id_rb_insert(cidtree, psid, &psidid,
					sidtype == SIDOWNER ? "os:" : "gs:");
			++psidid->refcount;
			spin_unlock(cidlock);
		}
	} else {
		++psidid->refcount;
		spin_unlock(cidlock);
	}

	/*
	 * If we are here, it is safe to access psidid and its fields
	 * since a reference was taken earlier while holding the spinlock.
	 * A reference on the node is put without holding the spinlock
	 * and it is OK to do so in this case, shrinker will not erase
	 * this node until all references are put and we do not access
	 * any fields of the node after a reference is put .
	 */
	if (test_bit(SID_ID_MAPPED, &psidid->state)) {
		cid = psidid->id;
		psidid->time = jiffies; /* update ts for accessing */
		goto sid_to_id_out;
	}

	/* recent upcall failed; keep the default id until retry window */
	if (time_after(psidid->time + SID_MAP_RETRY, jiffies))
		goto sid_to_id_out;

	if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
		/* we won the race: do the upcall with root credentials */
		saved_cred = override_creds(root_cred);
		idkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
		if (IS_ERR(idkey))
			cFYI(1, "%s: Can't map SID to an id", __func__);
		else {
			cid = *(unsigned long *)idkey->payload.value;
			psidid->id = cid;
			set_bit(SID_ID_MAPPED, &psidid->state);
			key_put(idkey);
			kfree(psidid->sidstr);
		}
		revert_creds(saved_cred);
		psidid->time = jiffies; /* update ts for accessing */
		clear_bit(SID_ID_PENDING, &psidid->state);
		wake_up_bit(&psidid->state, SID_ID_PENDING);
	} else {
		/* another thread is upcalling; wait for it to finish */
		rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
				sidid_pending_wait, TASK_INTERRUPTIBLE);
		if (rc) {
			cFYI(1, "%s: sidid_pending_wait interrupted %d",
					__func__, rc);
			--psidid->refcount; /* decremented without spinlock */
			return rc;
		}
		if (test_bit(SID_ID_MAPPED, &psidid->state))
			cid = psidid->id;
		/* else: leave cid at the mount default */
	}

sid_to_id_out:
	--psidid->refcount; /* decremented without spinlock */
	if (sidtype == SIDOWNER)
		fattr->cf_uid = cid;
	else
		fattr->cf_gid = cid;
	return 0;
}
/*
 * Map a uid/gid to a SID via the cifs.idmap upcall, caching the result in
 * the per-type rb-tree.  SID_ID_PENDING elects exactly one upcaller while
 * other threads wait on that bit.  Returns 0 and fills *ssid on success,
 * negative errno otherwise.
 */
static int id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
{
	int rc = 0;
	struct key *sidkey;
	const struct cred *saved_cred;
	struct cifs_sid *lsid;
	struct cifs_sid_id *psidid, *npsidid;
	struct rb_root *cidtree;
	spinlock_t *cidlock;

	/* pick the lock/tree pair for the id type */
	if (sidtype == SIDOWNER) {
		cidlock = &siduidlock;
		cidtree = &uidtree;
	} else if (sidtype == SIDGROUP) {
		cidlock = &sidgidlock;
		cidtree = &gidtree;
	} else
		return -EINVAL;

	spin_lock(cidlock);
	psidid = sid_rb_search(cidtree, cid);

	if (!psidid) { /* node does not exist, allocate one & attempt adding */
		spin_unlock(cidlock);
		npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
		if (!npsidid)
			return -ENOMEM;

		npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
		if (!npsidid->sidstr) {
			kfree(npsidid);
			return -ENOMEM;
		}

		/* re-check under the lock: someone may have raced us */
		spin_lock(cidlock);
		psidid = sid_rb_search(cidtree, cid);
		if (psidid) { /* node happened to get inserted meanwhile */
			++psidid->refcount;
			spin_unlock(cidlock);
			kfree(npsidid->sidstr);
			kfree(npsidid);
		} else {
			psidid = npsidid;
			sid_rb_insert(cidtree, cid, &psidid,
					sidtype == SIDOWNER ? "oi:" : "gi:");
			++psidid->refcount;
			spin_unlock(cidlock);
		}
	} else {
		++psidid->refcount;
		spin_unlock(cidlock);
	}

	/*
	 * If we are here, it is safe to access psidid and its fields
	 * since a reference was taken earlier while holding the spinlock.
	 * A reference on the node is put without holding the spinlock
	 * and it is OK to do so in this case, shrinker will not erase
	 * this node until all references are put and we do not access
	 * any fields of the node after a reference is put .
	 */
	if (test_bit(SID_ID_MAPPED, &psidid->state)) {
		memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
		psidid->time = jiffies; /* update ts for accessing */
		goto id_sid_out;
	}

	/* a recent upcall failed; don't retry until SID_MAP_RETRY elapses */
	if (time_after(psidid->time + SID_MAP_RETRY, jiffies)) {
		rc = -EINVAL;
		goto id_sid_out;
	}

	if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
		/* we won the race: do the upcall with root credentials */
		saved_cred = override_creds(root_cred);
		sidkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
		if (IS_ERR(sidkey)) {
			rc = -EINVAL;
			cFYI(1, "%s: Can't map and id to a SID", __func__);
		} else {
			/* copy at most sizeof(struct cifs_sid) of payload */
			lsid = (struct cifs_sid *)sidkey->payload.data;
			memcpy(&psidid->sid, lsid,
				sidkey->datalen < sizeof(struct cifs_sid) ?
				sidkey->datalen : sizeof(struct cifs_sid));
			memcpy(ssid, &psidid->sid,
				sidkey->datalen < sizeof(struct cifs_sid) ?
				sidkey->datalen : sizeof(struct cifs_sid));
			set_bit(SID_ID_MAPPED, &psidid->state);
			key_put(sidkey);
			kfree(psidid->sidstr);
		}
		psidid->time = jiffies; /* update ts for accessing */
		revert_creds(saved_cred);
		clear_bit(SID_ID_PENDING, &psidid->state);
		wake_up_bit(&psidid->state, SID_ID_PENDING);
	} else {
		/* another thread is upcalling; wait for it to finish */
		rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
				sidid_pending_wait, TASK_INTERRUPTIBLE);
		if (rc) {
			cFYI(1, "%s: sidid_pending_wait interrupted %d",
					__func__, rc);
			--psidid->refcount;
			return rc;
		}
		if (test_bit(SID_ID_MAPPED, &psidid->state))
			memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
		else
			rc = -EINVAL;
	}
id_sid_out:
	--psidid->refcount;
	return rc;
}
/*
 * Block (uninterruptibly) until the XIX_BUF_FLAGS_OP bit of the buffer's
 * flag word is cleared, using xbuf_wait as the wait action.
 */
void wait_on_xbuf(PXIX_BUF xbuf)
{
	might_sleep();
	wait_on_bit(&xbuf->xix_flags, XIX_BUF_FLAGS_OP, xbuf_wait,
		    TASK_UNINTERRUPTIBLE);
}