/*
 * Return the request queued immediately before @rq on the noop dispatch
 * list, or NULL when @rq is the first entry (nothing precedes it).
 */
static struct request *
noop_former_request(struct request_queue *q, struct request *rq)
{
	struct noop_data *nd = q->elevator->elevator_data;

	/* @rq is at the head of the list iff its prev link is the list head */
	return (rq->queuelist.prev == &nd->queue) ?
		NULL : list_prev_entry(rq, queuelist);
}
/* * coresight_disable_path_from : Disable components in the given path beyond * @nd in the list. If @nd is NULL, all the components, except the SOURCE are * disabled. */ static void coresight_disable_path_from(struct list_head *path, struct coresight_node *nd) { u32 type; struct coresight_device *csdev, *parent, *child; if (!nd) nd = list_first_entry(path, struct coresight_node, link); list_for_each_entry_continue(nd, path, link) { csdev = nd->csdev; type = csdev->type; /* * ETF devices are tricky... They can be a link or a sink, * depending on how they are configured. If an ETF has been * "activated" it will be configured as a sink, otherwise * go ahead with the link configuration. */ if (type == CORESIGHT_DEV_TYPE_LINKSINK) type = (csdev == coresight_get_sink(path)) ? CORESIGHT_DEV_TYPE_SINK : CORESIGHT_DEV_TYPE_LINK; switch (type) { case CORESIGHT_DEV_TYPE_SINK: coresight_disable_sink(csdev); break; case CORESIGHT_DEV_TYPE_SOURCE: /* * We skip the first node in the path assuming that it * is the source. So we don't expect a source device in * the middle of a path. */ WARN_ON(1); break; case CORESIGHT_DEV_TYPE_LINK: parent = list_prev_entry(nd, link)->csdev; child = list_next_entry(nd, link)->csdev; coresight_disable_link(csdev, parent, child); break; default: break; } }
void coresight_disable_path(struct list_head *path) { u32 type; struct coresight_node *nd; struct coresight_device *csdev, *parent, *child; list_for_each_entry(nd, path, link) { csdev = nd->csdev; type = csdev->type; /* * ETF devices are tricky... They can be a link or a sink, * depending on how they are configured. If an ETF has been * "activated" it will be configured as a sink, otherwise * go ahead with the link configuration. */ if (type == CORESIGHT_DEV_TYPE_LINKSINK) type = (csdev == coresight_get_sink(path)) ? CORESIGHT_DEV_TYPE_SINK : CORESIGHT_DEV_TYPE_LINK; switch (type) { case CORESIGHT_DEV_TYPE_SINK: coresight_disable_sink(csdev); break; case CORESIGHT_DEV_TYPE_SOURCE: /* sources are disabled from either sysFS or Perf */ break; case CORESIGHT_DEV_TYPE_LINK: parent = list_prev_entry(nd, link)->csdev; child = list_next_entry(nd, link)->csdev; coresight_disable_link(csdev, parent, child); break; default: break; } }
/*
 * __klp_enable_patch - enable a live patch.
 * @patch: patch to be enabled
 *
 * Starts a KLP_PATCHED transition for @patch: every loaded object gets its
 * pre-patch callback invoked and is patched, then the transition is started
 * and an immediate completion attempt is made.
 *
 * Return: 0 on success; -EBUSY if a transition is already in progress or
 * stacking order is violated; -EINVAL if the patch is already enabled;
 * -ENODEV if the patch module cannot be pinned; or the error from a failed
 * pre-patch callback / object patch (after cancelling the transition).
 */
static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	/* Only one transition may be in flight at a time. */
	if (klp_transition_patch)
		return -EBUSY;

	if (WARN_ON(patch->enabled))
		return -EINVAL;

	/* enforce stacking: only the first disabled patch can be enabled */
	if (patch->list.prev != &klp_patches &&
	    !list_prev_entry(patch, list)->enabled)
		return -EBUSY;

	/*
	 * A reference is taken on the patch module to prevent it from being
	 * unloaded.
	 */
	if (!try_module_get(patch->mod))
		return -ENODEV;

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_init_transition(patch, KLP_PATCHED);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the ops->func_stack writes in
	 * klp_patch_object(), so that klp_ftrace_handler() will see the
	 * func->transition updates before the handler is registered and the
	 * new funcs become visible to the handler.
	 */
	smp_wmb();

	klp_for_each_object(patch, obj) {
		/* Not-yet-loaded objects are patched when their module loads. */
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_pre_patch_callback(obj);
		if (ret) {
			pr_warn("pre-patch callback failed for object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}

		ret = klp_patch_object(obj);
		if (ret) {
			pr_warn("failed to patch object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}
	}

	klp_start_transition();
	klp_try_complete_transition();
	patch->enabled = true;

	return 0;
err:
	pr_warn("failed to enable patch '%s'\n", patch->mod->name);

	/* Unwind: reverses klp_init_transition() and any patched objects. */
	klp_cancel_transition();
	return ret;
}