/* called with RTNL */
static int br_switchdev_event(struct notifier_block *unused,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct net_bridge_port *p;
	struct net_bridge *br;
	struct switchdev_notifier_fdb_info *fdb_info;
	int err = NOTIFY_DONE;

	p = br_port_get_rtnl(dev);
	if (!p)
		goto out;

	br = p->br;

	switch (event) {
	case SWITCHDEV_FDB_ADD:
		fdb_info = ptr;
		err = br_fdb_external_learn_add(br, p, fdb_info->addr,
						fdb_info->vid);
		if (err)
			err = notifier_from_errno(err);
		break;
	case SWITCHDEV_FDB_DEL:
		fdb_info = ptr;
		err = br_fdb_external_learn_del(br, p, fdb_info->addr,
						fdb_info->vid);
		if (err)
			err = notifier_from_errno(err);
		break;
	}

out:
	return err;
}
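/*
 * For reference, a minimal sketch of the notifier_from_errno() helper that
 * every callback in this collection relies on. This mirrors the helper in
 * include/linux/notifier.h (renamed here to avoid clashing with the real
 * one): a nonzero errno is folded into a NOTIFY_STOP_MASK value so the
 * notifier chain stops and the caller can recover the errno via
 * notifier_to_errno(); zero maps to NOTIFY_OK.
 */
static inline int notifier_from_errno_sketch(int err)
{
	if (err)
		return NOTIFY_STOP_MASK | (NOTIFY_BAD - err);

	return NOTIFY_OK;
}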
static int profile_cpu_callback(struct notifier_block *info,
				unsigned long action, void *__cpu)
{
	int node, cpu = (unsigned long)__cpu;
	struct page *page;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		node = cpu_to_mem(cpu);
		per_cpu(cpu_profile_flip, cpu) = 0;
		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
			page = alloc_pages_exact_node(node,
					GFP_KERNEL | __GFP_ZERO, 0);
			if (!page)
				return notifier_from_errno(-ENOMEM);
			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
		}
		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
			page = alloc_pages_exact_node(node,
					GFP_KERNEL | __GFP_ZERO, 0);
			if (!page)
				goto out_free;
			per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
		}
		break;
	out_free:
		page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
		per_cpu(cpu_profile_hits, cpu)[1] = NULL;
		__free_page(page);
		return notifier_from_errno(-ENOMEM);
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (prof_cpu_mask != NULL)
			cpumask_set_cpu(cpu, prof_cpu_mask);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		if (prof_cpu_mask != NULL)
			cpumask_clear_cpu(cpu, prof_cpu_mask);
		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
		break;
	}
	return NOTIFY_OK;
}
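/*
 * A minimal registration sketch for old-style (pre-cpuhp state machine)
 * CPU hotplug callbacks such as profile_cpu_callback above. The
 * notifier_block and init-hook names are assumptions for illustration;
 * register_cpu_notifier() was the standard hook for CPU_UP_PREPARE /
 * CPU_ONLINE / CPU_DEAD events in kernels of this vintage.
 */
static struct notifier_block profile_cpu_nb = {
	.notifier_call = profile_cpu_callback,
};

static int __init profile_register_cpu_nb(void)
{
	return register_cpu_notifier(&profile_cpu_nb);
}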
static int macvtap_device_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct macvtap_dev *vlantap;
	struct device *classdev;
	dev_t devt;
	int err;
	char tap_name[IFNAMSIZ];

	if (dev->rtnl_link_ops != &macvtap_link_ops)
		return NOTIFY_DONE;

	snprintf(tap_name, IFNAMSIZ, "tap%d", dev->ifindex);
	vlantap = netdev_priv(dev);

	switch (event) {
	case NETDEV_REGISTER:
		/* Create the device node here after the network device has
		 * been registered but before register_netdevice has
		 * finished running.
		 */
		err = tap_get_minor(macvtap_major, &vlantap->tap);
		if (err)
			return notifier_from_errno(err);

		devt = MKDEV(MAJOR(macvtap_major), vlantap->tap.minor);
		classdev = device_create(&macvtap_class, &dev->dev, devt,
					 dev, tap_name);
		if (IS_ERR(classdev)) {
			tap_free_minor(macvtap_major, &vlantap->tap);
			return notifier_from_errno(PTR_ERR(classdev));
		}
		err = sysfs_create_link(&dev->dev.kobj, &classdev->kobj,
					tap_name);
		if (err)
			return notifier_from_errno(err);
		break;
	case NETDEV_UNREGISTER:
		/* vlantap->tap.minor == 0 if NETDEV_REGISTER above failed */
		if (vlantap->tap.minor == 0)
			break;
		sysfs_remove_link(&dev->dev.kobj, tap_name);
		devt = MKDEV(MAJOR(macvtap_major), vlantap->tap.minor);
		device_destroy(&macvtap_class, devt);
		tap_free_minor(macvtap_major, &vlantap->tap);
		break;
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		if (tap_queue_resize(&vlantap->tap))
			return NOTIFY_BAD;
		break;
	}

	return NOTIFY_DONE;
}
static int jump_label_module_notify(struct notifier_block *self,
				    unsigned long val, void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	case MODULE_STATE_LIVE:
		jump_label_invalidate_module_init(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}
static int dsa_switch_event(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
	int err;

	switch (event) {
	case DSA_NOTIFIER_BRIDGE_JOIN:
		err = dsa_switch_bridge_join(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_LEAVE:
		err = dsa_switch_bridge_leave(ds, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	/* Non-switchdev operations cannot be rolled back. If a DSA driver
	 * returns an error during the chained call, switch chips may be in an
	 * inconsistent state.
	 */
	if (err)
		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
			event, err);

	return notifier_from_errno(err);
}
static int rockchip_clk_frac_notifier_cb(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct rockchip_clk_frac *frac = to_rockchip_clk_frac_nb(nb);
	struct clk_mux *frac_mux = &frac->mux;
	int ret = 0;

	pr_debug("%s: event %lu, old_rate %lu, new_rate: %lu\n",
		 __func__, event, ndata->old_rate, ndata->new_rate);

	if (event == PRE_RATE_CHANGE) {
		frac->rate_change_idx =
				frac->mux_ops->get_parent(&frac_mux->hw);
		if (frac->rate_change_idx != frac->mux_frac_idx) {
			frac->mux_ops->set_parent(&frac_mux->hw,
						  frac->mux_frac_idx);
			frac->rate_change_remuxed = 1;
		}
	} else if (event == POST_RATE_CHANGE) {
		/*
		 * The POST_RATE_CHANGE notifier runs directly after the
		 * divider clock is set in clk_change_rate, so we'll have
		 * remuxed back to the original parent before clk_change_rate
		 * reaches the mux itself.
		 */
		if (frac->rate_change_remuxed) {
			frac->mux_ops->set_parent(&frac_mux->hw,
						  frac->rate_change_idx);
			frac->rate_change_remuxed = 0;
		}
	}

	return notifier_from_errno(ret);
}
static int __meminit page_ext_callback(struct notifier_block *self,
				       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_ext(mn->start_pfn,
				      mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_ext(mn->start_pfn,
				 mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
		offline_page_ext(mn->start_pfn,
				 mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}
static int fmem_memory_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	int ret = 0;

	if (fmem_state == FMEM_UNINITIALIZED)
		return NOTIFY_OK;

	switch (action) {
	case MEM_ONLINE:
		fmem_mem_online_callback(arg);
		break;
	case MEM_GOING_OFFLINE:
		ret = fmem_mem_going_offline_callback(arg);
		break;
	case MEM_OFFLINE:
		fmem_mem_offline_callback(arg);
		break;
	case MEM_GOING_ONLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	if (ret)
		ret = notifier_from_errno(ret);
	else
		ret = NOTIFY_OK;

	return ret;
}
static int jump_label_module_notify(struct notifier_block *self,
				    unsigned long val, void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		jump_label_lock();
		ret = jump_label_add_module(mod);
		if (ret)
			jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_GOING:
		jump_label_lock();
		jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_LIVE:
		jump_label_lock();
		jump_label_invalidate_module_init(mod);
		jump_label_unlock();
		break;
	}

	return notifier_from_errno(ret);
}
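/*
 * A minimal sketch of how a module-state notifier like the one above is
 * wired up. The block name, priority value, and init hook are assumptions
 * for illustration, but register_module_notifier() is the standard entry
 * point for MODULE_STATE_COMING/GOING/LIVE notifications.
 */
static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* run before default-priority (0) notifiers */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);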
/*
 * Notifier function for switching the muxes to safe parent
 * while the hfpll is getting reprogrammed.
 */
static int krait_notifier_cb(struct notifier_block *nb,
			     unsigned long event, void *data)
{
	int ret = 0;
	struct krait_mux_clk *mux = container_of(nb,
						 struct krait_mux_clk, clk_nb);

	/* Switch to safe parent */
	if (event == PRE_RATE_CHANGE) {
		mux->old_index = krait_mux_clk_ops.get_parent(&mux->hw);
		ret = krait_mux_clk_ops.set_parent(&mux->hw, mux->safe_sel);
		mux->reparent = false;
	/*
	 * By the time the POST_RATE_CHANGE notifier is called, the clk
	 * framework itself may already have reparented the mux for the
	 * new rate (setting mux->reparent). Only if it has not do we
	 * switch back to the old parent.
	 */
	} else if (event == POST_RATE_CHANGE) {
		if (!mux->reparent)
			ret = krait_mux_clk_ops.set_parent(&mux->hw,
							   mux->old_index);
	}

	return notifier_from_errno(ret);
}
static int tz_notify(struct notifier_block *nb, unsigned long type,
		     void *devp)
{
	int result = 0;
	struct devfreq *devfreq = devp;

	switch (type) {
	case ADRENO_DEVFREQ_NOTIFY_IDLE:
	case ADRENO_DEVFREQ_NOTIFY_RETIRE:
		mutex_lock(&devfreq->lock);
		result = update_devfreq(devfreq);
		mutex_unlock(&devfreq->lock);

		/* Notify the partner bus governor, if any */
		if (partner_gpu_profile && partner_gpu_profile->bus_devfreq) {
			mutex_lock(&partner_gpu_profile->bus_devfreq->lock);
			update_devfreq(partner_gpu_profile->bus_devfreq);
			mutex_unlock(&partner_gpu_profile->bus_devfreq->lock);
		}
		break;
	/* ignored by this governor */
	case ADRENO_DEVFREQ_NOTIFY_SUBMIT:
	default:
		break;
	}

	return notifier_from_errno(result);
}
static int otg_notifier_callback(struct notifier_block *nb,
				 unsigned long event, void *param)
{
	struct otg_state_work *state_work;

	pr_info("%s event=%s(%lu)\n", __func__,
		event_string(event), event);

	if (!u_notify) {
		pr_err("u_notify is NULL\n");
		return NOTIFY_DONE;
	}

	if (event > NOTIFY_EVENT_VBUSPOWER) {
		pr_err("%s event is invalid\n", __func__);
		return NOTIFY_DONE;
	}

	state_work = kmalloc(sizeof(struct otg_state_work), GFP_ATOMIC);
	if (!state_work) {
		pr_err("unable to allocate state_work\n");
		return notifier_from_errno(-ENOMEM);
	}

	INIT_WORK(&state_work->otg_work, otg_notify_work);
	state_work->event = event;
	state_work->enable = *(int *)param;
	queue_work(u_notify->notifier_wq, &state_work->otg_work);

	return NOTIFY_OK;
}
static int update_clusterinfo(
    struct notifier_block *nfb, unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;
    int err = 0;

    switch (action)
    {
    case CPU_UP_PREPARE:
        per_cpu(cpu_2_logical_apicid, cpu) = BAD_APICID;
        if ( !cluster_cpus_spare )
            cluster_cpus_spare = xzalloc(cpumask_t);
        if ( !cluster_cpus_spare ||
             !alloc_cpumask_var(&per_cpu(scratch_mask, cpu)) )
            err = -ENOMEM;
        break;
    case CPU_UP_CANCELED:
    case CPU_DEAD:
        if ( per_cpu(cluster_cpus, cpu) )
        {
            cpumask_clear_cpu(cpu, per_cpu(cluster_cpus, cpu));
            if ( cpumask_empty(per_cpu(cluster_cpus, cpu)) )
                xfree(per_cpu(cluster_cpus, cpu));
        }
        free_cpumask_var(per_cpu(scratch_mask, cpu));
        break;
    }

    return !err ? NOTIFY_DONE : notifier_from_errno(err);
}
/**
 * cmm_memory_isolate_cb - Handle memory isolation notifier calls
 * @self:	notifier block struct
 * @action:	action to take
 * @arg:	struct memory_isolate_notify data for handler
 *
 * Return value:
 *	NOTIFY_OK or notifier error based on subfunction return value
 **/
static int cmm_memory_isolate_cb(struct notifier_block *self,
				 unsigned long action, void *arg)
{
	int ret = 0;

	if (action == MEM_ISOLATE_COUNT)
		ret = cmm_count_pages(arg);

	return notifier_from_errno(ret);
}
/*
 * We use the notifier function for switching to a temporary safe configuration
 * (mux and divider), while the A53 PLL is reconfigured.
 */
static int a53cc_notifier_cb(struct notifier_block *nb, unsigned long event,
			     void *data)
{
	int ret = 0;
	struct clk_regmap_mux_div *md = container_of(nb,
						     struct clk_regmap_mux_div,
						     clk_nb);

	if (event == PRE_RATE_CHANGE)
		/* set the mux and divider to a safe frequency (400 MHz) */
		ret = mux_div_set_src_div(md, 4, 3);

	return notifier_from_errno(ret);
}
/*
 * This clock notifier is called when the frequency of the parent
 * PLL clock is to be changed. We use the xtal input as temporary parent
 * while the PLL frequency is stabilized.
 */
static int meson_clk_cpu_notifier_cb(struct notifier_block *nb,
				     unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct meson_clk_cpu *clk_cpu = to_meson_clk_cpu_nb(nb);
	int ret = 0;

	if (event == PRE_RATE_CHANGE)
		ret = meson_clk_cpu_pre_rate_change(clk_cpu, ndata);
	else if (event == POST_RATE_CHANGE)
		ret = meson_clk_cpu_post_rate_change(clk_cpu, ndata);

	return notifier_from_errno(ret);
}
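/*
 * A minimal sketch of how a rate-change notifier like the one above is
 * attached to its clock. The function and field names here are
 * assumptions for illustration, but clk_notifier_register() is the
 * standard common clock framework call that delivers PRE_RATE_CHANGE /
 * POST_RATE_CHANGE events to a notifier block.
 */
static int meson_clk_cpu_register_nb(struct meson_clk_cpu *clk_cpu,
				     struct clk *clk)
{
	clk_cpu->clk_nb.notifier_call = meson_clk_cpu_notifier_cb;

	/* Returns 0 on success or a negative errno on failure. */
	return clk_notifier_register(clk, &clk_cpu->clk_nb);
}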
static int omap2430_musb_otg_notifications(struct musb *musb,
					   unsigned long event)
{
	struct musb_otg_work *otg_work;

	otg_work = kmalloc(sizeof(struct musb_otg_work), GFP_ATOMIC);
	if (!otg_work)
		return notifier_from_errno(-ENOMEM);

	INIT_WORK(&otg_work->work, musb_otg_notifier_work);
	otg_work->xceiv_event = event;
	otg_work->musb = musb;

	pr_info("%s recheck event=%lu\n", __func__, event);
	queue_work(musb->otg_notifier_wq, &otg_work->work);

	return 0;
}
static int musb_otg_notifications(struct notifier_block *nb,
				  unsigned long event, void *unused)
{
	struct musb *musb = container_of(nb, struct musb, nb);
	struct musb_otg_work *otg_work;

	otg_work = kmalloc(sizeof(struct musb_otg_work), GFP_ATOMIC);
	if (!otg_work)
		return notifier_from_errno(-ENOMEM);

	INIT_WORK(&otg_work->work, musb_otg_notifier_work);
	otg_work->xceiv_event = event;
	otg_work->musb = musb;
	queue_work(musb->otg_notifier_wq, &otg_work->work);

	return 0;
}
static int comp_pool_callback(struct notifier_block *nfb,
			      unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct ehca_cpu_comp_task *cct;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
		if (!create_comp_task(pool, cpu)) {
			ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
			return notifier_from_errno(-ENOMEM);
		}
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
		kthread_bind(cct->task, cpumask_any(cpu_online_mask));
		destroy_comp_task(pool, cpu);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_ONLINE)", cpu);
		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
		kthread_bind(cct->task, cpu);
		wake_up_process(cct->task);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_DOWN_PREPARE)", cpu);
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_DOWN_FAILED)", cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_DEAD)", cpu);
		destroy_comp_task(pool, cpu);
		take_over_work(pool, cpu);
		break;
	}

	return NOTIFY_OK;
}
static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct i2c_adapter *adap;
	struct i2c_client *client;

	switch (of_reconfig_get_state_change(action, rd)) {
	case OF_RECONFIG_CHANGE_ADD:
		adap = of_find_i2c_adapter_by_node(rd->dn->parent);
		if (adap == NULL)
			return NOTIFY_OK;	/* not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&adap->dev);
			return NOTIFY_OK;
		}

		client = of_i2c_register_device(adap, rd->dn);
		put_device(&adap->dev);

		if (IS_ERR(client)) {
			dev_err(&adap->dev, "failed to create client for '%pOF'\n",
				rd->dn);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(client));
		}
		break;
	case OF_RECONFIG_CHANGE_REMOVE:
		/* already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* find our device by node */
		client = of_find_i2c_device_by_node(rd->dn);
		if (client == NULL)
			return NOTIFY_OK;	/* no? not meant for us */

		/* unregister takes one ref away */
		i2c_unregister_device(client);

		/* and put the reference of the find */
		put_device(&client->dev);
		break;
	}

	return NOTIFY_OK;
}
/*
 * This clock notifier is called when the frequency of the parent clock
 * of cpuclk is to be changed. It sets up the divider clocks, remuxes to
 * the temporary parent, and enforces the safe frequency levels while the
 * temporary parent is in use.
 */
static int rockchip_cpuclk_notifier_cb(struct notifier_block *nb,
				       unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct rockchip_cpuclk *cpuclk = to_rockchip_cpuclk_nb(nb);
	int ret = 0;

	pr_debug("%s: event %lu, old_rate %lu, new_rate: %lu\n",
		 __func__, event, ndata->old_rate, ndata->new_rate);

	if (event == PRE_RATE_CHANGE)
		ret = rockchip_cpuclk_pre_rate_change(cpuclk, ndata);
	else if (event == POST_RATE_CHANGE)
		ret = rockchip_cpuclk_post_rate_change(cpuclk, ndata);

	return notifier_from_errno(ret);
}
static int gpu_lpc_notifier(struct notifier_block *nb, unsigned long event,
			    void *cmd)
{
	struct kbase_device *kbdev = pkbdev;
	int err = NOTIFY_DONE;
	unsigned long flags;

	switch (event) {
	case LPC_PREPARE:
		spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
		if (kbdev->pm.metrics.gpu_active)
			err = notifier_from_errno(-EBUSY);
		spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
		GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "LPC prepare\n");
		break;
	}

	return err;
}
static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	int err = 0;

	switch (action) {
	case CPU_UP_PREPARE:
		err = msr_device_create(cpu);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
		msr_device_destroy(cpu);
		break;
	}

	return notifier_from_errno(err);
}
static int tz_notify(struct notifier_block *nb, unsigned long type,
		     void *devp)
{
	int result = 0;
	struct devfreq *devfreq = devp;

	switch (type) {
	case ADRENO_DEVFREQ_NOTIFY_IDLE:
	case ADRENO_DEVFREQ_NOTIFY_RETIRE:
		mutex_lock(&devfreq->lock);
		result = update_devfreq(devfreq);
		mutex_unlock(&devfreq->lock);
		break;
	/* ignored by this governor */
	case ADRENO_DEVFREQ_NOTIFY_SUBMIT:
	default:
		break;
	}

	return notifier_from_errno(result);
}
static int pseries_memory_notifier(struct notifier_block *nb,
				   unsigned long action, void *node)
{
	int err = 0;

	switch (action) {
	case PSERIES_RECONFIG_ADD:
		err = pseries_add_memory(node);
		break;
	case PSERIES_RECONFIG_REMOVE:
		err = pseries_remove_memory(node);
		break;
	case PSERIES_DRCONF_MEM_ADD:
	case PSERIES_DRCONF_MEM_REMOVE:
		err = pseries_drconf_memory(node, action);
		break;
	}

	return notifier_from_errno(err);
}
static int err_inject_cpu_callback(struct notifier_block *nfb,
				   unsigned long action, void *hcpu)
{
	int err = 0;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		err = cpu_up_prepare_error;
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		err = cpu_down_prepare_error;
		break;
	}

	if (err)
		printk(KERN_INFO "Injecting error (%d) at cpu notifier\n",
		       err);

	return notifier_from_errno(err);
}
static int pseries_memory_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_mem_node(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		err = pseries_remove_mem_node(rd->dn);
		break;
	case OF_RECONFIG_UPDATE_PROPERTY:
		if (!strcmp(rd->prop->name, "ibm,dynamic-memory"))
			err = pseries_update_drconf_memory(rd);
		break;
	}

	return notifier_from_errno(err);
}
static int topology_cpu_callback(struct notifier_block *nfb,
				 unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	int rc = 0;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		rc = topology_add_dev(cpu);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		topology_remove_dev(cpu);
		break;
	}

	return notifier_from_errno(rc);
}
static int pseries_memory_notifier(struct notifier_block *nb,
				   unsigned long action, void *node)
{
	struct of_prop_reconfig *pr;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_memory(node);
		break;
	case OF_RECONFIG_DETACH_NODE:
		err = pseries_remove_memory(node);
		break;
	case OF_RECONFIG_UPDATE_PROPERTY:
		pr = (struct of_prop_reconfig *)node;
		if (!strcmp(pr->prop->name, "ibm,dynamic-memory"))
			err = pseries_update_drconf_memory(pr);
		break;
	}

	return notifier_from_errno(err);
}
/**
 * cmm_memory_cb - Handle memory hotplug notifier calls
 * @self:	notifier block struct
 * @action:	action to take
 * @arg:	struct memory_notify data for handler
 *
 * Return value:
 *	NOTIFY_OK or notifier error based on subfunction return value
 *
 **/
static int cmm_memory_cb(struct notifier_block *self,
			 unsigned long action, void *arg)
{
	int ret = 0;

	switch (action) {
	case MEM_GOING_OFFLINE:
		mutex_lock(&hotplug_mutex);
		hotplug_occurred = 1;
		ret = cmm_mem_going_offline(arg);
		break;
	case MEM_OFFLINE:
	case MEM_CANCEL_OFFLINE:
		mutex_unlock(&hotplug_mutex);
		cmm_dbg("Memory offline operation complete.\n");
		break;
	case MEM_GOING_ONLINE:
	case MEM_ONLINE:
	case MEM_CANCEL_ONLINE:
		break;
	}

	return notifier_from_errno(ret);
}
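/*
 * A minimal registration sketch for the memory hotplug callback above.
 * The notifier_block and init-hook names are assumptions for
 * illustration; register_memory_notifier() is the standard hook for
 * receiving MEM_GOING_OFFLINE/MEM_OFFLINE/... notifications.
 */
static struct notifier_block cmm_mem_nb = {
	.notifier_call = cmm_memory_cb,
};

static int __init cmm_register_mem_nb(void)
{
	return register_memory_notifier(&cmm_mem_nb);
}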