Example #1
File: overlay.c Project: Lyude/linux
static int overlay_notify(struct overlay_changeset *ovcs,
		enum of_overlay_notify_action action)
{
	struct of_overlay_notify_data nd;
	int i, ret;

	for (i = 0; i < ovcs->count; i++) {
		struct fragment *fragment = &ovcs->fragments[i];

		nd.target = fragment->target;
		nd.overlay = fragment->overlay;

		ret = blocking_notifier_call_chain(&overlay_notify_chain,
						   action, &nd);
		if (ret == NOTIFY_OK || ret == NOTIFY_STOP)
			return 0;
		if (ret) {
			ret = notifier_to_errno(ret);
			pr_err("overlay changeset %s notifier error %d, target: %pOF\n",
			       of_overlay_action_name[action], ret, nd.target);
			return ret;
		}
	}

	return 0;
}
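Note: every example on this page follows the same convention: a notifier callback encodes an errno into its NOTIFY_* return value with notifier_from_errno(), and the publisher decodes it with notifier_to_errno(). The sketch below is a minimal illustration of that round trip; the names demo_chain, demo_nb, demo_veto_cb and demo_notify are hypothetical and appear in none of the examples here.

#include <linux/notifier.h>
#include <linux/errno.h>

static BLOCKING_NOTIFIER_HEAD(demo_chain);

/* A subscriber that vetoes every action and reports -EBUSY. */
static int demo_veto_cb(struct notifier_block *nb,
			unsigned long action, void *data)
{
	return notifier_from_errno(-EBUSY);
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_veto_cb,
};

/* The publisher side: fire the chain, then convert the result. */
static int demo_notify(unsigned long action)
{
	int ret;

	ret = blocking_notifier_call_chain(&demo_chain, action, NULL);

	/*
	 * NOTIFY_DONE and NOTIFY_OK map to 0, NOTIFY_BAD maps to
	 * -EPERM, and an errno encoded with notifier_from_errno()
	 * (-EBUSY here) comes back unchanged.
	 */
	return notifier_to_errno(ret);
}

With demo_nb registered through blocking_notifier_chain_register(&demo_chain, &demo_nb), demo_notify() returns -EBUSY; with no subscribers on the chain it returns 0.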
Example #2
int set_migratetype_isolate(struct page *page)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if the
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to return the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or on the LRU, isolation can continue.
	 * Later, for example, when memory hotplug notifier runs, these
	 * pages reported as "can be isolated" should be isolated (freed)
	 * by the balloon driver through the memory notifier chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	if (!has_unmovable_pages(zone, page, arg.pages_found))
		ret = 0;

	/*
	 * "immobile" means not-on-LRU pages. If the immobile count is
	 * larger than the removable-by-driver pages reported by the
	 * notifier, we'll fail.
	 */

out:
	if (!ret) {
		unsigned long nr_pages;
		int migratetype = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);

		__mod_zone_freepage_state(zone, -nr_pages, migratetype);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages();
	return ret;
}
Example #3
int of_reconfig_notify(unsigned long action, void *p)
{
	int rc;

	rc = blocking_notifier_call_chain(&of_reconfig_chain, action, p);
	return notifier_to_errno(rc);
}
Example #4
static int busfreq_notify(enum busfreq_event event)
{
	int ret;

	ret = raw_notifier_call_chain(&busfreq_notifier_chain, event, NULL);

	return notifier_to_errno(ret);
}
Example #5
int pm_notifier_call_chain(unsigned long val)
{
	int ret;
	pr_info("[%s]: there are %u notify callbacks, event = %lu\n", __func__, notify_count, val);
	ret = blocking_notifier_call_chain(&pm_chain_head, val, NULL);

	return notifier_to_errno(ret);
}
Example #6
static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
		nr_to_call, nr_calls);

	return notifier_to_errno(ret);
}
Example #7
File: port.c Project: Lyude/linux
static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
{
	struct raw_notifier_head *nh = &dp->ds->dst->nh;
	int err;

	err = raw_notifier_call_chain(nh, e, v);

	return notifier_to_errno(err);
}
Example #8
static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}
Example #9
File: cpu.c Project: 545191228/linux
static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
			int *nr_calls)
{
	unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
	void *hcpu = (void *)(long)cpu;
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}
Example #10
File: qmi_wwan.c Project: 020gzh/linux
static ssize_t raw_ip_store(struct device *d, struct device_attribute *attr, const char *buf, size_t len)
{
	struct usbnet *dev = netdev_priv(to_net_dev(d));
	struct qmi_wwan_state *info = (void *)&dev->data;
	bool enable;
	int ret;

	if (strtobool(buf, &enable))
		return -EINVAL;

	/* no change? */
	if (enable == (info->flags & QMI_WWAN_FLAG_RAWIP))
		return len;

	if (!rtnl_trylock())
		return restart_syscall();

	/* we don't want to modify a running netdev */
	if (netif_running(dev->net)) {
		netdev_err(dev->net, "Cannot change a running device\n");
		ret = -EBUSY;
		goto err;
	}

	/* let other drivers deny the change */
	ret = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev->net);
	ret = notifier_to_errno(ret);
	if (ret) {
		netdev_err(dev->net, "Type change was refused\n");
		goto err;
	}

	if (enable)
		info->flags |= QMI_WWAN_FLAG_RAWIP;
	else
		info->flags &= ~QMI_WWAN_FLAG_RAWIP;
	qmi_wwan_netdev_setup(dev->net);
	call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev->net);
	ret = len;
err:
	rtnl_unlock();
	return ret;
}
Example #11
int cpu_down(unsigned int cpu)
{
    int err, notifier_rc;
    void *hcpu = (void *)(long)cpu;
    struct notifier_block *nb = NULL;

    if ( !cpu_hotplug_begin() )
        return -EBUSY;

    if ( (cpu >= NR_CPUS) || (cpu == 0) || !cpu_online(cpu) )
    {
        cpu_hotplug_done();
        return -EINVAL;
    }

    notifier_rc = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE, hcpu, &nb);
    if ( notifier_rc != NOTIFY_DONE )
    {
        err = notifier_to_errno(notifier_rc);
        goto fail;
    }

    if ( (err = stop_machine_run(take_cpu_down, NULL, cpu)) < 0 )
        goto fail;

    __cpu_die(cpu);
    BUG_ON(cpu_online(cpu));

    notifier_rc = notifier_call_chain(&cpu_chain, CPU_DEAD, hcpu, NULL);
    BUG_ON(notifier_rc != NOTIFY_DONE);

    send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
    cpu_hotplug_done();
    return 0;

 fail:
    notifier_rc = notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED, hcpu, &nb);
    BUG_ON(notifier_rc != NOTIFY_DONE);
    cpu_hotplug_done();
    return err;
}
Example #12
int cpu_up(unsigned int cpu)
{
    int notifier_rc, err = 0;
    void *hcpu = (void *)(long)cpu;
    struct notifier_block *nb = NULL;

    if ( !cpu_hotplug_begin() )
        return -EBUSY;

    if ( (cpu >= NR_CPUS) || cpu_online(cpu) || !cpu_present(cpu) )
    {
        cpu_hotplug_done();
        return -EINVAL;
    }

    notifier_rc = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu, &nb);
    if ( notifier_rc != NOTIFY_DONE )
    {
        err = notifier_to_errno(notifier_rc);
        goto fail;
    }

    err = __cpu_up(cpu);
    if ( err < 0 )
        goto fail;

    notifier_rc = notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu, NULL);
    BUG_ON(notifier_rc != NOTIFY_DONE);

    send_guest_global_virq(dom0, VIRQ_PCPU_STATE);

    cpu_hotplug_done();
    return 0;

 fail:
    notifier_rc = notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu, &nb);
    BUG_ON(notifier_rc != NOTIFY_DONE);
    cpu_hotplug_done();
    return err;
}
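Note: examples #10 through #12 share a two-phase shape: publish a prepare event whose veto, decoded by notifier_to_errno(), aborts the operation before any state changes, then publish a commit event on success or a cancel event on the failure path. Below is a condensed sketch of that shape; the chain, the event names, and the worker do_change() are all hypothetical.

static BLOCKING_NOTIFIER_HEAD(my_chain);

enum { MY_PREPARE, MY_COMMIT, MY_CANCEL };

static int do_change(void *data)	/* hypothetical worker */
{
	return 0;
}

static int my_two_phase_change(void *data)
{
	int ret;

	/* Phase 1: let subscribers veto before anything changes. */
	ret = blocking_notifier_call_chain(&my_chain, MY_PREPARE, data);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;	/* vetoed; nothing to undo */

	ret = do_change(data);
	if (ret) {
		/* Failure path: tell subscribers the change fell through. */
		blocking_notifier_call_chain(&my_chain, MY_CANCEL, data);
		return ret;
	}

	/* Success path: the change is committed. */
	blocking_notifier_call_chain(&my_chain, MY_COMMIT, data);
	return 0;
}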
Example #13
File: net-sysfs.c Project: mkrufky/linux
static int change_tx_queue_len(struct net_device *dev, unsigned long new_len)
{
	unsigned int orig_len = dev->tx_queue_len;
	int res;

	if (new_len != (unsigned int)new_len)
		return -ERANGE;

	if (new_len != orig_len) {
		dev->tx_queue_len = new_len;
		res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
		res = notifier_to_errno(res);
		if (res) {
			netdev_err(dev,
				   "refused to change device tx_queue_len\n");
			dev->tx_queue_len = orig_len;
			return -EFAULT;
		}
	}

	return 0;
}
Example #14
static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int cpu, i, rc = NOTIFY_STOP;
	struct perf_event *bp;
	unsigned int cmf, resume_mask;

	/*
	 * Do an early return if none of the channels triggered.
	 */
	cmf = sh_ubc->triggered_mask();
	if (unlikely(!cmf))
		return NOTIFY_DONE;

	/*
	 * By default, resume all of the active channels.
	 */
	resume_mask = sh_ubc->active_mask();

	/*
	 * Disable breakpoints during exception handling.
	 */
	sh_ubc->disable_all();

	cpu = get_cpu();
	for (i = 0; i < sh_ubc->num_events; i++) {
		unsigned long event_mask = (1 << i);

		if (likely(!(cmf & event_mask)))
			continue;

		/*
		 * The counter may be concurrently released but that can only
		 * occur from a call_rcu() path. We can then safely fetch
		 * the breakpoint, use its callback, touch its counter
		 * while we are in an rcu_read_lock() path.
		 */
		rcu_read_lock();

		bp = per_cpu(bp_per_reg[i], cpu);
		if (bp)
			rc = NOTIFY_DONE;

		/*
		 * Reset the condition match flag to denote completion of
		 * exception handling.
		 */
		sh_ubc->clear_triggered_mask(event_mask);

		/*
		 * bp can be NULL due to concurrent perf counter
		 * removing.
		 */
		if (!bp) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Don't restore the channel if the breakpoint is from
		 * ptrace, as it always operates in one-shot mode.
		 */
		if (bp->overflow_handler == ptrace_triggered)
			resume_mask &= ~(1 << i);

		perf_bp_event(bp, args->regs);

		/* Deliver the signal to userspace */
		if (arch_check_va_in_userspace(bp->attr.bp_addr,
					       bp->attr.bp_len)) {
			siginfo_t info;

			info.si_signo = args->signr;
			info.si_errno = notifier_to_errno(rc);
			info.si_code = TRAP_HWBKPT;

			force_sig_info(args->signr, &info, current);
		}

		rcu_read_unlock();
	}

	if (cmf == 0)
		rc = NOTIFY_DONE;

	sh_ubc->enable_all(resume_mask);

	put_cpu();

	return rc;
}
Example #15
int pm_notifier_call_chain(unsigned long val)
{
	int ret = blocking_notifier_call_chain(&pm_chain_head, val, NULL);

	return notifier_to_errno(ret);
}
Example #16
static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int cpu, i, rc = NOTIFY_STOP;
	struct perf_event *bp;
	unsigned int cmf, resume_mask;

	cmf = sh_ubc->triggered_mask();
	if (unlikely(!cmf))
		return NOTIFY_DONE;

	resume_mask = sh_ubc->active_mask();

	sh_ubc->disable_all();

	cpu = get_cpu();
	for (i = 0; i < sh_ubc->num_events; i++) {
		unsigned long event_mask = (1 << i);

		if (likely(!(cmf & event_mask)))
			continue;

		rcu_read_lock();

		bp = per_cpu(bp_per_reg[i], cpu);
		if (bp)
			rc = NOTIFY_DONE;

		sh_ubc->clear_triggered_mask(event_mask);

		if (!bp) {
			rcu_read_unlock();
			break;
		}

		if (bp->overflow_handler == ptrace_triggered)
			resume_mask &= ~(1 << i);

		perf_bp_event(bp, args->regs);

		if (!arch_check_bp_in_kernelspace(bp)) {
			siginfo_t info;

			info.si_signo = args->signr;
			info.si_errno = notifier_to_errno(rc);
			info.si_code = TRAP_HWBKPT;

			force_sig_info(args->signr, &info, current);
		}

		rcu_read_unlock();
	}

	if (cmf == 0)
		rc = NOTIFY_DONE;

	sh_ubc->enable_all(resume_mask);

	put_cpu();

	return rc;
}
Example #17
int exynos_cpufreq_smpl_warn_notify_call_chain(void)
{
	int ret = blocking_notifier_call_chain(&exynos_cpufreq_smpl_warn_notifier_list, 0, NULL);
	return notifier_to_errno(ret);
}
Example #18
int online_pages(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;
	int nid;
	int ret;
	struct memory_notify arg;

	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	arg.status_change_nid = -1;

	nid = page_to_nid(pfn_to_page(pfn));
	if (node_present_pages(nid) == 0)
		arg.status_change_nid = nid;

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret) {
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		return ret;
	}
	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_sem.
	 */
	zone = page_zone(pfn_to_page(pfn));
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, pfn, pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	/*
	 * If this zone is not populated, then it is not in zonelist.
	 * This means the page allocator ignores this zone.
	 * So, zonelist must be updated after online.
	 */
	if (!populated_zone(zone))
		need_zonelists_rebuild = 1;

	walk_memory_resource(pfn, nr_pages, &onlined_pages,
		online_pages_range);
	zone->present_pages += onlined_pages;
	zone->zone_pgdat->node_present_pages += onlined_pages;

	setup_per_zone_pages_min();
	if (onlined_pages) {
		kswapd_run(zone_to_nid(zone));
		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
	}

	if (need_zonelists_rebuild)
		build_all_zonelists();
	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();

	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);

	return 0;
}
Example #19
static inline int bl_notifier_call_chain(unsigned long val)
{
	int ret = blocking_notifier_call_chain(&bl_chain_head, val, NULL);
	return notifier_to_errno(ret);
}
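For reference, the helper every example above funnels through is tiny. As defined in include/linux/notifier.h, it strips NOTIFY_STOP_MASK and undoes the encoding applied by notifier_from_errno():

static inline int notifier_to_errno(int ret)
{
	ret &= ~NOTIFY_STOP_MASK;
	return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0;
}

NOTIFY_DONE (0) and NOTIFY_OK (1) both become 0, NOTIFY_BAD becomes -EPERM, and an errno encoded by notifier_from_errno() is recovered unchanged, which is why every caller above can treat any nonzero result as a failure.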