Example #1
static int br_pass_frame_up(struct sk_buff *skb)
{
	struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
	struct net_bridge *br = netdev_priv(brdev);
	struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);

	u64_stats_update_begin(&brstats->syncp);
	brstats->rx_packets++;
	brstats->rx_bytes += skb->len;
	u64_stats_update_end(&brstats->syncp);

	indev = skb->dev;
	skb->dev = brdev;

	br_drop_fake_rtable(skb);

	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
		       netif_receive_skb);
}
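This and several of the later examples (#6, #11, #20, #25) follow the same per-CPU statistics pattern: grab the local CPU's counters with this_cpu_ptr() and wrap the 64-bit updates in u64_stats_update_begin()/u64_stats_update_end() so that readers on 32-bit kernels never see torn values. Below is a minimal, self-contained sketch of that pattern; struct demo_stats and the demo_* helpers are hypothetical names for illustration, not code from the bridge.

/*
 * Minimal sketch of the per-CPU stats pattern (hypothetical names):
 * writers touch only their own CPU's counters, readers sum across all
 * possible CPUs.
 */
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct demo_stats {
	u64			rx_packets;
	u64			rx_bytes;
	struct u64_stats_sync	syncp;
};

static DEFINE_PER_CPU(struct demo_stats, demo_stats);

/* Writer: called with preemption disabled (e.g. softirq context). */
static void demo_stats_rx(unsigned int len)
{
	struct demo_stats *stats = this_cpu_ptr(&demo_stats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

/* Reader: retry a CPU's snapshot if a writer raced with us (32-bit only). */
static void demo_stats_read(u64 *packets, u64 *bytes)
{
	int cpu;

	*packets = 0;
	*bytes = 0;
	for_each_possible_cpu(cpu) {
		const struct demo_stats *stats = per_cpu_ptr(&demo_stats, cpu);
		unsigned int start;
		u64 p, b;

		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			p = stats->rx_packets;
			b = stats->rx_bytes;
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		*packets += p;
		*bytes += b;
	}
}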
Example #2
/* Returns the queued entry, or NULL if the fifo is full. */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
				    const struct sw_flow_key *key,
				    const struct nlattr *actions,
				    const int actions_len)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = actions;
		da->actions_len = actions_len;
		da->pkt_key = *key;
	}

	return da;
}
Example #3
static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
			  void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;

		local_irq_disable();
		list_splice_init(&per_cpu(blk_cpu_done, cpu),
				 this_cpu_ptr(&blk_cpu_done));
		raise_softirq_irqoff(BLOCK_SOFTIRQ);
		local_irq_enable();
	}

	return NOTIFY_OK;
}
Example #4
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct perf_sample_data *sd = this_cpu_ptr(&bpf_trace_sd);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	return __bpf_perf_event_output(regs, map, flags, sd);
}
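The helper above is the kernel half of the story; the other half is a BPF program calling bpf_perf_event_output(). The following is a hedged sketch of such a caller, assuming a libbpf-style build; the events map, struct event and the chosen tracepoint are made up for illustration.

/* Hypothetical BPF program showing the caller side of the helper above. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} events SEC(".maps");

struct event {
	__u32 pid;
};

SEC("tracepoint/syscalls/sys_enter_execve")
int demo_trace_execve(void *ctx)
{
	struct event e = {
		.pid = bpf_get_current_pid_tgid() >> 32,
	};

	/*
	 * BPF_F_CURRENT_CPU selects the current CPU's perf buffer; the
	 * flags check in bpf_perf_event_output() above allows it because
	 * it lies within BPF_F_INDEX_MASK.
	 */
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
	return 0;
}

char LICENSE[] SEC("license") = "GPL";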
Example #5
/*
 * We cannot restore the ibs pmu state, so we always need to update
 * the event while stopping it and then reset the state when starting
 * again. Thus, we ignore the PERF_EF_RELOAD and PERF_EF_UPDATE flags
 * in perf_ibs_start()/perf_ibs_stop() and instead always do it.
 */
static void perf_ibs_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 period;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	perf_ibs_set_period(perf_ibs, hwc, &period);
	set_bit(IBS_STARTED, pcpu->state);
	perf_ibs_enable_event(perf_ibs, hwc, period >> 4);

	perf_event_update_userpage(event);
}
Example #6
/* note: already called with rcu_read_lock */
static rx_handler_result_t vrf_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if (is_ip_rx_frame(skb)) {
		struct net_device *dev = vrf_master_get_rcu(skb->dev);
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		dstats->rx_pkts++;
		dstats->rx_bytes += skb->len;
		u64_stats_update_end(&dstats->syncp);

		skb->dev = dev;

		return RX_HANDLER_ANOTHER;
	}
	return RX_HANDLER_PASS;
}
Example #7
File: matrix.c Project: Lyude/linux
/**
 * irq_matrix_assign_system - Assign system wide entry in the matrix
 * @m:		Matrix pointer
 * @bit:	Which bit to reserve
 * @replace:	Replace an already allocated vector with a system
 *		vector at the same bit position.
 *
 * The BUG_ON()s below are on purpose. If this goes wrong in the
 * early boot process, then the chance to survive is about zero.
 * If this happens when the system is live, it's not much better.
 */
void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit,
			      bool replace)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	BUG_ON(bit > m->matrix_bits);
	BUG_ON(m->online_maps > 1 || (m->online_maps && !replace));

	set_bit(bit, m->system_map);
	if (replace) {
		BUG_ON(!test_and_clear_bit(bit, cm->alloc_map));
		cm->allocated--;
		m->total_allocated--;
	}
	if (bit >= m->alloc_start && bit < m->alloc_end)
		m->systembits_inalloc++;

	trace_irq_matrix_assign_system(bit, m);
}
Example #8
static int tcf_sample_act(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_sample *s = to_sample(a);
	struct psample_group *psample_group;
	int retval;
	int size;
	int iif;
	int oif;

	tcf_lastuse_update(&s->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(s->common.cpu_bstats), skb);
	retval = READ_ONCE(s->tcf_action);

	rcu_read_lock();
	psample_group = rcu_dereference(s->psample_group);

	/* randomly sample packets according to rate */
	if (psample_group && (prandom_u32() % s->rate == 0)) {
		if (!skb_at_tc_ingress(skb)) {
			iif = skb->skb_iif;
			oif = skb->dev->ifindex;
		} else {
			iif = skb->dev->ifindex;
			oif = 0;
		}

		/* on ingress, the mac header gets popped, so push it back */
		if (skb_at_tc_ingress(skb) && tcf_sample_dev_ok_push(skb->dev))
			skb_push(skb, skb->mac_len);

		size = s->truncate ? s->trunc_size : skb->len;
		psample_sample_packet(psample_group, skb, size, iif, oif,
				      s->rate);

		if (skb_at_tc_ingress(skb) && tcf_sample_dev_ok_push(skb->dev))
			skb_pull(skb, skb->mac_len);
	}

	rcu_read_unlock();
	return retval;
}
Example #9
static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	/*
	 * Impossible today, but be paranoid about the future: ideally
	 * this check would happen at build time rather than at runtime.
	 */
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		"exit event has grown above perf buffer size"))
		return;

	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
				sys_data->exit_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	head = this_cpu_ptr(sys_data->exit_event->perf_events);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}
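The size arithmetic above is worth a worked example. ALIGN(x, a) rounds x up to the next multiple of a, and the u32 that is added and then subtracted accounts for the 32-bit size field that precedes raw sample data in the perf buffer, keeping the whole sample u64-aligned. Assuming, purely for illustration, that sizeof(*rec) were 24 bytes on a 64-bit build:

/*
 * Worked example of the size computation (the 24-byte figure is an
 * assumption for illustration, not the real struct layout):
 *
 *   sizeof(*rec) + sizeof(u32) = 24 + 4 = 28
 *   ALIGN(28, sizeof(u64))     = 32        (next multiple of 8)
 *   size = 32 - sizeof(u32)    = 28
 *
 * so the record plus the leading u32 fills exactly 32 aligned bytes.
 */
#include <linux/bug.h>

#define DEMO_ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static inline void demo_check_size_math(void)
{
	BUILD_BUG_ON(DEMO_ALIGN(24 + 4, 8) - 4 != 28);
}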
Example #10
static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev)
{
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
	unsigned int len;
	int ret;

	/* Handle non-VLAN frames if they are sent to us, for example by DHCP.
	 *
	 * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
	 * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
	 */
	if (
#ifdef CONFIG_VLAN_8021Q_DOUBLE_TAG
	    (vlan_double_tag) ||
#endif
	    (veth->h_vlan_proto != __constant_htons(ETH_P_8021Q) ||
	     vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR)) {
		u16 vlan_tci;
		vlan_tci = vlan_dev_info(dev)->vlan_id;
		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
		skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
	}

	skb->dev = vlan_dev_info(dev)->real_dev;
	len = skb->len;
	ret = dev_queue_xmit(skb);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct vlan_pcpu_stats *stats;

		stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats);
		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	} else {
		this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped);
	}

	return ret;
}
Example #11
static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
	unsigned int len;
	int ret;

	/* Handle non-VLAN frames if they are sent to us, for example by DHCP.
	 *
	 * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
	 * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
	 */
	if (veth->h_vlan_proto != vlan->vlan_proto ||
	    vlan->flags & VLAN_FLAG_REORDER_HDR) {
		u16 vlan_tci;
		vlan_tci = vlan->vlan_id;
		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
		__vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci);
	}

	skb->dev = vlan->real_dev;
	len = skb->len;
	if (unlikely(netpoll_tx_running(dev)))
		return vlan_netpoll_send_skb(vlan, skb);

	ret = dev_queue_xmit(skb);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct vlan_pcpu_stats *stats;

		stats = this_cpu_ptr(vlan->vlan_pcpu_stats);
		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	} else {
		this_cpu_inc(vlan->vlan_pcpu_stats->tx_dropped);
	}

	return ret;
}
Example #12
/**
 * percpu_ida_free - free a tag
 * @pool: pool @tag was allocated from
 * @tag: a tag previously allocated with percpu_ida_alloc()
 *
 * Safe to be called from interrupt context.
 */
void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
{
	struct percpu_ida_cpu *tags;
	unsigned long flags;
	unsigned nr_free;

	BUG_ON(tag >= pool->nr_tags);

	local_irq_save(flags);
	tags = this_cpu_ptr(pool->tag_cpu);

	spin_lock(&tags->lock);
	tags->freelist[tags->nr_free++] = tag;

	nr_free = tags->nr_free;
	spin_unlock(&tags->lock);

	if (nr_free == 1) {
		cpumask_set_cpu(smp_processor_id(),
				&pool->cpus_have_tags);
		wake_up(&pool->wait);
	}

	if (nr_free == pool->percpu_max_size) {
		spin_lock(&pool->lock);

		/*
		 * Global lock held and irqs disabled, don't need percpu
		 * lock
		 */
		if (tags->nr_free == pool->percpu_max_size) {
			move_tags(pool->freelist, &pool->nr_free,
				  tags->freelist, &tags->nr_free,
				  pool->percpu_batch_size);

			wake_up(&pool->wait);
		}
		spin_unlock(&pool->lock);
	}

	local_irq_restore(flags);
}
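For completeness, here is a sketch of how the free path above pairs with allocation. The pool name and size are hypothetical, and the sketch assumes the state-based percpu_ida_alloc() variant of that era; later kernels dropped percpu_ida in favour of sbitmap.

/* Hypothetical user of the percpu_ida API whose free path is shown above. */
#include <linux/percpu_ida.h>
#include <linux/sched.h>

static struct percpu_ida demo_tags;

static int demo_tags_setup(void)
{
	/* Create a pool of 256 tags, numbered 0..255. */
	return percpu_ida_init(&demo_tags, 256);
}

static int demo_do_request(void)
{
	int tag;

	/* TASK_RUNNING: do not sleep; a negative value means no tag is free. */
	tag = percpu_ida_alloc(&demo_tags, TASK_RUNNING);
	if (tag < 0)
		return tag;

	/* ... use @tag to index a preallocated per-request slot ... */

	percpu_ida_free(&demo_tags, tag);	/* safe from IRQ context */
	return 0;
}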
Example #13
File: time.c Project: 1314cc/linux
static int arc_timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);

	evt->cpumask = cpumask_of(smp_processor_id());

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		clockevents_config_and_register(evt, arc_timer_freq,
						0, ULONG_MAX);
		enable_percpu_irq(arc_timer_irq, 0);
		break;
	case CPU_DYING:
		disable_percpu_irq(arc_timer_irq);
		break;
	}

	return NOTIFY_OK;
}
Example #14
/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int i;

	for (i = 0; i < sh_ubc->num_events; i++) {
		struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);

		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
		return;

	sh_ubc->disable(info, i);
	clk_disable(sh_ubc->clk);
}
Example #15
/*
 * Kernel-side NEON support functions
 */
void kernel_neon_begin_partial(u32 num_regs)
{
	if (in_interrupt()) {
		struct fpsimd_partial_state *s = this_cpu_ptr(
			in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate);
		BUG_ON(num_regs > 32);
		fpsimd_save_partial_state(s, roundup(num_regs, 2));
	} else {
		/*
		 * Save the userland FPSIMD state if we have one and if we
		 * haven't done so already. Clear fpsimd_last_state to indicate
		 * that there is no longer userland FPSIMD state in the
		 * registers.
		 */
		preempt_disable();

		if (current->mm)
			fpsimd_save_state(&current->thread.fpsimd_state);
	}
}
Example #16
static irqreturn_t timer_irq_handler(int irq, void *dev_id)
{
	/*
	 * Note that generic IRQ core could have passed @evt for @dev_id if
	 * irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
	 */
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
	int irq_reenable = clockevent_state_periodic(evt);

	/*
	 * Any write to the CTRL reg ACKs the interrupt: we rewrite it
	 * with the [N]ot [H]alted bit set, and re-arm the timer (if
	 * periodic) via the [I]nterrupt [E]nable bit.
	 */
	write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}
Example #17
/* Bridge group multicast address 802.1d (pg 51). */
const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };

int qos_enable = 0;		/*  added pling 03/13/2007 */
static int br_pass_frame_up(struct sk_buff *skb)
{
	struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
	struct net_bridge *br = netdev_priv(brdev);
	struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);

	u64_stats_update_begin(&brstats->syncp);
	brstats->rx_packets++;
	brstats->rx_bytes += skb->len;
	u64_stats_update_end(&brstats->syncp);

	indev = skb->dev;
	/*  wklin added, 2010/06/15 @attach_dev */
	if (htons(ETH_P_ARP) == eth_hdr(skb)->h_proto)
	    *(pp_bridge_indev(skb)) = indev;/*backup incoming port to be used in arp.c */
	skb->dev = brdev;

	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
		       netif_receive_skb);
}
Example #18
/*
 * Change the CPUMF state to active.
 * Enable and activate the CPU-counter sets according
 * to the per-cpu control state.
 */
static void cf_diag_enable(struct pmu *pmu)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	int err;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s pmu %p cpu %d flags %#x state %#llx\n",
			    __func__, pmu, smp_processor_id(), cpuhw->flags,
			    cpuhw->state);
	if (cpuhw->flags & PMU_F_ENABLED)
		return;

	err = lcctl(cpuhw->state);
	if (err) {
		pr_err("Enabling the performance measuring unit "
		       "failed with rc=%x\n", err);
		return;
	}
	cpuhw->flags |= PMU_F_ENABLED;
}
Example #19
static noinline void nft_update_chain_stats(const struct nft_chain *chain,
					    const struct nft_pktinfo *pkt)
{
	struct nft_base_chain *base_chain;
	struct nft_stats *stats;

	base_chain = nft_base_chain(chain);
	if (!base_chain->stats)
		return;

	local_bh_disable();
	stats = this_cpu_ptr(rcu_dereference(base_chain->stats));
	if (stats) {
		u64_stats_update_begin(&stats->syncp);
		stats->pkts++;
		stats->bytes += pkt->skb->len;
		u64_stats_update_end(&stats->syncp);
	}
	local_bh_enable();
}
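The local_bh_disable()/local_bh_enable() pair above does more than hold off softirqs: it also keeps the task from migrating, which is what makes the this_cpu_ptr() dereference stable. In fully preemptible code the same guarantee is usually obtained with get_cpu_ptr()/put_cpu_ptr(), as in this small sketch (demo_counter is a hypothetical per-CPU variable):

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_counter);

static void demo_count_event(void)
{
	/* get_cpu_ptr() disables preemption before resolving the pointer... */
	unsigned long *ctr = get_cpu_ptr(&demo_counter);

	(*ctr)++;

	/* ...and put_cpu_ptr() re-enables it once we are done. */
	put_cpu_ptr(&demo_counter);
}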
Example #20
/**
 *	ovs_vport_send - send a packet on a device
 *
 * @vport: vport on which to send the packet
 * @skb: skb to send
 *
 * Sends the given packet and returns the length of data sent.  Either ovs
 * lock or rcu_read_lock must be held.
 */
int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
{
	int sent = vport->ops->send(vport, skb);

	if (likely(sent > 0)) {
		struct pcpu_sw_netstats *stats;

		stats = this_cpu_ptr(vport->percpu_stats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += sent;
		u64_stats_update_end(&stats->syncp);
	} else if (sent < 0) {
		ovs_vport_record_error(vport, VPORT_E_TX_ERROR);
	} else {
		ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
	}
	return sent;
}
Example #21
File: smp.c Project: Lyude/linux
/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	struct llist_head *head;
	struct llist_node *entry;
	call_single_data_t *csd, *csd_next;
	static bool warned;

	lockdep_assert_irqs_disabled();

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, llist)
			pr_warn("IPI callback %pS sent to offline CPU\n",
				csd->func);
	}

	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;

		/* Do we wait until *after* callback? */
		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
			func(info);
			csd_unlock(csd);
		} else {
			csd_unlock(csd);
			func(info);
		}
	}
}
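The queue handling above is the classic lockless llist pattern: producers push nodes with llist_add(), and the consumer detaches everything at once with llist_del_all(), restores FIFO order with llist_reverse_order(), and only then walks the list. A minimal sketch with hypothetical names:

#include <linux/llist.h>

struct demo_work {
	struct llist_node llnode;
	void (*func)(struct demo_work *w);
};

static LLIST_HEAD(demo_queue);

/* Producer: safe from any context, no lock taken. */
static void demo_queue_work(struct demo_work *w)
{
	llist_add(&w->llnode, &demo_queue);
}

/* Consumer: grab everything at once, then run entries oldest-first. */
static void demo_run_queue(void)
{
	struct llist_node *entry = llist_del_all(&demo_queue);
	struct demo_work *w, *next;

	entry = llist_reverse_order(entry);
	llist_for_each_entry_safe(w, next, entry, llnode)
		w->func(w);
}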
Example #22
/*
 * Called from the idle task. We need to set active here, so we can kick off
 * the stopper thread and unpark the smpboot threads. If the target state is
 * beyond CPUHP_AP_ONLINE_IDLE we kick cpuhp thread and let it bring up the
 * cpu further.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	unsigned int cpu = smp_processor_id();

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	st->state = CPUHP_AP_ONLINE_IDLE;

	/* Unpark the stopper thread and the hotplug thread of this cpu */
	stop_machine_unpark(cpu);
	kthread_unpark(st->thread);

	/* Should we go further up ? */
	if (st->target > CPUHP_AP_ONLINE_IDLE)
		__cpuhp_kick_ap_work(st);
	else
		complete(&st->done);
}
Example #23
/*
 * Runstate accounting
 */
static void get_runstate_snapshot(struct vcpu_runstate_info *res)
{
	u64 state_time;
	struct vcpu_runstate_info *state;

	BUG_ON(preemptible());

	state = this_cpu_ptr(&xen_runstate);

	/*
	 * The runstate info is always updated by the hypervisor on
	 * the current CPU, so there's no need to use anything
	 * stronger than a compiler barrier when fetching it.
	 */
	do {
		state_time = get64(&state->state_entry_time);
		barrier();
		*res = *state;
		barrier();
	} while (get64(&state->state_entry_time) != state_time);
}
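The retry loop above is a hand-rolled versioned-snapshot read: sample a version field, copy the data, and start over if the version moved underneath you. A generic sketch of the same idea follows; struct demo_snapshot is hypothetical, and the kernel's seqcount_t offers the same pattern with proper write-side support (the Xen code uses get64() so the 64-bit version read stays atomic even on 32-bit).

#include <linux/compiler.h>
#include <linux/types.h>

struct demo_snapshot {
	u64 version;	/* bumped by the writer on every update */
	u64 value;
};

static void demo_read_snapshot(const struct demo_snapshot *src,
			       struct demo_snapshot *out)
{
	u64 v;

	do {
		v = READ_ONCE(src->version);
		barrier();	/* copy only after sampling the version */
		*out = *src;
		barrier();	/* then confirm the version did not move */
	} while (READ_ONCE(src->version) != v);
}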
Example #24
static int arch_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
	u32 flags;

	__arch_timer_setup(ARCH_CP15_TIMER, clk);

	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);

	if (arch_timer_has_nonsecure_ppi()) {
		flags = check_ppi_trigger(arch_timer_ppi[PHYS_NONSECURE_PPI]);
		enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], flags);
	}

	arch_counter_set_user_access();
	if (evtstrm_enable)
		arch_timer_configure_evtstream();

	return 0;
}
Example #25
/* Called with rcu_read_lock_bh. */
static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int len, err;

	len = skb->len;
	rcu_read_lock();
	err = ovs_vport_receive(internal_dev_priv(netdev)->vport, skb, NULL);
	rcu_read_unlock();

	if (likely(!err)) {
		struct pcpu_sw_netstats *tstats = this_cpu_ptr(netdev->tstats);

		u64_stats_update_begin(&tstats->syncp);
		tstats->tx_bytes += len;
		tstats->tx_packets++;
		u64_stats_update_end(&tstats->syncp);
	} else {
		netdev->stats.tx_errors++;
	}
	return 0;
}
Example #26
static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	/*
	 * Start the timer first to prevent the NMI watchdog triggering
	 * before the timer has a chance to fire.
	 */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* Initialize timestamp */
	__touch_watchdog();
	/* Enable the perf event */
	if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
		watchdog_nmi_enable(cpu);

	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
}
Example #27
void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
{
	unsigned long flags;
	struct qlist_head *q;
	struct qlist_head temp = QLIST_INIT;

	local_irq_save(flags);

	q = this_cpu_ptr(&cpu_quarantine);
	qlist_put(q, &info->quarantine_link, cache->size);
	if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE))
		qlist_move_all(q, &temp);

	local_irq_restore(flags);

	if (unlikely(!qlist_empty(&temp))) {
		spin_lock_irqsave(&quarantine_lock, flags);
		qlist_move_all(&temp, &global_quarantine);
		spin_unlock_irqrestore(&quarantine_lock, flags);
	}
}
Example #28
irqreturn_t smp_ipi_demux(void)
{
    struct cpu_messages *info = this_cpu_ptr(&ipi_message);
    unsigned int all;

    mb();	/* order any irq clear */

    do {
        all = xchg(&info->messages, 0);
        if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
            generic_smp_call_function_interrupt();
        if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
            scheduler_ipi();
        if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
            tick_broadcast_ipi_handler();
        if (all & IPI_MESSAGE(PPC_MSG_DEBUGGER_BREAK))
            debug_ipi_action(0, NULL);
    } while (info->messages);

    return IRQ_HANDLED;
}
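The xchg(&info->messages, 0) above is the key trick: it atomically consumes every pending message bit exactly once, even while senders keep setting bits. A sketch of the same handshake with hypothetical names; the atomic_long operations stand in for whatever the real sender side uses.

#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/percpu.h>
#include <linux/printk.h>

#define DEMO_MSG_RESCHEDULE	BIT(0)
#define DEMO_MSG_CALL_FUNC	BIT(1)

static DEFINE_PER_CPU(atomic_long_t, demo_messages);

/* Sender: mark a message for @cpu, then fire the arch IPI (not shown). */
static void demo_send_message(int cpu, unsigned long msg)
{
	atomic_long_or(msg, per_cpu_ptr(&demo_messages, cpu));
	/* arch-specific IPI send would go here */
}

/*
 * Receiver (IPI handler): swap the word with zero so each message is
 * seen exactly once, then dispatch on the bits, as smp_ipi_demux() does.
 */
static void demo_handle_ipi(void)
{
	unsigned long pending;

	do {
		pending = atomic_long_xchg(this_cpu_ptr(&demo_messages), 0);
		if (pending & DEMO_MSG_RESCHEDULE)
			pr_debug("reschedule requested\n");
		if (pending & DEMO_MSG_CALL_FUNC)
			pr_debug("call-function requested\n");
	} while (atomic_long_read(this_cpu_ptr(&demo_messages)));
}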
Example #29
static void take_over_work(struct ehca_comp_pool *pool, int cpu)
{
    struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
    LIST_HEAD(list);
    struct ehca_cq *cq;
    unsigned long flags_cct;

    spin_lock_irqsave(&cct->task_lock, flags_cct);

    list_splice_init(&cct->cq_list, &list);

    while (!list_empty(&list)) {
        /* the pending CQs were spliced onto the local list above */
        cq = list_entry(list.next, struct ehca_cq, entry);

        list_del(&cq->entry);
        __queue_comp_task(cq, this_cpu_ptr(pool->cpu_comp_tasks));
    }

    spin_unlock_irqrestore(&cct->task_lock, flags_cct);

}
Example #30
static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return;

	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
				sys_data->enter_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			       (unsigned long *)&rec->args);

	head = this_cpu_ptr(sys_data->enter_event->perf_events);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}