Example #1
File: osq_lock.c Project: 020gzh/linux
void osq_unlock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node, *next;
	int curr = encode_cpu(smp_processor_id());

	/*
	 * Fast path for the uncontended case.
	 */
	if (likely(atomic_cmpxchg_release(&lock->tail, curr,
					  OSQ_UNLOCKED_VAL) == curr))
		return;

	/*
	 * Second most likely case.
	 */
	node = this_cpu_ptr(&osq_node);
	next = xchg(&node->next, NULL);
	if (next) {
		WRITE_ONCE(next->locked, 1);
		return;
	}

	next = osq_wait_next(lock, node, NULL);
	if (next)
		WRITE_ONCE(next->locked, 1);
}
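
The WRITE_ONCE(next->locked, 1) above hands the lock to the next queued waiter, which spins on that flag with READ_ONCE() inside osq_lock(). Below is a minimal userspace sketch of that handoff, assuming GCC's __typeof__ extension and pthreads; the READ_ONCE/WRITE_ONCE macros here are simplified volatile-cast stand-ins, not the kernel's real definitions.

#include <pthread.h>
#include <stdio.h>

#define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))

static int locked; /* plays the role of node->locked */

static void *waiter(void *arg)
{
	while (!READ_ONCE(locked))
		; /* spin, as the waiter in osq_lock() does */
	printf("handoff received\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	WRITE_ONCE(locked, 1); /* the unlock-side handoff */
	pthread_join(t, NULL);
	return 0;
}
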
Example #2
/**
 * \brief Net device change_mtu
 * @param netdev network device
 * @param new_mtu the new MTU to set
 */
int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	int ret = 0;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, 0);
	if (!sc)
		return -ENOMEM;

	ncmd = (union octnet_cmd *)sc->virtdptr;

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ncmd->u64 = 0;
	ncmd->s.cmd = OCTNET_CMD_CHANGE_MTU;
	ncmd->s.param1 = new_mtu;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_CMD, 0, 0, 0);

	ret = octeon_send_soft_command(oct, sc);
	if (ret == IQ_SEND_FAILED) {
		netif_info(lio, rx_err, lio->netdev, "Failed to change MTU\n");
		octeon_free_soft_command(oct, sc);
		return -EINVAL;
	}
	/* Sleep on a wait queue until the condition flag indicates that the
	 * response arrived or the request timed out.
	 */
	ret = wait_for_sc_completion_timeout(oct, sc, 0);
	if (ret)
		return ret;

	if (sc->sc_status) {
		WRITE_ONCE(sc->caller_is_done, true);
		return -EINVAL;
	}

	netdev->mtu = new_mtu;
	lio->mtu = new_mtu;

	WRITE_ONCE(sc->caller_is_done, true);
	return 0;
}
Example #3
static int cn23xx_vf_reset_io_queues(struct octeon_device *oct, u32 num_queues)
{
	u32 loop = BUSY_READING_REG_VF_LOOP_COUNT;
	int ret_val = 0;
	u32 q_no;
	u64 d64;

	for (q_no = 0; q_no < num_queues; q_no++) {
		/* set RST bit to 1. This bit applies to both IQ and OQ */
		d64 = octeon_read_csr64(oct,
					CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
		d64 |= CN23XX_PKT_INPUT_CTL_RST;
		octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
				   d64);
	}

	/* wait until the RST bit is clear or the RST and QUIET bits are set */
	for (q_no = 0; q_no < num_queues; q_no++) {
		u64 reg_val = octeon_read_csr64(oct,
					CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
		while ((READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) &&
		       !(READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_QUIET) &&
		       loop) {
			WRITE_ONCE(reg_val, octeon_read_csr64(
			    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)));
			loop--;
		}
		if (!loop) {
			dev_err(&oct->pci_dev->dev,
				"clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
				q_no);
			return -1;
		}
		WRITE_ONCE(reg_val, READ_ONCE(reg_val) &
			   ~CN23XX_PKT_INPUT_CTL_RST);
		octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
				   READ_ONCE(reg_val));

		WRITE_ONCE(reg_val, octeon_read_csr64(
		    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)));
		if (READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) {
			dev_err(&oct->pci_dev->dev,
				"clearing the reset failed for qno: %u\n",
				q_no);
			ret_val = -1;
		}
	}

	return ret_val;
}
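
The inner loop above is a bounded busy-wait: keep re-reading the register until RST clears or QUIET sets, giving up after a fixed number of reads. A stripped-down sketch of that shape, with a hypothetical read_reg() callback and illustrative bit positions (not the actual CN23XX register layout):

#include <stdint.h>
#include <stdbool.h>

#define CTL_RST   (1ULL << 0) /* illustrative, not the real bit */
#define CTL_QUIET (1ULL << 1) /* illustrative, not the real bit */

/* Poll until RST clears or QUIET sets; at most 'loop' extra reads. */
static bool wait_rst_or_quiet(uint64_t (*read_reg)(void), unsigned int loop)
{
	uint64_t v = read_reg();

	while ((v & CTL_RST) && !(v & CTL_QUIET) && loop) {
		v = read_reg();
		loop--;
	}
	return loop != 0; /* false means the bounded wait was exhausted */
}
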
Example #4
File: kcov.c Project: 513855417/linux
/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	enum kcov_mode mode;

	t = current;
	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	 */
	if (!t || in_interrupt())
		return;
	mode = READ_ONCE(t->kcov_mode);
	if (mode == KCOV_MODE_TRACE) {
		unsigned long *area;
		unsigned long pos;

		/*
		 * There is some code that runs in interrupts but for which
		 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
		 * READ_ONCE()/barrier() effectively provides load-acquire
		 * semantics with respect to interrupts; the paired
		 * barrier()/WRITE_ONCE() are in kcov_ioctl_locked().
		 */
		barrier();
		area = t->kcov_area;
		/* The first word is number of subsequent PCs. */
		pos = READ_ONCE(area[0]) + 1;
		if (likely(pos < t->kcov_size)) {
			area[pos] = _RET_IP_;
			WRITE_ONCE(area[0], pos);
		}
	}
}
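
The slot write followed by WRITE_ONCE(area[0], pos) is a publish pattern: the PC is stored first, and only then does the counter make it visible to the reader. A minimal single-writer sketch of that ordering, using the same simplified volatile-cast macros as above (stand-ins, not the kernel definitions):

#define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))

static unsigned long area[64]; /* area[0] holds the entry count */

static void record_pc(unsigned long pc)
{
	unsigned long pos = READ_ONCE(area[0]) + 1;

	if (pos < 64) {
		area[pos] = pc;           /* store the payload first... */
		WRITE_ONCE(area[0], pos); /* ...then publish the count */
	}
}
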
Example #5
File: alternative.c Project: 12zz/linux
/*
 * We might be patching the stop_machine state machine, so implement a
 * really simple polling protocol here.
 */
static int __apply_alternatives_multi_stop(void *unused)
{
	static int patched = 0;
	struct alt_region region = {
		.begin	= (struct alt_instr *)__alt_instructions,
		.end	= (struct alt_instr *)__alt_instructions_end,
	};

	/* We always have a CPU 0 at this point (__init) */
	if (smp_processor_id()) {
		while (!READ_ONCE(patched))
			cpu_relax();
		isb();
	} else {
		BUG_ON(patched);
		__apply_alternatives(&region, true);
		/* Barriers provided by the cache flushing */
		WRITE_ONCE(patched, 1);
	}

	return 0;
}

void __init apply_alternatives_all(void)
{
	/* better not try code patching on a live SMP system */
	stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
}
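
The stop-machine callback above is a one-shot publication protocol: exactly one CPU patches, everyone else polls the patched flag. A userspace analogue, assuming pthreads; __sync_synchronize() stands in for the ordering the kernel code gets from its cache flushing and isb():

#include <pthread.h>

#define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))

static int patched;

static void *secondary(void *arg)
{
	while (!READ_ONCE(patched))
		; /* poll, as the non-boot CPUs do */
	return NULL; /* safe to run the patched code now */
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, secondary, NULL);
	/* ... apply the patches here ... */
	__sync_synchronize(); /* order the patching before the flag */
	WRITE_ONCE(patched, 1);
	pthread_join(t, NULL);
	return 0;
}
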
Example #6
File: bts.c Project: bobcao3/linux
static void __bts_event_start(struct perf_event *event)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct bts_buffer *buf = perf_get_aux(&bts->handle);
	u64 config = 0;

	if (!buf->snapshot)
		config |= ARCH_PERFMON_EVENTSEL_INT;
	if (!event->attr.exclude_kernel)
		config |= ARCH_PERFMON_EVENTSEL_OS;
	if (!event->attr.exclude_user)
		config |= ARCH_PERFMON_EVENTSEL_USR;

	bts_config_buffer(buf);

	/*
	 * local barrier to make sure that ds configuration made it
	 * before we enable BTS and bts::state goes ACTIVE
	 */
	wmb();

	/* INACTIVE/STOPPED -> ACTIVE */
	WRITE_ONCE(bts->state, BTS_STATE_ACTIVE);

	intel_pmu_enable_bts(config);
}
Example #7
File: designware_pcm.c Project: mhei/linux
static int dw_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct dw_i2s_dev *dev = runtime->private_data;
	int ret = 0;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		WRITE_ONCE(dev->tx_ptr, 0);
		rcu_assign_pointer(dev->tx_substream, substream);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		rcu_assign_pointer(dev->tx_substream, NULL);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
Example #8
File: reboot.c Project: AshishNamdev/linux
/*
 * Halt all other CPUs, calling the specified function on each of them
 *
 * This function can be used to halt all other CPUs on crash
 * or emergency reboot time. The function passed as parameter
 * will be called inside a NMI handler on all CPUs.
 */
void nmi_shootdown_cpus(nmi_shootdown_cb callback)
{
	unsigned long msecs;
	local_irq_disable();

	/* Make a note of crashing cpu. Will be used in NMI callback. */
	crashing_cpu = safe_smp_processor_id();

	shootdown_callback = callback;

	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
	/* Would it be better to replace the trap vector here? */
	if (register_nmi_handler(NMI_LOCAL, crash_nmi_callback,
				 NMI_FLAG_FIRST, "crash"))
		return;		/* Return what? */
	/*
	 * Ensure the new callback function is set before sending
	 * out the NMI
	 */
	wmb();

	smp_send_nmi_allbutself();

	/* Kick CPUs looping in NMI context. */
	WRITE_ONCE(crash_ipi_issued, 1);

	msecs = 1000; /* Wait at most a second for the other cpus to stop */
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
		mdelay(1);
		msecs--;
	}

	/* Leave the nmi callback set */
}
Example #9
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct task_cputime sum;

	/* Check if cputimer isn't running. This is accessed without locking. */
	if (!READ_ONCE(cputimer->running)) {
		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start it.
		 */
		thread_group_cputime(tsk, &sum);
		update_gt_cputime(&cputimer->cputime_atomic, &sum);

		/*
		 * We're setting cputimer->running without a lock. Ensure
		 * this only gets written to in one operation. We set
		 * running after update_gt_cputime() as a small optimization,
		 * but barriers are not required because update_gt_cputime()
		 * can handle concurrent updates.
		 */
		WRITE_ONCE(cputimer->running, true);
	}
	sample_cputime_atomic(times, &cputimer->cputime_atomic);
}
Example #10
File: update.c Project: 020gzh/linux
/*
 * Inform RCU of the end of the in-kernel boot sequence.
 */
void rcu_end_inkernel_boot(void)
{
	if (IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT))
		rcu_unexpedite_gp();
	if (rcu_normal_after_boot)
		WRITE_ONCE(rcu_normal, 1);
}
Example #11
/*
 * Extract only the counts from the specified rcu_segcblist structure,
 * and place them in the specified rcu_cblist structure.  This function
 * supports both callback orphaning and invocation, hence the separation
 * of counts and callbacks.  (Callbacks ready for invocation must be
 * orphaned and adopted separately from pending callbacks, but counts
 * apply to all callbacks.  Locking must be used to make sure that
 * both orphaned-callbacks lists are consistent.)
 */
void rcu_segcblist_extract_count(struct rcu_segcblist *rsclp,
					       struct rcu_cblist *rclp)
{
	rclp->len_lazy += rsclp->len_lazy;
	rclp->len += rsclp->len;
	rsclp->len_lazy = 0;
	WRITE_ONCE(rsclp->len, 0); /* ->len sampled locklessly. */
}
Example #12
File: update.c Project: Lyude/linux
int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
Example #13
/*
 * Removes the count for the old reader from the appropriate element of
 * the srcu_struct.
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	int newval = sp->srcu_lock_nesting[idx] - 1;

	WRITE_ONCE(sp->srcu_lock_nesting[idx], newval);
	if (!newval && READ_ONCE(sp->srcu_gp_waiting))
		swake_up_one(&sp->srcu_wq);
}
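
For context, the matching lock side in Tiny SRCU bumps the same per-index nesting counter that the unlock above decrements. A hedged reconstruction of that counterpart, not verbatim from any particular tree:

int __srcu_read_lock(struct srcu_struct *sp)
{
	int idx;

	idx = READ_ONCE(sp->srcu_idx); /* the index srcu_drive_gp() flips */
	WRITE_ONCE(sp->srcu_lock_nesting[idx], sp->srcu_lock_nesting[idx] + 1);
	return idx; /* passed back to __srcu_read_unlock() */
}
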
Example #14
static void __fw_state_set(struct fw_state *fw_st,
			   enum fw_status status)
{
	WRITE_ONCE(fw_st->status, status);

	if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
		complete_all(&fw_st->completion);
}
Example #15
/*
 * Insert counts from the specified rcu_cblist structure in the
 * specified rcu_segcblist structure.
 */
void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp,
				struct rcu_cblist *rclp)
{
	rsclp->len_lazy += rclp->len_lazy;
	/* ->len sampled locklessly. */
	WRITE_ONCE(rsclp->len, rsclp->len + rclp->len);
	rclp->len_lazy = 0;
	rclp->len = 0;
}
Example #16
/*
 * Workqueue handler to drive one grace period and invoke any callbacks
 * that become ready as a result.  Single-CPU and !PREEMPT operation
 * means that we get away with murder on synchronization.  ;-)
 */
void srcu_drive_gp(struct work_struct *wp)
{
	int idx;
	struct rcu_head *lh;
	struct rcu_head *rhp;
	struct srcu_struct *sp;

	sp = container_of(wp, struct srcu_struct, srcu_work);
	if (sp->srcu_gp_running || !READ_ONCE(sp->srcu_cb_head))
		return; /* Already running or nothing to do. */

	/* Remove recently arrived callbacks and wait for readers. */
	WRITE_ONCE(sp->srcu_gp_running, true);
	local_irq_disable();
	lh = sp->srcu_cb_head;
	sp->srcu_cb_head = NULL;
	sp->srcu_cb_tail = &sp->srcu_cb_head;
	local_irq_enable();
	idx = sp->srcu_idx;
	WRITE_ONCE(sp->srcu_idx, !sp->srcu_idx);
	WRITE_ONCE(sp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
	swait_event_exclusive(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
	WRITE_ONCE(sp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */

	/* Invoke the callbacks we removed above. */
	while (lh) {
		rhp = lh;
		lh = lh->next;
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Enable rescheduling, and if there are more callbacks,
	 * reschedule ourselves.  This can race with a call_srcu()
	 * at interrupt level, but the ->srcu_gp_running checks will
	 * straighten that out.
	 */
	WRITE_ONCE(sp->srcu_gp_running, false);
	if (READ_ONCE(sp->srcu_cb_head))
		schedule_work(&sp->srcu_work);
}
Example #17
File: test.c Project: AlexShiLucky/linux
void *thread_update(void *arg)
{
	WRITE_ONCE(y, 1);
#ifndef FORCE_FAILURE_2
	synchronize_srcu(&ss);
#endif
	might_sleep();
	__unbuffered_tpr_x = READ_ONCE(x);

	return NULL;
}
Example #18
/*
 * Enqueue the specified callback onto the specified rcu_segcblist
 * structure, updating accounting as needed.  Note that the ->len
 * field may be accessed locklessly, hence the WRITE_ONCE().
 * The ->len field is used by rcu_barrier() and friends to determine
 * if it must post a callback on this structure, and it is OK
 * for rcu_barrier() to sometimes post callbacks needlessly, but
 * absolutely not OK for it to ever miss posting a callback.
 */
void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp, bool lazy)
{
	WRITE_ONCE(rsclp->len, rsclp->len + 1); /* ->len sampled locklessly. */
	if (lazy)
		rsclp->len_lazy++;
	smp_mb(); /* Ensure counts are updated before callback is enqueued. */
	rhp->next = NULL;
	*rsclp->tails[RCU_NEXT_TAIL] = rhp;
	rsclp->tails[RCU_NEXT_TAIL] = &rhp->next;
}
Example #19
/**
 * scif_rb_commit - To submit the message to let the peer fetch it
 * @rb: ring buffer
 */
void scif_rb_commit(struct scif_rb *rb)
{
	/*
	 * We must ensure ordering of all the data committed previously
	 * before we expose the new message to the peer by updating the
	 * write_ptr. This write barrier is paired with the read barrier
	 * in scif_rb_count(..)
	 */
	wmb();
	WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
#ifdef CONFIG_INTEL_MIC_CARD
	/*
	 * X100 Si bug: For the case where a Core is performing an EXT_WR
	 * followed by a Doorbell Write, the Core must perform two EXT_WR to the
	 * same address with the same data before it does the Doorbell Write.
	 * This way, if ordering is violated for the Interrupt Message, it will
	 * fall just behind the first Posted associated with the first EXT_WR.
	 */
	WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
#endif
}
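
The wmb() + WRITE_ONCE() pair above is a classic release-publication of a ring-buffer write pointer. In portable C11 the same commit step collapses into a single release store; a sketch with illustrative names, not the SCIF API:

#include <stdatomic.h>
#include <stdint.h>

struct ring {
	uint8_t buf[256];
	_Atomic uint32_t write_ptr; /* shared with the consumer */
	uint32_t current_write_offset;
};

static void ring_commit(struct ring *rb)
{
	/*
	 * The release ordering makes every buf[] store issued before this
	 * point visible to a consumer that loads write_ptr with acquire.
	 */
	atomic_store_explicit(&rb->write_ptr, rb->current_write_offset,
			      memory_order_release);
}
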
Example #20
File: smp.c Project: AlexShiLucky/linux
static int boot_secondary(unsigned int cpu, struct task_struct *ts)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	unsigned long ccount;
	int i;

#ifdef CONFIG_HOTPLUG_CPU
	WRITE_ONCE(cpu_start_id, cpu);
	/* Pairs with the third memw in the cpu_restart */
	mb();
	system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
					     sizeof(cpu_start_id));
#endif
	smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);

	for (i = 0; i < 2; ++i) {
		do
			ccount = get_ccount();
		while (!ccount);

		WRITE_ONCE(cpu_start_ccount, ccount);

		do {
			/*
			 * Pairs with the first two memws in the
			 * .Lboot_secondary.
			 */
			mb();
			ccount = READ_ONCE(cpu_start_ccount);
		} while (ccount && time_before(jiffies, timeout));

		if (ccount) {
			smp_call_function_single(0, mx_cpu_stop,
						 (void *)cpu, 1);
			WRITE_ONCE(cpu_start_ccount, 0);
			return -EIO;
		}
	}
	return 0;
}
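
The cpu_start_ccount exchange above is a two-sided handshake: the boot CPU publishes a nonzero value and then waits for the secondary to clear it as an acknowledgement. A runnable userspace sketch of that shape, assuming pthreads and the same simplified macros (the bounded timeout is omitted for brevity):

#include <pthread.h>
#include <stdio.h>

#define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))

static unsigned long token; /* plays the role of cpu_start_ccount */

static void *secondary_cpu(void *arg)
{
	while (!READ_ONCE(token))
		; /* wait for the starter's token */
	WRITE_ONCE(token, 0); /* acknowledge by clearing it */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, secondary_cpu, NULL);
	WRITE_ONCE(token, 42); /* publish a nonzero token */
	while (READ_ONCE(token))
		; /* wait for the acknowledgement */
	pthread_join(t, NULL);
	puts("handshake complete");
	return 0;
}
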
Example #21
File: i40e_ptp.c Project: Lyude/linux
/**
 * i40e_ptp_set_increment - Utility function to update clock increment rate
 * @pf: Board private structure
 *
 * During a link change, the DMA frequency that drives the 1588 logic will
 * change. In order to keep the PRTTSYN_TIME registers in units of nanoseconds,
 * we must update the increment value per clock tick.
 **/
void i40e_ptp_set_increment(struct i40e_pf *pf)
{
	struct i40e_link_status *hw_link_info;
	struct i40e_hw *hw = &pf->hw;
	u64 incval;
	u32 mult;

	hw_link_info = &hw->phy.link_info;

	i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);

	switch (hw_link_info->link_speed) {
	case I40E_LINK_SPEED_10GB:
		mult = I40E_PTP_10GB_INCVAL_MULT;
		break;
	case I40E_LINK_SPEED_1GB:
		mult = I40E_PTP_1GB_INCVAL_MULT;
		break;
	case I40E_LINK_SPEED_100MB:
	{
		static int warn_once;

		if (!warn_once) {
			dev_warn(&pf->pdev->dev,
				 "1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n");
			warn_once++;
		}
		mult = 0;
		break;
	}
	case I40E_LINK_SPEED_40GB:
	default:
		mult = 1;
		break;
	}

	/* The increment value is calculated by taking the base 40GbE incvalue
	 * and multiplying it by a factor based on the link speed.
	 */
	incval = I40E_PTP_40GB_INCVAL * mult;

	/* Write the new increment value into the increment register. The
	 * hardware will not update the clock until both registers have been
	 * written.
	 */
	wr32(hw, I40E_PRTTSYN_INC_L, incval & 0xFFFFFFFF);
	wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);

	/* Update the base adjustment value. */
	WRITE_ONCE(pf->ptp_adj_mult, mult);
	smp_mb(); /* Force the above update. */
}
Example #22
File: srcu.c Project: asmalldev/linux
/*
 * Increment the ->completed counter so that future SRCU readers will
 * use the other rank of the ->(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *sp)
{
	WRITE_ONCE(sp->completed, sp->completed + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}
Example #23
File: bts.c Project: bobcao3/linux
static void __bts_event_stop(struct perf_event *event, int state)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);

	/* ACTIVE -> INACTIVE(PMI)/STOPPED(->stop()) */
	WRITE_ONCE(bts->state, state);

	/*
	 * No extra synchronization is mandated by the documentation to have
	 * BTS data stores globally visible.
	 */
	intel_pmu_disable_bts();
}
Example #24
File: xchglock.c Project: akiyks/perfbook
int main(int argc, char *argv[])
{
	int nthreads = 0;

	create_thread(test_xchg_lock, (void *)0);
	nthreads++;
	create_thread(test_xchg_lock, (void *)1);
	nthreads++;

	smp_mb();
	while (atomic_read(&nthreadsrunning) < nthreads)
		poll(NULL, 0, 1);
	WRITE_ONCE(goflag, GOFLAG_RUN);
	smp_mb();
	poll(NULL, 0, 10000);
	smp_mb();
	WRITE_ONCE(goflag, GOFLAG_STOP);
	smp_mb();
	wait_all_threads();
	printf("lockacqs = %lu, lockerr = %lu\n", lockacqs, lockerr);
	return 0;
}
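
main() above uses the usual perfbook goflag convention: workers announce themselves via nthreadsrunning, spin until goflag becomes GOFLAG_RUN, and stop at GOFLAG_STOP. A hedged sketch of what a worker like test_xchg_lock() typically looks like under that convention (the lock body is elided; this is illustrative, not the actual xchglock.c source):

enum { GOFLAG_INIT, GOFLAG_RUN, GOFLAG_STOP };

void *test_xchg_lock(void *arg)
{
	atomic_inc(&nthreadsrunning); /* announce readiness to main() */
	while (READ_ONCE(goflag) == GOFLAG_INIT)
		continue; /* wait for the start signal */
	while (READ_ONCE(goflag) == GOFLAG_RUN) {
		/* xchg_lock(); critical section; xchg_unlock(); */
		lockacqs++; /* counted under the lock */
	}
	return NULL;
}
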
Example #25
File: af_rxrpc.c Project: avagin/linux
/**
 * rxrpc_kernel_set_max_life - Set maximum lifespan on a call
 * @sock: The socket the call is on
 * @call: The call to configure
 * @hard_timeout: The maximum lifespan of the call in jiffies
 *
 * Set the maximum lifespan of a call.  The call will end with ETIME or
 * ETIMEDOUT if it takes longer than this.
 */
void rxrpc_kernel_set_max_life(struct socket *sock, struct rxrpc_call *call,
			       unsigned long hard_timeout)
{
	unsigned long now;

	mutex_lock(&call->user_mutex);

	now = jiffies;
	hard_timeout += now;
	WRITE_ONCE(call->expect_term_by, hard_timeout);
	rxrpc_reduce_call_timer(call, hard_timeout, now, rxrpc_timer_set_for_hard);

	mutex_unlock(&call->user_mutex);
}
Example #26
void update_vsyscall(struct timekeeper *tk)
{
	int vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
	struct vsyscall_gtod_data *vdata = &vsyscall_gtod_data;

	/* Mark the new vclock used. */
	BUILD_BUG_ON(VCLOCK_MAX >= 32);
	WRITE_ONCE(vclocks_used, READ_ONCE(vclocks_used) | (1 << vclock_mode));

	gtod_write_begin(vdata);

	/* copy vsyscall data */
	vdata->vclock_mode	= vclock_mode;
	vdata->cycle_last	= tk->tkr_mono.cycle_last;
	vdata->mask		= tk->tkr_mono.mask;
	vdata->mult		= tk->tkr_mono.mult;
	vdata->shift		= tk->tkr_mono.shift;

	vdata->wall_time_sec		= tk->xtime_sec;
	vdata->wall_time_snsec		= tk->tkr_mono.xtime_nsec;

	vdata->monotonic_time_sec	= tk->xtime_sec
					+ tk->wall_to_monotonic.tv_sec;
	vdata->monotonic_time_snsec	= tk->tkr_mono.xtime_nsec
					+ ((u64)tk->wall_to_monotonic.tv_nsec
						<< tk->tkr_mono.shift);
	while (vdata->monotonic_time_snsec >=
					(((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
		vdata->monotonic_time_snsec -=
					((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
		vdata->monotonic_time_sec++;
	}

	vdata->wall_time_coarse_sec	= tk->xtime_sec;
	vdata->wall_time_coarse_nsec	= (long)(tk->tkr_mono.xtime_nsec >>
						 tk->tkr_mono.shift);

	vdata->monotonic_time_coarse_sec =
		vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
	vdata->monotonic_time_coarse_nsec =
		vdata->wall_time_coarse_nsec + tk->wall_to_monotonic.tv_nsec;

	while (vdata->monotonic_time_coarse_nsec >= NSEC_PER_SEC) {
		vdata->monotonic_time_coarse_nsec -= NSEC_PER_SEC;
		vdata->monotonic_time_coarse_sec++;
	}

	gtod_write_end(vdata);
}
Example #27
/**
 * scif_rb_update_read_ptr
 * @rb: ring buffer
 */
void scif_rb_update_read_ptr(struct scif_rb *rb)
{
	u32 new_offset;

	new_offset = rb->current_read_offset;
	/*
	 * We must ensure ordering of all the data committed or read
	 * previously before we expose the empty slot to the peer by updating
	 * the read_ptr. This barrier is paired with the memory barrier in
	 * scif_rb_space(..)
	 */
	mb();
	WRITE_ONCE(*rb->read_ptr, new_offset);
#ifdef CONFIG_INTEL_MIC_CARD
	/*
	 * X100 Si Bug: For the case where a Core is performing an EXT_WR
	 * followed by a Doorbell Write, the Core must perform two EXT_WR to the
	 * same address with the same data before it does the Doorbell Write.
	 * This way, if ordering is violated for the Interrupt Message, it will
	 * fall just behind the first Posted associated with the first EXT_WR.
	 */
	WRITE_ONCE(*rb->read_ptr, new_offset);
#endif
}
Example #28
/*
 * Clear all elements from the route table.
 */
void route_clear(void)
{
	struct route_entry *rep;
	struct route_entry *rep1;

	write_seqlock(&sl);
	rep = route_list.re_next;
	WRITE_ONCE(route_list.re_next, NULL);
	while (rep != NULL) {
		rep1 = rep->re_next;
		free(rep);
		rep = rep1;
	}
	write_sequnlock(&sl);
}
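
For completeness, the reader side that pairs with the write_seqlock() above typically retries under read_seqbegin()/read_seqretry(). A hedged reconstruction of such a lookup over the same route_list (the addr/iface field names are illustrative, not necessarily the perfbook source):

unsigned long route_lookup(unsigned long addr)
{
	struct route_entry *rep;
	unsigned long seq;
	unsigned long ret;

	do {
		seq = read_seqbegin(&sl);
		ret = ULONG_MAX; /* not found */
		for (rep = READ_ONCE(route_list.re_next); rep != NULL;
		     rep = READ_ONCE(rep->re_next)) {
			if (rep->addr == addr) {
				ret = rep->iface;
				break;
			}
		}
	} while (read_seqretry(&sl, seq)); /* retry if a writer intervened */
	return ret;
}
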
Example #29
File: mmu.c Project: austriancoder/linux
void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgd_t *fixmap_pgdp;

	spin_lock(&swapper_pgdir_lock);
	fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp));
	WRITE_ONCE(*fixmap_pgdp, pgd);
	/*
	 * We need dsb(ishst) here to ensure the page-table-walker sees
	 * our new entry before set_p?d() returns. The fixmap's
	 * flush_tlb_kernel_range() via clear_fixmap() does this for us.
	 */
	pgd_clear_fixmap();
	spin_unlock(&swapper_pgdir_lock);
}
Example #30
void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	unsigned int wake_batch = sbq_calc_wake_batch(depth);
	int i;

	if (sbq->wake_batch != wake_batch) {
		WRITE_ONCE(sbq->wake_batch, wake_batch);
		/*
		 * Pairs with the memory barrier in sbq_wake_up() to ensure that
		 * the batch size is updated before the wait counts.
		 */
		smp_mb__before_atomic();
		for (i = 0; i < SBQ_WAIT_QUEUES; i++)
			atomic_set(&sbq->ws[i].wait_cnt, 1);
	}
	sbitmap_resize(&sbq->sb, depth);
}
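
The ordering requirement above (the new batch size must be visible before the wait counts are reset) can be expressed in portable C11 with a fence between the two stores. A sketch with illustrative types, not the kernel's struct sbitmap_queue:

#include <stdatomic.h>

struct sbq_sketch {
	unsigned int wake_batch; /* read locklessly by wakers */
	_Atomic int wait_cnt[8];
};

static void sbq_set_batch(struct sbq_sketch *sbq, unsigned int wake_batch)
{
	int i;

	/* publish the new batch size first */
	*(volatile unsigned int *)&sbq->wake_batch = wake_batch;
	/* order it before the counter resets, like smp_mb__before_atomic() */
	atomic_thread_fence(memory_order_seq_cst);
	for (i = 0; i < 8; i++)
		atomic_store(&sbq->wait_cnt[i], 1);
}
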