Example #1
static irqreturn_t serial_ir_irq_handler(int i, void *blah)
{
	ktime_t kt;
	int counter, dcd;
	u8 status;
	ktime_t delkt;
	unsigned int data;
	static int last_dcd = -1;

	if ((sinp(UART_IIR) & UART_IIR_NO_INT)) {
		/* not our interrupt */
		return IRQ_NONE;
	}

	counter = 0;
	do {
		counter++;
		status = sinp(UART_MSR);
		if (counter > RS_ISR_PASS_LIMIT) {
			dev_err(&serial_ir.pdev->dev, "Trapped in interrupt");
			break;
		}
		if ((status & hardware[type].signal_pin_change) &&
		    sense != -1) {
			/* get current time */
			kt = ktime_get();

			/*
			 * The driver needs to know if your receiver is
			 * active high or active low, or the space/pulse
			 * sense could be inverted.
			 */

			dcd = (status & hardware[type].signal_pin) ? 1 : 0;

			if (dcd == last_dcd) {
				dev_err(&serial_ir.pdev->dev,
					"ignoring spike: %d %d %lldns %lldns\n",
					dcd, sense, ktime_to_ns(kt),
					ktime_to_ns(serial_ir.lastkt));
				continue;
			}

			/* calc time since last interrupt in nanoseconds */
			delkt = ktime_sub(kt, serial_ir.lastkt);
			if (ktime_compare(delkt, ktime_set(15, 0)) > 0) {
				data = IR_MAX_DURATION; /* really long time */
				if (!(dcd ^ sense)) {
					/* sanity check */
					dev_err(&serial_ir.pdev->dev,
						"dcd unexpected: %d %d %lldns %lldns\n",
						dcd, sense, ktime_to_ns(kt),
						ktime_to_ns(serial_ir.lastkt));
					/*
					 * detecting pulse while this
					 * MUST be a space!
					 */
					sense = sense ? 0 : 1;
				}
			} else {
				data = ktime_to_ns(delkt);
			}
			frbwrite(data, !(dcd ^ sense));
			serial_ir.lastkt = kt;
			last_dcd = dcd;
		}
	} while (!(sinp(UART_IIR) & UART_IIR_NO_INT)); /* still pending ? */

	mod_timer(&serial_ir.timeout_timer,
		  jiffies + nsecs_to_jiffies(serial_ir.rcdev->timeout));

	ir_raw_event_handle(serial_ir.rcdev);

	return IRQ_HANDLED;
}
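
The handler above follows the standard IRQ timestamping pattern: read ktime_get() on each edge, subtract the previously stored stamp, and hand the nanosecond delta to the decoder. A minimal sketch of just that interval measurement (kernel-module context assumed; the static variable is illustrative, not from the driver):

#include <linux/ktime.h>

static ktime_t last_edge;

/* nanoseconds since the previous edge; updates the stored stamp */
static s64 edge_interval_ns(void)
{
	ktime_t now = ktime_get();
	s64 delta = ktime_to_ns(ktime_sub(now, last_edge));

	last_edge = now;
	return delta;
}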
Example #2
/*
 * Put CPU in low power mode.
 */
void arch_idle(void)
{
	bool allow[MSM_PM_SLEEP_MODE_NR];
	uint32_t sleep_limit = SLEEP_LIMIT_NONE;

	int64_t timer_expiration;
	int latency_qos;
	int ret;
	int i;
	unsigned int cpu;
	int64_t t1;
	static DEFINE_PER_CPU(int64_t, t2);
	int exit_stat;

	if (!atomic_read(&msm_pm_init_done))
		return;

	cpu = smp_processor_id();
	latency_qos = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	/* get the next timer expiration */
	timer_expiration = ktime_to_ns(tick_nohz_get_sleep_length());

	t1 = ktime_to_ns(ktime_get());
	msm_pm_add_stat(MSM_PM_STAT_NOT_IDLE, t1 - __get_cpu_var(t2));
	msm_pm_add_stat(MSM_PM_STAT_REQUESTED_IDLE, timer_expiration);
	exit_stat = MSM_PM_STAT_IDLE_SPIN;

	for (i = 0; i < ARRAY_SIZE(allow); i++)
		allow[i] = true;

	if (num_online_cpus() > 1 ||
		(timer_expiration < msm_pm_idle_sleep_min_time) ||
		!msm_pm_irq_extns->idle_sleep_allowed()) {
		allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = false;
		allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN] = false;
	}

	for (i = 0; i < ARRAY_SIZE(allow); i++) {
		struct msm_pm_platform_data *mode =
					&msm_pm_modes[MSM_PM_MODE(cpu, i)];
		if (!mode->idle_supported || !mode->idle_enabled ||
			mode->latency >= latency_qos ||
			mode->residency * 1000ULL >= timer_expiration)
			allow[i] = false;
	}

	if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] ||
		allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN]) {
		uint32_t wait_us = CONFIG_MSM_IDLE_WAIT_ON_MODEM;
		while (msm_pm_modem_busy() && wait_us) {
			if (wait_us > 100) {
				udelay(100);
				wait_us -= 100;
			} else {
				udelay(wait_us);
				wait_us = 0;
			}
		}

		if (msm_pm_modem_busy()) {
			allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = false;
			allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN]
				= false;
		}
	}

	MSM_PM_DPRINTK(MSM_PM_DEBUG_IDLE, KERN_INFO,
		"%s(): latency qos %d, next timer %lld, sleep limit %u\n",
		__func__, latency_qos, timer_expiration, sleep_limit);

	for (i = 0; i < ARRAY_SIZE(allow); i++)
		MSM_PM_DPRINTK(MSM_PM_DEBUG_IDLE, KERN_INFO,
			"%s(): allow %s: %d\n", __func__,
			msm_pm_sleep_mode_labels[i], (int)allow[i]);

	if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] ||
		allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN]) {
		/* Sync the timer with SCLK; it is needed only for the
		 * modem-assisted power collapse case.
		 */
		int64_t next_timer_exp = msm_timer_enter_idle();
		uint32_t sleep_delay;
		bool low_power = false;

		sleep_delay = (uint32_t) msm_pm_convert_and_cap_time(
			next_timer_exp, MSM_PM_SLEEP_TICK_LIMIT);

		if (sleep_delay == 0) /* 0 would mean infinite time */
			sleep_delay = 1;

		if (!allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE])
			sleep_limit = SLEEP_LIMIT_NO_TCXO_SHUTDOWN;

#if defined(CONFIG_MSM_MEMORY_LOW_POWER_MODE_IDLE_ACTIVE)
		sleep_limit |= SLEEP_RESOURCE_MEMORY_BIT1;
#elif defined(CONFIG_MSM_MEMORY_LOW_POWER_MODE_IDLE_RETENTION)
		sleep_limit |= SLEEP_RESOURCE_MEMORY_BIT0;
#endif

		ret = msm_pm_power_collapse(true, sleep_delay, sleep_limit);
		low_power = (ret != -EBUSY && ret != -ETIMEDOUT);
		msm_timer_exit_idle(low_power);

		if (ret)
			exit_stat = MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE;
		else {
			exit_stat = MSM_PM_STAT_IDLE_POWER_COLLAPSE;
			msm_pm_sleep_limit = sleep_limit;
		}
	} else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE]) {
		ret = msm_pm_power_collapse_standalone(true);
		exit_stat = ret ?
			MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE :
			MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE;
	} else if (allow[MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT]) {
		ret = msm_pm_swfi(true);
		if (ret)
			while (!msm_pm_irq_extns->irq_pending())
				udelay(1);
		exit_stat = ret ? MSM_PM_STAT_IDLE_SPIN : MSM_PM_STAT_IDLE_WFI;
	} else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT]) {
		msm_pm_swfi(false);
		exit_stat = MSM_PM_STAT_IDLE_WFI;
	} else {
		while (!msm_pm_irq_extns->irq_pending())
			udelay(1);
		exit_stat = MSM_PM_STAT_IDLE_SPIN;
	}

	__get_cpu_var(t2) = ktime_to_ns(ktime_get());
	msm_pm_add_stat(exit_stat, __get_cpu_var(t2) - t1);
}
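
The mode-filter loop above encodes the usual cpuidle admission rule: a sleep mode is allowed only if its wakeup latency fits under the PM QoS bound and its break-even residency fits inside the expected sleep window. Note the unit conversion: residency is in microseconds, hence the * 1000ULL against a nanosecond timer expiration. A reduced sketch of that rule (hypothetical helper mirroring the checks in arch_idle() above):

/* allow a sleep mode only if it pays for itself before the next timer */
static bool mode_allowed(unsigned int latency_us, u64 residency_us,
			 int latency_qos_us, s64 sleep_len_ns)
{
	if (latency_us >= latency_qos_us)
		return false;	/* would violate the CPU DMA latency QoS */
	if (residency_us * 1000ULL >= sleep_len_ns)
		return false;	/* would be woken before breaking even */
	return true;
}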
Example #3
/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @state: the state data
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
				  struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	ktime_t  kt1, kt2;
	s64 idle_time_ns;
	s64 idle_time;

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return acpi_idle_enter_c1(dev, state);

	local_irq_disable();

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();

		if (unlikely(need_resched())) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return 0;
		}
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	kt1 = ktime_get_real();
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	kt2 = ktime_get_real();
	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
	idle_time = idle_time_ns;
	do_div(idle_time, NSEC_PER_USEC);

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(idle_time_ns);

	local_irq_enable();
	if (cx->entry_method != ACPI_CSTATE_FFH)
		current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	cx->time += idle_time;
	return idle_time;
}
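
Note the do_div() idiom used for the ns-to-µs conversion: the macro divides its 64-bit first argument in place and returns the remainder, so the quotient is read back from the variable itself. A minimal sketch of that conversion on its own:

#include <asm/div64.h>
#include <linux/time.h>

/* nanoseconds -> microseconds; do_div() divides 'ns' in place */
static u64 idle_ns_to_us(u64 ns)
{
	do_div(ns, NSEC_PER_USEC);
	return ns;
}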
Example #4
static ssize_t poll_delay_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lld\n", ktime_to_ns(kr3dm.acc_poll_delay));
}
/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	ktime_t expires_next, now, entry_time, delta;
	int i, retries = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event.tv64 = KTIME_MAX;

	raw_spin_lock(&cpu_base->lock);
	entry_time = now = hrtimer_update_base(cpu_base);
retry:
	expires_next.tv64 = KTIME_MAX;
	/*
	 * We set expires_next to KTIME_MAX here with cpu_base->lock
	 * held to prevent that a timer is enqueued in our queue via
	 * the migration code. This does not affect enqueueing of
	 * timers which run their callback and need to be requeued on
	 * this CPU.
	 */
	cpu_base->expires_next.tv64 = KTIME_MAX;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		struct hrtimer_clock_base *base;
		struct timerqueue_node *node;
		ktime_t basenow;

		if (!(cpu_base->active_bases & (1 << i)))
			continue;

		base = cpu_base->clock_base + i;
		basenow = ktime_add(now, base->offset);

		while ((node = timerqueue_getnext(&base->active))) {
			struct hrtimer *timer;

			timer = container_of(node, struct hrtimer, node);

			/*
			 * The immediate goal for using the softexpires is
			 * minimizing wakeups, not running timers at the
			 * earliest interrupt after their soft expiration.
			 * This allows us to avoid using a Priority Search
			 * Tree, which can answer a stabbing query for
			 * overlapping intervals and instead use the simple
			 * BST we already have.
			 * We don't add extra wakeups by delaying timers that
			 * are right-of a not yet expired timer, because that
			 * timer will have to trigger a wakeup anyway.
			 */

			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
				ktime_t expires;

				expires = ktime_sub(hrtimer_get_expires(timer),
						    base->offset);
				if (expires.tv64 < 0)
					expires.tv64 = KTIME_MAX;
				if (expires.tv64 < expires_next.tv64)
					expires_next = expires;
				break;
			}

			__run_hrtimer(timer, &basenow);
		}
	}

	/*
	 * Store the new expiry value so the migration code can verify
	 * against it.
	 */
	cpu_base->expires_next = expires_next;
	raw_spin_unlock(&cpu_base->lock);

	/* Reprogramming necessary ? */
	if (expires_next.tv64 == KTIME_MAX ||
	    !tick_program_event(expires_next, 0)) {
		cpu_base->hang_detected = 0;
		return;
	}

	/*
	 * The next timer was already expired due to:
	 * - tracing
	 * - long lasting callbacks
	 * - being scheduled away when running in a VM
	 *
	 * We need to prevent that we loop forever in the hrtimer
	 * interrupt routine. We give it 3 attempts to avoid
	 * overreacting on some spurious event.
	 *
	 * Acquire base lock for updating the offsets and retrieving
	 * the current time.
	 */
	raw_spin_lock(&cpu_base->lock);
	now = hrtimer_update_base(cpu_base);
	cpu_base->nr_retries++;
	if (++retries < 3)
		goto retry;
	/*
	 * Give the system a chance to do something else than looping
	 * here. We stored the entry time, so we know exactly how long
	 * we spent here. We schedule the next event this amount of
	 * time away.
	 */
	cpu_base->nr_hangs++;
	cpu_base->hang_detected = 1;
	raw_spin_unlock(&cpu_base->lock);
	delta = ktime_sub(now, entry_time);
	if (delta.tv64 > cpu_base->max_hang_time.tv64)
		cpu_base->max_hang_time = delta;
	/*
	 * Limit it to a sensible value as we enforce a longer
	 * delay. Give the CPU at least 100ms to catch up.
	 */
	if (delta.tv64 > 100 * NSEC_PER_MSEC)
		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
	else
		expires_next = ktime_add(now, delta);
	tick_program_event(expires_next, 1);
	printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
		    ktime_to_ns(delta));
}
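
The tail of the interrupt is a self-defence pattern: after three in-lock retries the handler declares a hang, measures how long it has already been running (delta), and schedules the next event that far in the future, capped at 100 ms. A condensed sketch of the back-off rule at the end:

/* delay the next event by the time already spent, capped at 100 ms */
static ktime_t hang_backoff_expiry(ktime_t now, ktime_t delta)
{
	if (ktime_to_ns(delta) > 100 * NSEC_PER_MSEC)
		return ktime_add_ns(now, 100 * NSEC_PER_MSEC);
	return ktime_add(now, delta);
}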
Example #6
void igb_ptp_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
#ifdef HAVE_PTP_1588_CLOCK_PINS
	int i;
#endif /* HAVE_PTP_1588_CLOCK_PINS */

	switch (hw->mac.type) {
	case e1000_82576:
		snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
		adapter->ptp_caps.owner = THIS_MODULE;
		adapter->ptp_caps.max_adj = 999999881;
		adapter->ptp_caps.n_ext_ts = 0;
		adapter->ptp_caps.pps = 0;
		adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
		adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64
		adapter->ptp_caps.gettime64 = igb_ptp_gettime64_82576;
		adapter->ptp_caps.settime64 = igb_ptp_settime64_82576;
#else
		adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
		adapter->ptp_caps.settime = igb_ptp_settime_82576;
#endif
		adapter->ptp_caps.enable = igb_ptp_feature_enable;
		adapter->cc.read = igb_ptp_read_82576;
		adapter->cc.mask = CLOCKSOURCE_MASK(64);
		adapter->cc.mult = 1;
		adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
		/* Dial the nominal frequency. */
		E1000_WRITE_REG(hw, E1000_TIMINCA,
				INCPERIOD_82576 | INCVALUE_82576);
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
		snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
		adapter->ptp_caps.owner = THIS_MODULE;
		adapter->ptp_caps.max_adj = 62499999;
		adapter->ptp_caps.n_ext_ts = 0;
		adapter->ptp_caps.pps = 0;
		adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
		adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64
		adapter->ptp_caps.gettime64 = igb_ptp_gettime64_82576;
		adapter->ptp_caps.settime64 = igb_ptp_settime64_82576;
#else
		adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
		adapter->ptp_caps.settime = igb_ptp_settime_82576;
#endif
		adapter->ptp_caps.enable = igb_ptp_feature_enable;
		adapter->cc.read = igb_ptp_read_82580;
		adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580);
		adapter->cc.mult = 1;
		adapter->cc.shift = 0;
		/* Enable the timer functions by clearing bit 31. */
		E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0);
		break;
	case e1000_i210:
	case e1000_i211:
#ifdef HAVE_PTP_1588_CLOCK_PINS
		for (i = 0; i < IGB_N_SDP; i++) {
			struct ptp_pin_desc *ppd = &adapter->sdp_config[i];

			snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i);
			ppd->index = i;
			ppd->func = PTP_PF_NONE;
		}
#endif /* HAVE_PTP_1588_CLOCK_PINS */
		snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
		adapter->ptp_caps.owner = THIS_MODULE;
		adapter->ptp_caps.max_adj = 62499999;
		adapter->ptp_caps.n_ext_ts = IGB_N_EXTTS;
		adapter->ptp_caps.n_per_out = IGB_N_PEROUT;
#ifdef HAVE_PTP_1588_CLOCK_PINS
		adapter->ptp_caps.n_pins = IGB_N_SDP;
#endif /* HAVE_PTP_1588_CLOCK_PINS */
		adapter->ptp_caps.pps = 1;
#ifdef HAVE_PTP_1588_CLOCK_PINS
		adapter->ptp_caps.pin_config = adapter->sdp_config;
#endif /* HAVE_PTP_1588_CLOCK_PINS */
		adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
		adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64
		adapter->ptp_caps.gettime64 = igb_ptp_gettime64_i210;
		adapter->ptp_caps.settime64 = igb_ptp_settime64_i210;
#else
		adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
		adapter->ptp_caps.settime = igb_ptp_settime_i210;
#endif
		adapter->ptp_caps.enable = igb_ptp_feature_enable_i210;
#ifdef HAVE_PTP_1588_CLOCK_PINS
		adapter->ptp_caps.verify = igb_ptp_verify_pin;
#endif /* HAVE_PTP_1588_CLOCK_PINS */
		/* Enable the timer functions by clearing bit 31. */
		E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0);
		break;
	default:
		adapter->ptp_clock = NULL;
		return;
	}

	E1000_WRITE_FLUSH(hw);

	spin_lock_init(&adapter->tmreg_lock);
	INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);

	/* Initialize the clock and overflow work for devices that need it. */
	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
		struct timespec64 ts = ktime_to_timespec64(ktime_get_real());

		igb_ptp_settime64_i210(&adapter->ptp_caps, &ts);
	} else {
		timecounter_init(&adapter->tc, &adapter->cc,
				 ktime_to_ns(ktime_get_real()));

		INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
				  igb_ptp_overflow_check_82576);

		schedule_delayed_work(&adapter->ptp_overflow_work,
				      IGB_SYSTIM_OVERFLOW_PERIOD);
	}

	/* Initialize the time sync interrupts for devices that support it. */
	if (hw->mac.type >= e1000_82580) {
		E1000_WRITE_REG(hw, E1000_TSIM, TSYNC_INTERRUPTS);
		E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_TS);
	}

	adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
	adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;

	adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
						&adapter->pdev->dev);
	if (IS_ERR(adapter->ptp_clock)) {
		adapter->ptp_clock = NULL;
		dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
	} else {
		dev_info(&adapter->pdev->dev, "added PHC on %s\n",
			 adapter->netdev->name);
		adapter->flags |= IGB_FLAG_PTP;
	}
}
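
For the 82576 the cyclecounter is configured with mult = 1 and shift = IGB_82576_TSYNC_SHIFT; the timecounter layer then converts raw cycle deltas to nanoseconds as ns = (cycles * mult) >> shift. A simplified sketch of that scaling step (the real timecounter core also carries the fractional remainder between calls):

/* cyclecounter scaling, simplified: raw cycle delta -> nanoseconds */
static u64 cycles_to_ns(u64 cycles, u32 mult, u32 shift)
{
	return (cycles * mult) >> shift;
}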
Example #7
/*
 * Support ACPI 3.0 AML Timer operand
 * Returns 64-bit free-running, monotonically increasing timer
 * with 100ns granularity
 */
u64 acpi_os_get_timer(void)
{
	u64 time_ns = ktime_to_ns(ktime_get());
	do_div(time_ns, 100);
	return time_ns;
}
static int audpcm_in_open(struct inode *inode, struct file *file)
{
	struct audio_in *audio = &the_audio_in;
	int rc;
	int encid;
	struct timespec ts;
	struct rtc_time tm;

	mutex_lock(&audio->lock);
	if (audio->opened) {
		rc = -EBUSY;
		goto done;
	}
	/* Settings will be re-config at AUDIO_SET_CONFIG,
	 * but at least we need to have initial config
	 */
	audio->channel_mode = AUDREC_CMD_MODE_MONO;
	audio->buffer_size = MONO_DATA_SIZE;
	audio->samp_rate = 8000;
	audio->enc_type = ENC_TYPE_WAV | audio->mode;
	audio->source = INTERNAL_CODEC_TX_SOURCE_MIX_MASK;

	encid = audpreproc_aenc_alloc(audio->enc_type, &audio->module_name,
			&audio->queue_ids);
	if (encid < 0) {
		MM_AUD_ERR("No free encoder available\n");
		rc = -ENODEV;
		goto done;
	}
	audio->enc_id = encid;

	rc = msm_adsp_get(audio->module_name, &audio->audrec,
			   &audrec_adsp_ops, audio);

	if (rc) {
		audpreproc_aenc_free(audio->enc_id);
		goto done;
	}

	audio->stopped = 0;
	audio->source = 0;
	audio->abort = 0;
	audpcm_in_flush(audio);
	audio->device_events = AUDDEV_EVT_DEV_RDY | AUDDEV_EVT_DEV_RLS |
				AUDDEV_EVT_FREQ_CHG;

	rc = auddev_register_evt_listner(audio->device_events,
					AUDDEV_CLNT_ENC, audio->enc_id,
					pcm_in_listener, (void *) audio);
	if (rc) {
		MM_AUD_ERR("failed to register device event listener\n");
		goto evt_error;
	}
	file->private_data = audio;
	audio->opened = 1;
	rc = 0;
done:
	mutex_unlock(&audio->lock);
	getnstimeofday(&ts);
	rtc_time_to_tm(ts.tv_sec, &tm);
	pr_aud_info1("[ATS][start_recording][successful] at %lld \
		(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n",
		ktime_to_ns(ktime_get()),
		tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
	return rc;
evt_error:
	msm_adsp_put(audio->audrec);
	audpreproc_aenc_free(audio->enc_id);
	mutex_unlock(&audio->lock);
	return rc;
}
Example #9
static int s3cfb_ioctl(struct fb_info *fb, unsigned int cmd, unsigned long arg)
{
	struct s3cfb_global *fbdev =
		platform_get_drvdata(to_platform_device(fb->device));
	struct fb_var_screeninfo *var = &fb->var;
	struct s3cfb_window *win = fb->par;
	struct s3cfb_lcd *lcd = fbdev->lcd;
	struct fb_fix_screeninfo *fix = &fb->fix;
	struct s3cfb_next_info next_fb_info;

	int ret = 0;

	union {
		struct s3cfb_user_window user_window;
		struct s3cfb_user_plane_alpha user_alpha;
		struct s3cfb_user_chroma user_chroma;
		int vsync;
	} p;

	switch (cmd) {
	case FBIO_WAITFORVSYNC:
		s3cfb_wait_for_vsync(fbdev);
		break;

	/* custom ioctl added to return the VSYNC timestamp */
	case S3CFB_WAIT_FOR_VSYNC:
		ret = s3cfb_wait_for_vsync(fbdev);
		if (ret > 0) {
			u64 nsecs = ktime_to_ns(fbdev->vsync_timestamp);

			if (copy_to_user((void __user *)arg, &nsecs,
					 sizeof(u64)))
				ret = -EFAULT;
		}
		break;

	case S3CFB_WIN_POSITION:
		if (copy_from_user(&p.user_window,
				   (struct s3cfb_user_window __user *)arg,
				   sizeof(p.user_window)))
			ret = -EFAULT;
		else {
			if (p.user_window.x < 0)
				p.user_window.x = 0;

			if (p.user_window.y < 0)
				p.user_window.y = 0;

			if (p.user_window.x + var->xres > lcd->width)
				win->x = lcd->width - var->xres;
			else
				win->x = p.user_window.x;

			if (p.user_window.y + var->yres > lcd->height)
				win->y = lcd->height - var->yres;
			else
				win->y = p.user_window.y;

			s3cfb_set_window_position(fbdev, win->id);
		}
		break;

	case S3CFB_WIN_SET_PLANE_ALPHA:
		if (copy_from_user(&p.user_alpha,
				   (struct s3cfb_user_plane_alpha __user *)arg,
				   sizeof(p.user_alpha)))
			ret = -EFAULT;
		else {
			win->alpha.mode = PLANE_BLENDING;
			win->alpha.channel = p.user_alpha.channel;
			win->alpha.value =
			    S3CFB_AVALUE(p.user_alpha.red,
					 p.user_alpha.green, p.user_alpha.blue);

			s3cfb_set_alpha_blending(fbdev, win->id);
		}
		break;

	case S3CFB_WIN_SET_CHROMA:
		if (copy_from_user(&p.user_chroma,
				   (struct s3cfb_user_chroma __user *)arg,
				   sizeof(p.user_chroma)))
			ret = -EFAULT;
		else {
			win->chroma.enabled = p.user_chroma.enabled;
			win->chroma.key = S3CFB_CHROMA(p.user_chroma.red,
						       p.user_chroma.green,
						       p.user_chroma.blue);

			s3cfb_set_chroma_key(fbdev, win->id);
		}
		break;

	case S3CFB_SET_VSYNC_INT:
		if (get_user(p.vsync, (int __user *)arg))
			ret = -EFAULT;
		else {
			if (p.vsync)
				s3cfb_set_global_interrupt(fbdev, 1);

			s3cfb_set_vsync_interrupt(fbdev, p.vsync);
		}
		break;

	case S3CFB_GET_CURR_FB_INFO:
		next_fb_info.phy_start_addr = fix->smem_start;
		next_fb_info.xres = var->xres;
		next_fb_info.yres = var->yres;
		next_fb_info.xres_virtual = var->xres_virtual;
		next_fb_info.yres_virtual = var->yres_virtual;
		next_fb_info.xoffset = var->xoffset;
		next_fb_info.yoffset = var->yoffset;
		next_fb_info.lcd_offset_x = 0;
		next_fb_info.lcd_offset_y = 0;

		if (copy_to_user((void *)arg,
				 (struct s3cfb_next_info *) &next_fb_info,
				 sizeof(struct s3cfb_next_info)))
			return -EFAULT;
		break;
	}

	return ret;
}
Example #10
static void gp2a_light_enable(struct gp2a_data *gp2a)
{
	gp2a_dbgmsg("starting poll timer, delay %lldns\n",
		    ktime_to_ns(gp2a->light_poll_delay));
	hrtimer_start(&gp2a->timer, gp2a->light_poll_delay, HRTIMER_MODE_REL);
}
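
gp2a->light_poll_delay is a ktime_t held in the driver state, so enabling the sensor is just arming a relative hrtimer with it. A minimal sketch of how such a poll delay might be initialized (names and the 200 ms default are assumptions, not the real driver's values):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static void light_poll_init(struct gp2a_data *gp2a)
{
	/* assumed default: poll every 200 ms */
	gp2a->light_poll_delay = ns_to_ktime(200 * NSEC_PER_MSEC);
	hrtimer_init(&gp2a->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	/* gp2a->timer.function would be set to the poll callback here */
}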
Example #11
static ssize_t poll_delay_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gp2a_data *gp2a = dev_get_drvdata(dev);
	return sprintf(buf, "%lld\n", ktime_to_ns(gp2a->light_poll_delay));
}
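
The natural counterpart is a poll_delay_store that parses a nanosecond value from userspace and converts it back with ns_to_ktime(). A hedged sketch (the real driver's store handler may differ, e.g. by restarting the timer under a lock):

static ssize_t poll_delay_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	struct gp2a_data *gp2a = dev_get_drvdata(dev);
	long long new_delay;

	if (kstrtoll(buf, 10, &new_delay))
		return -EINVAL;

	gp2a->light_poll_delay = ns_to_ktime(new_delay);
	return size;
}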
Example #12
/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	ktime_t expires_next, now, entry_time, delta;
	int retries = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event.tv64 = KTIME_MAX;

	raw_spin_lock(&cpu_base->lock);
	entry_time = now = hrtimer_update_base(cpu_base);
retry:
	cpu_base->in_hrtirq = 1;
	/*
	 * We set expires_next to KTIME_MAX here with cpu_base->lock
	 * held to prevent that a timer is enqueued in our queue via
	 * the migration code. This does not affect enqueueing of
	 * timers which run their callback and need to be requeued on
	 * this CPU.
	 */
	cpu_base->expires_next.tv64 = KTIME_MAX;

	__hrtimer_run_queues(cpu_base, now);

	/* Reevaluate the clock bases for the next expiry */
	expires_next = __hrtimer_get_next_event(cpu_base);
	/*
	 * Store the new expiry value so the migration code can verify
	 * against it.
	 */
	cpu_base->expires_next = expires_next;
	cpu_base->in_hrtirq = 0;
	raw_spin_unlock(&cpu_base->lock);

	/* Reprogramming necessary ? */
	if (!tick_program_event(expires_next, 0)) {
		cpu_base->hang_detected = 0;
		return;
	}

	/*
	 * The next timer was already expired due to:
	 * - tracing
	 * - long lasting callbacks
	 * - being scheduled away when running in a VM
	 *
	 * We need to prevent that we loop forever in the hrtimer
	 * interrupt routine. We give it 3 attempts to avoid
	 * overreacting on some spurious event.
	 *
	 * Acquire base lock for updating the offsets and retrieving
	 * the current time.
	 */
	raw_spin_lock(&cpu_base->lock);
	now = hrtimer_update_base(cpu_base);
	cpu_base->nr_retries++;
	if (++retries < 3)
		goto retry;
	/*
	 * Give the system a chance to do something else than looping
	 * here. We stored the entry time, so we know exactly how long
	 * we spent here. We schedule the next event this amount of
	 * time away.
	 */
	cpu_base->nr_hangs++;
	cpu_base->hang_detected = 1;
	raw_spin_unlock(&cpu_base->lock);
	delta = ktime_sub(now, entry_time);
	if ((unsigned int)delta.tv64 > cpu_base->max_hang_time)
		cpu_base->max_hang_time = (unsigned int) delta.tv64;
	/*
	 * Limit it to a sensible value as we enforce a longer
	 * delay. Give the CPU at least 100ms to catch up.
	 */
	if (delta.tv64 > 100 * NSEC_PER_MSEC)
		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
	else
		expires_next = ktime_add(now, delta);
	tick_program_event(expires_next, 1);
	printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
		    ktime_to_ns(delta));
}
Example #13
static int max77663_rtc_probe(struct platform_device *pdev)
{
	struct max77663_platform_data *parent_pdata =
						pdev->dev.parent->platform_data;
	static struct max77663_rtc *rtc;
	int ret = 0;
	u8 irq_status;

	rtc = kzalloc(sizeof(struct max77663_rtc), GFP_KERNEL);
	if (!rtc) {
		dev_err(&pdev->dev, "probe: kzalloc() failed\n");
		return -ENOMEM;
	}
	rtc->shutdown_ongoing = false;
	dev_set_drvdata(&pdev->dev, rtc);
	rtc->dev = &pdev->dev;
	mutex_init(&rtc->io_lock);

	ret = max77663_rtc_preinit(rtc);
	if (ret) {
		dev_err(&pdev->dev, "probe: Failed to rtc preinit\n");
		goto out_kfree;
	}

	rtc->rtc = rtc_device_register("max77663-rtc", &pdev->dev,
				       &max77663_rtc_ops, THIS_MODULE);
	if (IS_ERR_OR_NULL(rtc->rtc)) {
		dev_err(&pdev->dev, "probe: Failed to register rtc\n");
		ret = rtc->rtc ? PTR_ERR(rtc->rtc) : -ENOMEM;
		goto out_kfree;
	}
#if defined(CONFIG_MACH_LGE)
	{
		struct rtc_time tm;

		max77663_rtc_read_time(rtc->dev, &tm);

		if (tm.tm_year < (RTC_YEAR_BASE + 12)) {
			tm.tm_year = RTC_YEAR_BASE + 12;
			max77663_rtc_set_time(rtc->dev, &tm);
		}

		printk(KERN_INFO "max77663_rtc_probe: tm: %d-%02d-%02d %02d:%02d:%02d (%lld)\n",
			tm.tm_year, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour,
			tm.tm_min, tm.tm_sec, ktime_to_ns(ktime_get()));
	}
#endif

	if (parent_pdata->irq_base < 0)
		goto out;

	rtc->irq = parent_pdata->irq_base + MAX77663_IRQ_RTC;
	ret = request_threaded_irq(rtc->irq, NULL, max77663_rtc_irq,
				   IRQF_ONESHOT, "max77663-rtc", rtc);
	if (ret < 0) {
		dev_err(rtc->dev, "probe: Failed to request irq %d\n",
			rtc->irq);
		rtc->irq = -1;
	} else {
		device_init_wakeup(rtc->dev, 1);
		enable_irq_wake(rtc->irq);
	}

	ret = device_create_file(rtc->dev, &dev_attr_smplcount);
	if (ret < 0) {
		dev_err(rtc->dev,
			 "%s: failed to add smplcount sysfs: %d\n",
			 __func__, ret);	
	}

	ret = device_create_file(rtc->dev, &dev_attr_enablesmpl);
	if (ret < 0) {
		dev_err(rtc->dev,
			 "%s: failed to add enablesmpl sysfs: %d\n",
			 __func__, ret);	
	}

	return 0;

out_kfree:
	mutex_destroy(&rtc->io_lock);
	kfree(rtc);
out:
	return ret;
}
int aat1277_flashlight_control(int mode)
{
	int ret = 0;
	uint32_t flash_ns = ktime_to_ns(ktime_get());

	if (this_fl_str->mode_status == mode) {
		FLT_INFO_LOG("%s: mode is same: %d\n",
							"aat1277_flashlight", mode);

		if (!hrtimer_active(&this_fl_str->timer) &&
			this_fl_str->mode_status == FL_MODE_OFF) {
			FLT_INFO_LOG("flashlight hasn't been enable or" \
				" has already reset to 0 due to timeout\n");
			return ret;
		} else
			return -EINVAL;
	}

	spin_lock_irqsave(&this_fl_str->spin_lock,
						this_fl_str->spinlock_flags);
	if (this_fl_str->mode_status == FL_MODE_FLASH) {
		hrtimer_cancel(&this_fl_str->timer);
		flashlight_turn_off();
	}
	switch (mode) {
	case FL_MODE_OFF:
		flashlight_turn_off();
	break;
	case FL_MODE_TORCH:
		flashlight_hw_command(3, 4);
		flashlight_hw_command(0, 6);
		flashlight_hw_command(2, 4);
		this_fl_str->mode_status = FL_MODE_TORCH;
		this_fl_str->fl_lcdev.brightness = LED_HALF;
	break;
	case FL_MODE_TORCH_LED_A:
		flashlight_hw_command(3, 1);
		flashlight_hw_command(0, 15);
		flashlight_hw_command(2, 3);
		this_fl_str->mode_status = FL_MODE_TORCH_LED_A;
		this_fl_str->fl_lcdev.brightness = 1;
	break;
	case FL_MODE_TORCH_LED_B:
		flashlight_hw_command(3, 1);
		flashlight_hw_command(0, 15);
		flashlight_hw_command(2, 2);
		this_fl_str->mode_status = FL_MODE_TORCH_LED_B;
		this_fl_str->fl_lcdev.brightness = 2;
	break;
	case FL_MODE_FLASH:
		flashlight_hw_command(2, 4);
		gpio_direction_output(this_fl_str->gpio_flash, 1);
		this_fl_str->mode_status = FL_MODE_FLASH;
		this_fl_str->fl_lcdev.brightness = LED_FULL;
		hrtimer_start(&this_fl_str->timer,
			ktime_set(this_fl_str->flash_sw_timeout_ms / 1000,
				(this_fl_str->flash_sw_timeout_ms % 1000) *
					NSEC_PER_MSEC), HRTIMER_MODE_REL);
	break;
	case FL_MODE_PRE_FLASH:
		flashlight_hw_command(3, 3);
		flashlight_hw_command(0, 6);
		flashlight_hw_command(2, 4);
		this_fl_str->mode_status = FL_MODE_PRE_FLASH;
		this_fl_str->fl_lcdev.brightness = LED_HALF + 1;
	break;
	case FL_MODE_TORCH_LEVEL_1:
		flashlight_hw_command(3, 4);
		flashlight_hw_command(0, 15);
		flashlight_hw_command(2, 4);
		this_fl_str->mode_status = FL_MODE_TORCH_LEVEL_1;
		this_fl_str->fl_lcdev.brightness = LED_HALF - 2;
	break;
	case FL_MODE_TORCH_LEVEL_2:
		flashlight_hw_command(3, 4);
		flashlight_hw_command(0, 10);
		flashlight_hw_command(2, 4);
		this_fl_str->mode_status = FL_MODE_TORCH_LEVEL_2;
		this_fl_str->fl_lcdev.brightness = LED_HALF - 1;
	break;
	case FL_MODE_CAMERA_EFFECT_PRE_FLASH:
		flashlight_hw_command(3, 12);
		flashlight_hw_command(0, 1);
		flashlight_hw_command(2, 4);
		this_fl_str->mode_status = FL_MODE_CAMERA_EFFECT_PRE_FLASH;
		this_fl_str->fl_lcdev.brightness = 3;
	break;
	case FL_MODE_CAMERA_EFFECT_FLASH:
		flashlight_hw_command(3, 1);
		flashlight_hw_command(0, 1);
		flashlight_hw_command(2, 4);
		this_fl_str->mode_status = FL_MODE_CAMERA_EFFECT_FLASH;
		this_fl_str->fl_lcdev.brightness = 4;
		hrtimer_start(&this_fl_str->timer,
			ktime_set(this_fl_str->flash_sw_timeout_ms / 1000,
				(this_fl_str->flash_sw_timeout_ms % 1000) *
					NSEC_PER_MSEC), HRTIMER_MODE_REL);
	break;

	default:
		FLT_ERR_LOG("%s: unknown flash_light flags: %d\n",
							__func__, mode);
		ret = -EINVAL;
	break;
	}

	FLT_INFO_LOG("%s: mode: %d, %u\n", "aat1277_flashlight", mode,
		flash_ns/(1000*1000));

	spin_unlock_irqrestore(&this_fl_str->spin_lock,
						this_fl_str->spinlock_flags);
	return ret;
}
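
Both flash modes arm the same software timeout and spell out the millisecond-to-ktime_t split by hand; a small helper makes that conversion explicit (recent kernels provide ms_to_ktime() for the same thing):

/* milliseconds -> ktime_t, equivalent to the ktime_set() arithmetic above */
static ktime_t timeout_ms_to_ktime(unsigned int ms)
{
	return ktime_set(ms / 1000, (ms % 1000) * NSEC_PER_MSEC);
}

With it, each call collapses to hrtimer_start(&this_fl_str->timer, timeout_ms_to_ktime(this_fl_str->flash_sw_timeout_ms), HRTIMER_MODE_REL);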
Example #15
/**
 * rtc_timer_do_work - Expires rtc timers
 * @rtc rtc device
 * @timer timer being removed.
 *
 * Expires rtc timers. Reprograms next alarm event if needed.
 * Called via worktask.
 *
 * Serializes access to timerqueue via ops_lock mutex
 */
void rtc_timer_do_work(struct work_struct *work)
{
	struct rtc_timer *timer;
	struct timerqueue_node *next;
	ktime_t now;
	struct rtc_time tm;

	struct rtc_device *rtc =
		container_of(work, struct rtc_device, irqwork);

	mutex_lock(&rtc->ops_lock);
again:
	__rtc_read_time(rtc, &tm);
	now = rtc_tm_to_ktime(tm);
	while ((next = timerqueue_getnext(&rtc->timerqueue))) {
		if (next->expires > now)
			break;

		/* expire timer */
		timer = container_of(next, struct rtc_timer, node);
		timerqueue_del(&rtc->timerqueue, &timer->node);
		trace_rtc_timer_dequeue(timer);
		timer->enabled = 0;
		if (timer->func)
			timer->func(timer->rtc);

		trace_rtc_timer_fired(timer);
		/* Re-add/fwd periodic timers */
		if (ktime_to_ns(timer->period)) {
			timer->node.expires = ktime_add(timer->node.expires,
							timer->period);
			timer->enabled = 1;
			timerqueue_add(&rtc->timerqueue, &timer->node);
			trace_rtc_timer_enqueue(timer);
		}
	}

	/* Set next alarm */
	if (next) {
		struct rtc_wkalrm alarm;
		int err;
		int retry = 3;

		alarm.time = rtc_ktime_to_tm(next->expires);
		alarm.enabled = 1;
reprogram:
		err = __rtc_set_alarm(rtc, &alarm);
		if (err == -ETIME) {
			goto again;
		} else if (err) {
			if (retry-- > 0)
				goto reprogram;

			timer = container_of(next, struct rtc_timer, node);
			timerqueue_del(&rtc->timerqueue, &timer->node);
			trace_rtc_timer_dequeue(timer);
			timer->enabled = 0;
			dev_err(&rtc->dev, "__rtc_set_alarm: err=%d\n", err);
			goto again;
		}
	} else {
		/* queue drained: nothing to program; the upstream rtc
		 * core disables the alarm irq at this point */
		rtc_alarm_disable(rtc);
	}

	mutex_unlock(&rtc->ops_lock);
}
Example #16
static bool
time_mt(const struct sk_buff *skb, const struct xt_match_param *par)
{
	const struct xt_time_info *info = par->matchinfo;
	unsigned int packet_time;
	struct xtm current_time;
	s64 stamp;

	/*
	 * We cannot use get_seconds() instead of __net_timestamp() here.
	 * Suppose you have two rules:
	 * 	1. match before 13:00
	 * 	2. match after 13:00
	 * If you match against processing time (get_seconds) it
	 * may happen that the same packet matches both rules if
	 * it arrived at the right moment before 13:00.
	 */
	if (skb->tstamp.tv64 == 0)
		__net_timestamp((struct sk_buff *)skb);

	stamp = ktime_to_ns(skb->tstamp);
	stamp = div_s64(stamp, NSEC_PER_SEC);

	if (info->flags & XT_TIME_LOCAL_TZ)
		/* Adjust for local timezone */
		stamp -= 60 * sys_tz.tz_minuteswest;

	/*
	 * xt_time will match when _all_ of the following hold:
	 *   - 'now' is in the global time range date_start..date_end
	 *   - 'now' is in the monthday mask
	 *   - 'now' is in the weekday mask
	 *   - 'now' is in the daytime range time_start..time_end
	 * (and by default, libxt_time will set these so as to match)
	 */

	if (stamp < info->date_start || stamp > info->date_stop)
		return false;

	packet_time = localtime_1(&current_time, stamp);

	if (info->daytime_start < info->daytime_stop) {
		if (packet_time < info->daytime_start ||
		    packet_time > info->daytime_stop)
			return false;
	} else {
		if (packet_time < info->daytime_start &&
		    packet_time > info->daytime_stop)
			return false;
	}

	localtime_2(&current_time, stamp);

	if (!(info->weekdays_match & (1 << current_time.weekday)))
		return false;

	/* Do not spend time computing monthday if all days match anyway */
	if (info->monthdays_match != XT_TIME_ALL_MONTHDAYS) {
		localtime_3(&current_time, stamp);
		if (!(info->monthdays_match & (1 << current_time.monthday)))
			return false;
	}

	return true;
}
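
stamp is reduced from the packet's nanosecond timestamp to seconds with div_s64(), which, unlike do_div(), takes and returns signed values; that matters here because the timezone adjustment is a plain signed subtraction. A sketch of those two steps in isolation:

#include <linux/math64.h>

/* nanosecond skb timestamp -> local seconds, as in time_mt() above */
static s64 tstamp_to_local_secs(s64 stamp_ns, int tz_minuteswest)
{
	s64 secs = div_s64(stamp_ns, NSEC_PER_SEC);

	return secs - 60 * (s64)tz_minuteswest;
}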
Example #17
/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
		     int rw, u32 what, int error, int pdu_len, void *pdu_data)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;
	bool blk_tracer = blk_tracer_enabled;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[rw & WRITE];
	what |= MASK_TC_BIT(rw, SYNCIO);
	what |= MASK_TC_BIT(rw, AHEAD);
	what |= MASK_TC_BIT(rw, META);
	what |= MASK_TC_BIT(rw, DISCARD);
	what |= MASK_TC_BIT(rw, FLUSH);
	what |= MASK_TC_BIT(rw, FUA);

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		buffer = blk_tr->buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + pdu_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(bt, tsk);

	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len;

		if (pdu_len)
			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(buffer, event, 0, pc);
			return;
		}
	}

	local_irq_restore(flags);
}
Example #18
static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	u64 now = ktime_to_ns(ktime_get());
	struct fq_flow_head *head;
	struct sk_buff *skb;
	struct fq_flow *f;
	u32 rate;

	skb = fq_dequeue_head(sch, &q->internal);
	if (skb)
		goto out;
	fq_check_throttled(q, now);
begin:
	head = &q->new_flows;
	if (!head->first) {
		head = &q->old_flows;
		if (!head->first) {
			if (q->time_next_delayed_flow != ~0ULL)
				qdisc_watchdog_schedule_ns(&q->watchdog,
							   q->time_next_delayed_flow);
			return NULL;
		}
	}
	f = head->first;

	if (f->credit <= 0) {
		f->credit += q->quantum;
		head->first = f->next;
		fq_flow_add_tail(&q->old_flows, f);
		goto begin;
	}

	if (unlikely(f->head && now < f->time_next_packet)) {
		head->first = f->next;
		fq_flow_set_throttled(q, f);
		goto begin;
	}

	skb = fq_dequeue_head(sch, f);
	if (!skb) {
		head->first = f->next;
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && q->old_flows.first) {
			fq_flow_add_tail(&q->old_flows, f);
		} else {
			fq_flow_set_detached(f);
			q->inactive_flows++;
		}
		goto begin;
	}
	prefetch(&skb->end);
	f->time_next_packet = now;
	f->credit -= qdisc_pkt_len(skb);

	if (f->credit > 0 || !q->rate_enable)
		goto out;

	rate = q->flow_max_rate;
	if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT)
		rate = min(skb->sk->sk_pacing_rate, rate);

	if (rate != ~0U) {
		u32 plen = max(qdisc_pkt_len(skb), q->quantum);
		u64 len = (u64)plen * NSEC_PER_SEC;

		if (likely(rate))
			do_div(len, rate);
		/* Since socket rate can change later,
		 * clamp the delay to 125 ms.
		 * TODO: maybe segment the too big skb, as in commit
		 * e43ac79a4bc ("sch_tbf: segment too big GSO packets")
		 */
		if (unlikely(len > 125 * NSEC_PER_MSEC)) {
			len = 125 * NSEC_PER_MSEC;
			q->stat_pkts_too_long++;
		}

		f->time_next_packet = now + len;
	}
out:
	qdisc_bstats_update(sch, skb);
	qdisc_unthrottled(sch);
	return skb;
}
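
The pacing step deserves a worked number: for a 1500-byte packet paced at 12,500,000 bytes/s, len = 1500 * 10^9 / 12500000 = 120,000 ns, so the flow may transmit again 120 µs later; only delays above 125 ms hit the clamp. A reduced sketch of that computation:

/* inter-packet gap in ns for a given byte rate, as computed above */
static u64 pacing_delay_ns(u32 plen, u32 rate_bps)
{
	u64 len = (u64)plen * NSEC_PER_SEC;

	if (!rate_bps)
		return 0;
	do_div(len, rate_bps);		/* e.g. 1500 B at 12.5 MB/s -> 120000 ns */
	if (len > 125 * NSEC_PER_MSEC)	/* clamp: socket rate can change later */
		len = 125 * NSEC_PER_MSEC;
	return len;
}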
static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
		struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config config;
	struct bfin_mac_local *lp = netdev_priv(netdev);
	u16 ptpctl;
	u32 ptpfv1, ptpfv2, ptpfv3, ptpfoff;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	pr_debug("%s config flag:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
			__func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if ((config.tx_type != HWTSTAMP_TX_OFF) &&
			(config.tx_type != HWTSTAMP_TX_ON))
		return -ERANGE;

	ptpctl = bfin_read_EMAC_PTP_CTL();

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		/*
		 * Dont allow any timestamping
		 */
		ptpfv3 = 0xFFFFFFFF;
		bfin_write_EMAC_PTP_FV3(ptpfv3);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		/*
		 * Clear the five comparison mask bits (bits[12:8]) in EMAC_PTP_CTL)
		 * to enable all the field matches.
		 */
		ptpctl &= ~0x1F00;
		bfin_write_EMAC_PTP_CTL(ptpctl);
		/*
		 * Keep the default values of the EMAC_PTP_FOFF register.
		 */
		ptpfoff = 0x4A24170C;
		bfin_write_EMAC_PTP_FOFF(ptpfoff);
		/*
		 * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
		 * registers.
		 */
		ptpfv1 = 0x11040800;
		bfin_write_EMAC_PTP_FV1(ptpfv1);
		ptpfv2 = 0x0140013F;
		bfin_write_EMAC_PTP_FV2(ptpfv2);
		/*
		 * The default value (0xFFFC) allows the timestamping of both
		 * received Sync messages and Delay_Req messages.
		 */
		ptpfv3 = 0xFFFFFFFC;
		bfin_write_EMAC_PTP_FV3(ptpfv3);

		config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		/* Clear all five comparison mask bits (bits[12:8]) in the
		 * EMAC_PTP_CTL register to enable all the field matches.
		 */
		ptpctl &= ~0x1F00;
		bfin_write_EMAC_PTP_CTL(ptpctl);
		/*
		 * Keep the default values of the EMAC_PTP_FOFF register, except set
		 * the PTPCOF field to 0x2A.
		 */
		ptpfoff = 0x2A24170C;
		bfin_write_EMAC_PTP_FOFF(ptpfoff);
		/*
		 * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
		 * registers.
		 */
		ptpfv1 = 0x11040800;
		bfin_write_EMAC_PTP_FV1(ptpfv1);
		ptpfv2 = 0x0140013F;
		bfin_write_EMAC_PTP_FV2(ptpfv2);
		/*
		 * To allow the timestamping of Pdelay_Req and Pdelay_Resp, set
		 * the value to 0xFFF0.
		 */
		ptpfv3 = 0xFFFFFFF0;
		bfin_write_EMAC_PTP_FV3(ptpfv3);

		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		/*
		 * Clear bits 8 and 12 of the EMAC_PTP_CTL register to enable only the
		 * EFTM and PTPCM field comparison.
		 */
		ptpctl &= ~0x1100;
		bfin_write_EMAC_PTP_CTL(ptpctl);
		/*
		 * Keep the default values of all the fields of the EMAC_PTP_FOFF
		 * register, except set the PTPCOF field to 0x0E.
		 */
		ptpfoff = 0x0E24170C;
		bfin_write_EMAC_PTP_FOFF(ptpfoff);
		/*
		 * Program bits [15:0] of the EMAC_PTP_FV1 register to 0x88F7, which
		 * corresponds to PTP messages on the MAC layer.
		 */
		ptpfv1 = 0x110488F7;
		bfin_write_EMAC_PTP_FV1(ptpfv1);
		ptpfv2 = 0x0140013F;
		bfin_write_EMAC_PTP_FV2(ptpfv2);
		/*
		 * To allow the timestamping of Pdelay_Req and Pdelay_Resp
		 * messages, set the value to 0xFFF0.
		 */
		ptpfv3 = 0xFFFFFFF0;
		bfin_write_EMAC_PTP_FV3(ptpfv3);

		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	if (config.tx_type == HWTSTAMP_TX_OFF &&
	    bfin_mac_hwtstamp_is_none(config.rx_filter)) {
		ptpctl &= ~PTP_EN;
		bfin_write_EMAC_PTP_CTL(ptpctl);

		SSYNC();
	} else {
		ptpctl |= PTP_EN;
		bfin_write_EMAC_PTP_CTL(ptpctl);

		/*
		 * clear any existing timestamp
		 */
		bfin_read_EMAC_PTP_RXSNAPLO();
		bfin_read_EMAC_PTP_RXSNAPHI();

		bfin_read_EMAC_PTP_TXSNAPLO();
		bfin_read_EMAC_PTP_TXSNAPHI();

		/*
		 * Set registers so that rollover occurs soon to test this.
		 */
		bfin_write_EMAC_PTP_TIMELO(0x00000000);
		bfin_write_EMAC_PTP_TIMEHI(0xFF800000);

		SSYNC();

		lp->compare.last_update = 0;
		timecounter_init(&lp->clock,
				&lp->cycles,
				ktime_to_ns(ktime_get_real()));
		timecompare_update(&lp->compare, 0);
	}

	lp->stamp_cfg = config;
	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
Example #20
int gpio_event_input_func(struct gpio_event_input_devs *input_devs,
			struct gpio_event_info *info, void **data, int func)
{
	int ret;
	int i;
	unsigned long irqflags;
	struct gpio_event_input_info *di;
	struct gpio_input_state *ds = *data;
	struct kobject *keyboard_kobj;

	di = container_of(info, struct gpio_event_input_info, info);

#ifdef CONFIG_POWER_KEY_CLR_RESET
	gis = di;
#endif

	if (func == GPIO_EVENT_FUNC_SUSPEND) {
		if (ds->use_irq)
			for (i = 0; i < di->keymap_size; i++)
				disable_irq(gpio_to_irq(di->keymap[i].gpio));
#ifndef CONFIG_MFD_MAX8957
		hrtimer_cancel(&ds->timer);
#endif
		return 0;
	}
	if (func == GPIO_EVENT_FUNC_RESUME) {
		spin_lock_irqsave(&ds->irq_lock, irqflags);
		if (ds->use_irq)
			for (i = 0; i < di->keymap_size; i++)
				enable_irq(gpio_to_irq(di->keymap[i].gpio));
#ifndef CONFIG_MFD_MAX8957
		hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
#endif
		spin_unlock_irqrestore(&ds->irq_lock, irqflags);
		return 0;
	}

	if (func == GPIO_EVENT_FUNC_INIT) {
		if (ktime_to_ns(di->poll_time) <= 0)
			di->poll_time = ktime_set(0, 20 * NSEC_PER_MSEC);

		*data = ds = kzalloc(sizeof(*ds) + sizeof(ds->key_state[0]) *
					di->keymap_size, GFP_KERNEL);
		if (ds == NULL) {
			ret = -ENOMEM;
			KEY_LOGE("KEY_ERR: %s: "
				"Failed to allocate private data\n", __func__);
			goto err_ds_alloc_failed;
		}
		ds->debounce_count = di->keymap_size;
		ds->input_devs = input_devs;
		ds->info = di;
		wake_lock_init(&ds->wake_lock, WAKE_LOCK_SUSPEND, "gpio_input");
#ifdef CONFIG_MFD_MAX8957
		wake_lock_init(&ds->key_pressed_wake_lock, WAKE_LOCK_SUSPEND, "pwr_key_pressed");
#endif
#ifdef CONFIG_POWER_KEY_CLR_RESET
		wake_lock_init(&key_reset_clr_wake_lock, WAKE_LOCK_SUSPEND, "gpio_input_pwr_clear");
#endif
		spin_lock_init(&ds->irq_lock);
		if (board_build_flag() == 0)
			ds->debug_log = 0;
		else
			ds->debug_log = 1;

		for (i = 0; i < di->keymap_size; i++) {
			int dev = di->keymap[i].dev;
			if (dev >= input_devs->count) {
				KEY_LOGE("KEY_ERR: %s: bad device "
					"index %d >= %d for key code %d\n",
					__func__, dev, input_devs->count,
					di->keymap[i].code);
				ret = -EINVAL;
				goto err_bad_keymap;
			}
			input_set_capability(input_devs->dev[dev], di->type,
					     di->keymap[i].code);
			ds->key_state[i].ds = ds;
			ds->key_state[i].debounce = DEBOUNCE_UNKNOWN;
		}

		for (i = 0; i < di->keymap_size; i++) {
			ret = gpio_request(di->keymap[i].gpio, "gpio_kp_in");
			if (ret) {
				KEY_LOGE("KEY_ERR: %s: gpio_request "
					"failed for %d\n", __func__, di->keymap[i].gpio);
				goto err_gpio_request_failed;
			}
			ret = gpio_direction_input(di->keymap[i].gpio);
			if (ret) {
				KEY_LOGE("KEY_ERR: %s: "
					"gpio_direction_input failed for %d\n",
					__func__, di->keymap[i].gpio);
				goto err_gpio_configure_failed;
			}
		}

		if (di->setup_input_gpio)
			di->setup_input_gpio();
#ifdef CONFIG_MFD_MAX8957
		ki_queue = create_singlethread_workqueue("ki_queue");
#endif

		ret = gpio_event_input_request_irqs(ds);

		keyboard_kobj = kobject_create_and_add("keyboard", NULL);
		if (keyboard_kobj == NULL) {
			KEY_LOGE("KEY_ERR: %s: subsystem_register failed\n", __func__);
			ret = -ENOMEM;
			return ret;
		}
		if (sysfs_create_file(keyboard_kobj, &dev_attr_vol_wakeup.attr))
			KEY_LOGE("KEY_ERR: %s: sysfs_create_file failed\n",
					__func__);
		wakeup_bitmask = 0;
		set_wakeup = 0;
		spin_lock_irqsave(&ds->irq_lock, irqflags);
		ds->use_irq = ret == 0;

		KEY_LOGI("GPIO Input Driver: Start gpio inputs for %s%s in %s "
			"mode\n", input_devs->dev[0]->name,
			(input_devs->count > 1) ? "..." : "",
			ret == 0 ? "interrupt" : "polling");

#ifndef CONFIG_MFD_MAX8957
		hrtimer_init(&ds->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		ds->timer.function = gpio_event_input_timer_func;
		hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
#endif
		spin_unlock_irqrestore(&ds->irq_lock, irqflags);
		return 0;
	}

	ret = 0;
	spin_lock_irqsave(&ds->irq_lock, irqflags);
#ifndef CONFIG_MFD_MAX8957
	hrtimer_cancel(&ds->timer);
#endif
	if (ds->use_irq) {
		for (i = di->keymap_size - 1; i >= 0; i--) {
			free_irq(gpio_to_irq(di->keymap[i].gpio),
				 &ds->key_state[i]);
		}
	}
	spin_unlock_irqrestore(&ds->irq_lock, irqflags);

	for (i = di->keymap_size - 1; i >= 0; i--) {
err_gpio_configure_failed:
		gpio_free(di->keymap[i].gpio);
err_gpio_request_failed:
		;
	}
err_bad_keymap:
	wake_lock_destroy(&ds->wake_lock);
#ifdef CONFIG_MFD_MAX8957
	wake_lock_destroy(&ds->key_pressed_wake_lock);
#endif
#ifdef CONFIG_POWER_KEY_CLR_RESET
	wake_lock_destroy(&key_reset_clr_wake_lock);
#endif
	kfree(ds);
err_ds_alloc_failed:
	return ret;
}
Example #21
static void capture_work_1(struct work_struct *work)
{
	char buf[64];
	char *envp[2];

	snprintf(buf, sizeof(buf), "CAPTURE1=BUFFER%d,TIME=%llu",bsp_disp_capture_screen_get_buffer_id(1), ktime_to_ns(capture_info.capture_timestamp[1]));
	envp[0] = buf;
	envp[1] = NULL;
	kobject_uevent_env(&capture_info.dev->kobj, KOBJ_CHANGE, envp);
}
Example #22
/* NOTE: doesn't handle new transfer while old is running. */
int dma_init_kthread(void *data) {

  ssize_t last_size;
  last_size = 0;
  
  while ( !kthread_should_stop() ) {

    /* done with previous transfer */
    total_size -= dma_size;

    /* unmap last DMA mapping */
    pci_unmap_single(dev_ptr, dma_bus_addr, 
		     dma_size, PCI_DMA_TODEVICE);

    if ( total_size > 0 ) {
      
      /* update transfered size thus far */
      last_size += dma_size;

      /* assign next transfer size */
      dma_size = MIN(total_size, 4 * ALMOST_EMPTY * 1024);

      /* map next DMA buffer */
      dma_bus_addr = pci_map_single(dev_ptr, dma_virt_addr + last_size, 
				    dma_size, PCI_DMA_TODEVICE);

     #if DEBUG != 0
      printk(KERN_DEBUG "NEXT DMA TRANSFER OF SIZE %u, "
	     "offset last_size %u, DELAY %u microseconds\n", 
	     (unsigned)dma_size, (unsigned)last_size, 
	     (unsigned)dma_delay);
     #endif
    }  
    else {
      kfree(dma_virt_addr);
      goto sleep;
    }

    /* wait for FIFO to deplete to start transfer */
    if ( dma_delay ) {
      dma_delay /= 1000; /* ns -> ~us */
      usleep_range(dma_delay - 1, dma_delay);
    }

    /* clear interrupts and disable DMA */
    iowrite8(0x08, timing_card[12].base + 0xa9);
    iowrite8(0x00, timing_card[12].base + 0xa9);

    /* Mode - 32 bit bus, don't increment local addr, enable interrupt */
    iowrite32(cpu_to_le32(0x00020c01),   timing_card[12].base + 0x94); 

    /* PCI and local bus addresses, transfer count, transfer direction */
    iowrite32(cpu_to_le32(dma_bus_addr), timing_card[12].base + 0x98);
    iowrite32(cpu_to_le32(0x14),         timing_card[12].base + 0x9c);
    iowrite32(cpu_to_le32(dma_size),     timing_card[12].base + 0xa0);
    iowrite32(0x00,                      timing_card[12].base + 0xa4);

    /* Enable DMA */
    iowrite8( 0x01, timing_card[12].base + 0xa9);
 
    /* Start DMA, record start time */
    iowrite8( 0x03, timing_card[12].base + 0xa9);
    start_ns = ktime_to_ns(ktime_get());
    
   sleep:
    set_current_state(TASK_INTERRUPTIBLE);
    schedule();
  }
  
  /* I am dying now */
  return 0;
} /* end kthread function */
Example #23
ssize_t mdp4_dsi_video_show_event(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
    int cndx;
    struct vsycn_ctrl *vctrl;
    ssize_t ret = 0;
    unsigned long flags;
    u64 vsync_tick;
    ktime_t ctime;
    u32 ctick, ptick;
    int diff;


    cndx = 0;
    vctrl = &vsync_ctrl_db[0];

    if (atomic_read(&vctrl->suspend) > 0 ||
            atomic_read(&vctrl->vsync_resume) == 0)
        return 0;

    /*
     * show_event threads keep spinning on vctrl->vsync_comp, and
     * there is a race condition on x.done if multiple threads block
     * at wait_for_completion(&vctrl->vsync_comp):
     *
     * if the show_event thread wakes up first, it comes back and
     * calls INIT_COMPLETION(vctrl->vsync_comp), which sets x.done = 0;
     * then the second thread wakes up and sets x.done = 0x7ffffffd,
     * after which wait_for_completion will never wait.
     * To avoid this, force the show_event thread to sleep 5 ms here,
     * since it has a full vsync period (16.6 ms) to wait
     */
    ctime = ktime_get();
    ctick = (u32)ktime_to_us(ctime);
    ptick = (u32)ktime_to_us(vctrl->vsync_time);
    ptick += 5000;	/* 5ms */
    diff = ptick - ctick;
    if (diff > 0) {
        if (diff > 1000) /* 1 ms */
            diff = 1000;
        usleep(diff);
    }

    spin_lock_irqsave(&vctrl->spin_lock, flags);
    if (vctrl->wait_vsync_cnt == 0)
        INIT_COMPLETION(vctrl->vsync_comp);
    vctrl->wait_vsync_cnt++;
    spin_unlock_irqrestore(&vctrl->spin_lock, flags);
    ret = wait_for_completion_interruptible_timeout(&vctrl->vsync_comp,
            msecs_to_jiffies(VSYNC_PERIOD * 4));
    if (ret <= 0) {
        vctrl->wait_vsync_cnt = 0;
        vsync_tick = ktime_to_ns(ktime_get());
        __DLOG__(vsync_tick, vctrl->wait_vsync_cnt,0x3333);
        ret = snprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_tick);
        buf[strlen(buf) + 1] = '\0';
        return ret;
    }

    spin_lock_irqsave(&vctrl->spin_lock, flags);
    vsync_tick = ktime_to_ns(vctrl->vsync_time);
    spin_unlock_irqrestore(&vctrl->spin_lock, flags);

    __DLOG__(vsync_tick, vctrl->wait_vsync_cnt,0x9999);
    ret = snprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_tick);
    buf[strlen(buf) + 1] = '\0';
    return ret;
}
Example #24
/* function to initiate a DMA transfer */
static ssize_t dma_transfer(struct file *filp, const char __user *buf,
          size_t count, loff_t *f_pos) {

  int rc;
  u32 tmp32;

 #if DEBUG != 0
  printk(KERN_DEBUG "dma_transfer() entry\n");
 #endif

  /* kill old kthread; kthread_stop() already wakes the thread and
   * waits for it to exit, so a wake_up_process() afterwards would
   * touch a dead task */
  if ( dma_kthread )
    kthread_stop(dma_kthread);
  /* start new kthread */
  dma_kthread = kthread_create(dma_init_kthread, NULL, "dma_kthread");
  
  /* initialize first DMA transfer */
  total_size = count;
  dma_size = MIN(count, FIFO_SIZE * fifo_width);
  dma_virt_addr = kmalloc(total_size, GFP_KERNEL | GFP_DMA);
  dma_bus_addr = pci_map_single(dev_ptr, dma_virt_addr, dma_size, 
				PCI_DMA_TODEVICE);

  /* get data */
  rc = copy_from_user(dma_virt_addr, buf, count);
  if (rc) {
    printk(KERN_ALERT "timing_write() bad copy_from_user\n");
    return -EFAULT;
  }

  /* enable interrupts from DMA done activity */
  tmp32 = ioread32(timing_card[12].base + PLX9080_INTCSR);
  iowrite32( tmp32 | ( 0x1 << 8 ) | ( 0x1 << 19 ),
	     timing_card[12].base + PLX9080_INTCSR);

  /* clear interrupts and disable DMA */
  iowrite8(0x08, timing_card[12].base + 0xa9);
  iowrite8(0x00, timing_card[12].base + 0xa9);

  /* Mode - 32 bit bus, don't increment local addr, enable interrupt */
  iowrite32(cpu_to_le32(0x00020c01),   timing_card[12].base + 0x94); 

  /* PCI and local bus addresses, transfer count, transfer direction */
  iowrite32(cpu_to_le32(dma_bus_addr), timing_card[12].base + 0x98);
  iowrite32(cpu_to_le32(0x14),         timing_card[12].base + 0x9c);
  iowrite32(cpu_to_le32(dma_size),     timing_card[12].base + 0xa0);
  iowrite32(0x00,                      timing_card[12].base + 0xa4);

  /* Enable DMA */
  iowrite8( 0x01, timing_card[12].base + 0xa9);
 
  /* Start DMA */
  iowrite8( 0x03, timing_card[12].base + 0xa9);
  start_ns =  ktime_to_ns(ktime_get());

 #if DEBUG != 0
  printk(KERN_DEBUG "dma_transfer() exit success\n");
 #endif 

  return count;
} /* end DMA transfer function */
/*
 * journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
void journal_commit_transaction(journal_t *journal)
{
	transaction_t *commit_transaction;
	struct journal_head *jh, *new_jh, *descriptor;
	struct buffer_head **wbuf = journal->j_wbuf;
	int bufs;
	int flags;
	int err;
	unsigned int blocknr;
	ktime_t start_time;
	u64 commit_time;
	char *tagp = NULL;
	journal_header_t *header;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i;
	struct blk_plug plug;

	/*
	 * First job: lock down the current transaction and wait for
	 * all outstanding updates to complete.
	 */

	/* Do we need to erase the effects of a prior journal_flush? */
	if (journal->j_flags & JFS_FLUSHED) {
		jbd_debug(3, "super block updated\n");
		journal_update_superblock(journal, 1);
	} else {
		jbd_debug(3, "superblock not updated\n");
	}

	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

	commit_transaction = journal->j_running_transaction;
	J_ASSERT(commit_transaction->t_state == T_RUNNING);

	trace_jbd_start_commit(journal, commit_transaction);
	jbd_debug(1, "JBD: starting commit of transaction %d\n",
			commit_transaction->t_tid);

	spin_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_LOCKED;

	trace_jbd_commit_locking(journal, commit_transaction);
	spin_lock(&commit_transaction->t_handle_lock);
	while (commit_transaction->t_updates) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_updates, &wait,
					TASK_UNINTERRUPTIBLE);
		if (commit_transaction->t_updates) {
			spin_unlock(&commit_transaction->t_handle_lock);
			spin_unlock(&journal->j_state_lock);
			schedule();
			spin_lock(&journal->j_state_lock);
			spin_lock(&commit_transaction->t_handle_lock);
		}
		finish_wait(&journal->j_wait_updates, &wait);
	}
	spin_unlock(&commit_transaction->t_handle_lock);

	J_ASSERT (commit_transaction->t_outstanding_credits <=
			journal->j_max_transaction_buffers);

	/*
	 * First thing we are allowed to do is to discard any remaining
	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
	 * that there are no such buffers: if a large filesystem
	 * operation like a truncate needs to split itself over multiple
	 * transactions, then it may try to do a journal_restart() while
	 * there are still BJ_Reserved buffers outstanding.  These must
	 * be released cleanly from the current transaction.
	 *
	 * In this case, the filesystem must still reserve write access
	 * again before modifying the buffer in the new transaction, but
	 * we do not require it to remember exactly which old buffers it
	 * has reserved.  This is consistent with the existing behaviour
	 * that multiple journal_get_write_access() calls to the same
	 * buffer are perfectly permissible.
	 */
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		/*
		 * A journal_get_undo_access()+journal_release_buffer() may
		 * leave undo-committed data.
		 */
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			jbd_lock_bh_state(bh);
			jbd_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			jbd_unlock_bh_state(bh);
		}
		journal_refile_buffer(journal, jh);
	}

	/*
	 * Now try to drop any written-back buffers from the journal's
	 * checkpoint lists.  We do this *before* commit because it potentially
	 * frees some memory.
	 */
	spin_lock(&journal->j_list_lock);
	__journal_clean_checkpoint_list(journal);
	spin_unlock(&journal->j_list_lock);

	jbd_debug (3, "JBD: commit phase 1\n");

	/*
	 * Clear the revoked flag to reflect that there are no revoked
	 * buffers in the next transaction, which is about to start.
	 */
	journal_clear_buffer_revoked_flags(journal);

	/*
	 * Switch to a new revoke table.
	 */
	journal_switch_revoke_table(journal);

	trace_jbd_commit_flushing(journal, commit_transaction);
	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	start_time = ktime_get();
	commit_transaction->t_log_start = journal->j_head;
	wake_up(&journal->j_wait_transaction_locked);
	spin_unlock(&journal->j_state_lock);

	jbd_debug (3, "JBD: commit phase 2\n");

	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists.  Data blocks go first.
	 */
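	/*
	 * blk_start_plug()/blk_finish_plug() batch the bios submitted in
	 * between, letting the block layer merge and dispatch them as a
	 * group instead of one request at a time.
	 */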
	blk_start_plug(&plug);
	err = journal_submit_data_buffers(journal, commit_transaction,
					  WRITE_SYNC);
	blk_finish_plug(&plug);

	/*
	 * Wait for all previously submitted IO to complete.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_locked_list) {
		struct buffer_head *bh;

		jh = commit_transaction->t_locked_list->b_tprev;
		bh = jh2bh(jh);
		get_bh(bh);
		if (buffer_locked(bh)) {
			spin_unlock(&journal->j_list_lock);
			wait_on_buffer(bh);
			spin_lock(&journal->j_list_lock);
		}
		if (unlikely(!buffer_uptodate(bh))) {
			if (!trylock_page(bh->b_page)) {
				spin_unlock(&journal->j_list_lock);
				lock_page(bh->b_page);
				spin_lock(&journal->j_list_lock);
			}
			if (bh->b_page->mapping)
				set_bit(AS_EIO, &bh->b_page->mapping->flags);

			unlock_page(bh->b_page);
			SetPageError(bh->b_page);
			err = -EIO;
		}
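		/*
		 * inverted_lock() must trylock the bh_state lock because we
		 * already hold j_list_lock, the reverse of the usual nesting;
		 * on failure it drops j_list_lock and returns 0, so we retake
		 * the lock and retry this buffer.
		 */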
		if (!inverted_lock(journal, bh)) {
			put_bh(bh);
			spin_lock(&journal->j_list_lock);
			continue;
		}
		if (buffer_jbd(bh) && bh2jh(bh) == jh &&
		    jh->b_transaction == commit_transaction &&
		    jh->b_jlist == BJ_Locked)
			__journal_unfile_buffer(jh);
		jbd_unlock_bh_state(bh);
		release_data_buffer(bh);
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);

	if (err) {
		char b[BDEVNAME_SIZE];

		printk(KERN_WARNING
			"JBD: Detected IO errors while flushing file data "
			"on %s\n", bdevname(journal->j_fs_dev, b));
		if (journal->j_flags & JFS_ABORT_ON_SYNCDATA_ERR)
			journal_abort(journal, err);
		err = 0;
	}

	blk_start_plug(&plug);

	journal_write_revoke_records(journal, commit_transaction, WRITE_SYNC);

	/*
	 * If we found any dirty or locked buffers, then we should have
	 * looped back up to the write_out_data label.  If there weren't
	 * any then journal_clean_data_list should have wiped the list
	 * clean by now, so check that it is in fact empty.
	 */
	J_ASSERT (commit_transaction->t_sync_datalist == NULL);

	jbd_debug (3, "JBD: commit phase 3\n");

	/*
	 * Way to go: we have now written out all of the data for a
	 * transaction!  Now comes the tricky part: we need to write out
	 * metadata.  Loop over the transaction's entire buffer list:
	 */
	spin_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_COMMIT;
	spin_unlock(&journal->j_state_lock);

	trace_jbd_commit_logging(journal, commit_transaction);
	J_ASSERT(commit_transaction->t_nr_buffers <=
		 commit_transaction->t_outstanding_credits);

	descriptor = NULL;
	bufs = 0;
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled... */

		jh = commit_transaction->t_buffers;

		/* If we're in abort mode, we just un-journal the buffer and
		   release it. */

		if (is_journal_aborted(journal)) {
			clear_buffer_jbddirty(jh2bh(jh));
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			journal_refile_buffer(journal, jh);
			/* If that was the last one, we need to clean up
			 * any descriptor buffers which may have been
			 * already allocated, even if we are now
			 * aborting. */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}

		/* Make sure we have a descriptor block in which to
		   record the metadata buffer. */

		if (!descriptor) {
			struct buffer_head *bh;

			J_ASSERT (bufs == 0);

			jbd_debug(4, "JBD: get descriptor\n");

			descriptor = journal_get_descriptor_buffer(journal);
			if (!descriptor) {
				journal_abort(journal, -EIO);
				continue;
			}

			bh = jh2bh(descriptor);
			jbd_debug(4, "JBD: got buffer %llu (%p)\n",
				(unsigned long long)bh->b_blocknr, bh->b_data);
			header = (journal_header_t *)&bh->b_data[0];
			header->h_magic     = cpu_to_be32(JFS_MAGIC_NUMBER);
			header->h_blocktype = cpu_to_be32(JFS_DESCRIPTOR_BLOCK);
			header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);

			tagp = &bh->b_data[sizeof(journal_header_t)];
			space_left = bh->b_size - sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(bh);
			set_buffer_dirty(bh);
			wbuf[bufs++] = bh;

			/* Record it so that we can wait for IO
                           completion later */
			BUFFER_TRACE(bh, "ph3: file as descriptor");
			journal_file_buffer(descriptor, commit_transaction,
					BJ_LogCtl);
		}

		/* Where is the buffer to be written? */

		err = journal_next_log_block(journal, &blocknr);
		/* If the block mapping failed, just abandon the buffer
		   and repeat this loop: we'll fall into the
		   refile-on-abort condition above. */
		if (err) {
			journal_abort(journal, err);
			continue;
		}

		/*
		 * start_this_handle() uses t_outstanding_credits to determine
		 * the free space in the log, but this counter is changed
		 * by journal_next_log_block() also.
		 */
		commit_transaction->t_outstanding_credits--;

		/* Bump b_count to prevent truncate from stumbling over
                   the shadowed buffer!  @@@ This can go if we ever get
                   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
		get_bh(jh2bh(jh));

		/* Make a temporary IO buffer with which to write it out
                   (this will requeue both the metadata buffer and the
                   temporary IO buffer). new_bh goes on BJ_IO */

		set_buffer_jwrite(jh2bh(jh));
		/*
		 * akpm: journal_write_metadata_buffer() sets
		 * new_bh->b_transaction to commit_transaction.
		 * We need to clean this up before we release new_bh
		 * (which is of type BJ_IO)
		 */
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = journal_write_metadata_buffer(commit_transaction,
						      jh, &new_jh, blocknr);
		set_buffer_jwrite(jh2bh(new_jh));
		wbuf[bufs++] = jh2bh(new_jh);

		/* Record the new block's tag in the current descriptor
                   buffer */

		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JFS_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JFS_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *) tagp;
		tag->t_blocknr = cpu_to_be32(jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be32(tag_flag);
		tagp += sizeof(journal_block_tag_t);
		space_left -= sizeof(journal_block_tag_t);

		if (first_tag) {
			memcpy (tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}

		/* If there's no more to do, or if the descriptor is full,
		   let the IO rip! */
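		/*
		 * The "+ 16" in the space check conservatively reserves room
		 * for the 16-byte UUID that is copied in after the first tag
		 * of a descriptor block (see the first_tag handling above).
		 */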

		if (bufs == journal->j_wbufsize ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < sizeof(journal_block_tag_t) + 16) {

			jbd_debug(4, "JBD: Submit %d IOs\n", bufs);

			/* Write an end-of-descriptor marker before
                           submitting the IOs.  "tag" still points to
                           the last tag we set up. */

			tag->t_flags |= cpu_to_be32(JFS_FLAG_LAST_TAG);

start_journal_io:
			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];
				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(WRITE_SYNC, bh);
			}
			cond_resched();

			/* Force a new descriptor to be generated next
                           time round the loop. */
			descriptor = NULL;
			bufs = 0;
		}
	}

	blk_finish_plug(&plug);

	/* Lo and behold: we have just managed to send a transaction to
           the log.  Before we can commit it, wait for the IO so far to
           complete.  Control buffers being written are on the
           transaction's t_log_list queue, and metadata buffers are on
           the t_iobuf_list queue.

	   Wait for the buffers in reverse order.  That way we are
	   less likely to be woken up until all IOs have completed, and
	   so we incur less scheduling load.
	*/

	jbd_debug(3, "JBD: commit phase 4\n");

	/*
	 * akpm: these are BJ_IO, and j_list_lock is not needed.
	 * See __journal_try_to_free_buffer.
	 */
wait_for_iobuf:
	while (commit_transaction->t_iobuf_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_iobuf_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_iobuf;
		}
		if (cond_resched())
			goto wait_for_iobuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		clear_buffer_jwrite(bh);

		JBUFFER_TRACE(jh, "ph4: unfile after journal write");
		journal_unfile_buffer(journal, jh);

		/*
		 * ->t_iobuf_list should contain only dummy buffer_heads
		 * which were created by journal_write_metadata_buffer().
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		journal_put_journal_head(jh);
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);

		/* We also have to unlock and free the corresponding
                   shadowed buffer */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_buffer_jwrite(bh);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));

		/* The metadata is now released for reuse, but we need
                   to remember it against this transaction so that when
                   we finally commit, we can do any checkpointing
                   required. */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		journal_file_buffer(jh, commit_transaction, BJ_Forget);
		/*
		 * Wake up any transactions which were waiting for this
		 * IO to complete. The barrier must be here so that changes
		 * by journal_file_buffer() take effect before wake_up_bit()
		 * does the waitqueue check.
		 */
		smp_mb();
		wake_up_bit(&bh->b_state, BH_Unshadow);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}

	J_ASSERT (commit_transaction->t_shadow_list == NULL);

	jbd_debug(3, "JBD: commit phase 5\n");

	/* Here we wait for the revoke record and descriptor record buffers */
wait_for_ctlbuf:
	while (commit_transaction->t_log_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_log_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_ctlbuf;
		}
		if (cond_resched())
			goto wait_for_ctlbuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		journal_unfile_buffer(journal, jh);
		journal_put_journal_head(jh);
		__brelse(bh);		/* One for getblk */
		/* AKPM: bforget here */
	}

	if (err)
		journal_abort(journal, err);

	jbd_debug(3, "JBD: commit phase 6\n");

	/* All metadata is written, now write commit record and do cleanup */
	spin_lock(&journal->j_state_lock);
	J_ASSERT(commit_transaction->t_state == T_COMMIT);
	commit_transaction->t_state = T_COMMIT_RECORD;
	spin_unlock(&journal->j_state_lock);

	if (journal_write_commit_record(journal, commit_transaction))
		err = -EIO;

	if (err)
		journal_abort(journal, err);

	/* End of a transaction!  Finally, we can do checkpoint
           processing: any buffers committed as a result of this
           transaction can be removed from any checkpoint list it was on
           before. */

	jbd_debug(3, "JBD: commit phase 7\n");

	J_ASSERT(commit_transaction->t_sync_datalist == NULL);
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_iobuf_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);
	J_ASSERT(commit_transaction->t_log_list == NULL);

restart_loop:
	/*
	 * As there are other places (journal_unmap_buffer()) adding buffers
	 * to this list, we have to be careful and hold the j_list_lock.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;
		int try_to_free = 0;

		jh = commit_transaction->t_forget;
		spin_unlock(&journal->j_list_lock);
		bh = jh2bh(jh);
		/*
		 * Get a reference so that bh cannot be freed before we are
		 * done with it.
		 */
		get_bh(bh);
		jbd_lock_bh_state(bh);
		J_ASSERT_JH(jh,	jh->b_transaction == commit_transaction ||
			jh->b_transaction == journal->j_running_transaction);

		/*
		 * If there is undo-protected committed data against
		 * this buffer, then we can remove it now.  If it is a
		 * buffer needing such protection, the old frozen_data
		 * field now points to a committed version of the
		 * buffer, so rotate that field to the new committed
		 * data.
		 *
		 * Otherwise, we can just throw away the frozen data now.
		 */
		if (jh->b_committed_data) {
			jbd_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
			}
		} else if (jh->b_frozen_data) {
			jbd_free(jh->b_frozen_data, bh->b_size);
			jh->b_frozen_data = NULL;
		}

		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			__journal_remove_checkpoint(jh);
		}

		/* Only re-checkpoint the buffer_head if it is marked
		 * dirty.  If the buffer was added to the BJ_Forget list
		 * by journal_forget, it may no longer be dirty and
		 * there's no point in keeping a checkpoint record for
		 * it. */

		/* A buffer which has been freed while still being
		 * journaled by a previous transaction may end up still
		 * being dirty here, but we want to avoid writing back
		 * that buffer in the future once the "add to orphan"
		 * operation has been committed.  That's not only a
		 * performance gain; it also stops aliasing problems if
		 * the buffer is left behind for writeback and gets
		 * reallocated for another use in a different page. */
		if (buffer_freed(bh) && !jh->b_next_transaction) {
			clear_buffer_freed(bh);
			clear_buffer_jbddirty(bh);
		}

		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__journal_insert_checkpoint(jh, commit_transaction);
			if (is_journal_aborted(journal))
				clear_buffer_jbddirty(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			/*
			 * The buffer on BJ_Forget list and not jbddirty means
			 * it has been freed by this transaction and hence it
			 * could not have been reallocated until this
			 * transaction has committed. *BUT* it could be
			 * reallocated once we have written all the data to
			 * disk and before we process the buffer on BJ_Forget
			 * list.
			 */
			if (!jh->b_next_transaction)
				try_to_free = 1;
		}
		JBUFFER_TRACE(jh, "refile or unfile freed buffer");
		__journal_refile_buffer(jh);
		jbd_unlock_bh_state(bh);
		if (try_to_free)
			release_buffer_page(bh);
		else
			__brelse(bh);
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * This is a bit sleazy.  We use j_list_lock to protect transition
	 * of a transaction into T_FINISHED state and calling
	 * __journal_drop_transaction(). Otherwise we could race with
	 * other checkpointing code processing the transaction...
	 */
	spin_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/*
	 * Now recheck if some buffers did not get attached to the transaction
	 * while the lock was dropped...
	 */
	if (commit_transaction->t_forget) {
		spin_unlock(&journal->j_list_lock);
		spin_unlock(&journal->j_state_lock);
		goto restart_loop;
	}

	/* Done with this transaction! */

	jbd_debug(3, "JBD: commit phase 8\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT_RECORD);

	commit_transaction->t_state = T_FINISHED;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

	/*
	 * Weight the commit time higher than the average time so we don't
	 * react too strongly to vast changes in commit time.
	 */
	if (likely(journal->j_average_commit_time))
		journal->j_average_commit_time = (commit_time*3 +
				journal->j_average_commit_time) / 4;
	else
		journal->j_average_commit_time = commit_time;

	spin_unlock(&journal->j_state_lock);

	if (commit_transaction->t_checkpoint_list == NULL &&
	    commit_transaction->t_checkpoint_io_list == NULL) {
		__journal_drop_transaction(journal, commit_transaction);
	} else {
		if (journal->j_checkpoint_transactions == NULL) {
			journal->j_checkpoint_transactions = commit_transaction;
			commit_transaction->t_cpnext = commit_transaction;
			commit_transaction->t_cpprev = commit_transaction;
		} else {
			commit_transaction->t_cpnext =
				journal->j_checkpoint_transactions;
			commit_transaction->t_cpprev =
				commit_transaction->t_cpnext->t_cpprev;
			commit_transaction->t_cpnext->t_cpprev =
				commit_transaction;
			commit_transaction->t_cpprev->t_cpnext =
				commit_transaction;
		}
	}
	spin_unlock(&journal->j_list_lock);

	trace_jbd_end_commit(journal, commit_transaction);
	jbd_debug(1, "JBD: commit %d complete, head %d\n",
		  journal->j_commit_sequence, journal->j_tail_sequence);

	wake_up(&journal->j_wait_done_commit);
}
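
The j_average_commit_time update at the end of the function is a 3:1 exponentially weighted moving average: the new commit time counts three times as much as the accumulated average, as the in-code comment explains. A minimal self-contained sketch of that arithmetic (the helper name is illustrative, not part of JBD):

/* Blend a new sample into a running average, weighting the new sample
 * 3x so the average tracks recent commits more closely. */
static u64 weighted_avg_update(u64 avg, u64 sample)
{
	if (!avg)
		return sample;	/* first sample seeds the average */
	return (sample * 3 + avg) / 4;
}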
/* sysfs */
static ssize_t cm36686_poll_delay_show(struct device *dev,
					struct device_attribute *attr, char *buf)
{
	struct cm36686_data *cm36686 = dev_get_drvdata(dev);
	return sprintf(buf, "%lld\n", ktime_to_ns(cm36686->light_poll_delay));
}
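
The show handler above only formats the stored poll delay; its natural counterpart parses a value written from user space and stores it back. A minimal sketch, assuming the driver keeps the delay in a ktime_t field as shown -- the function name and the absence of locking here are illustrative, not taken from the real driver:

static ssize_t cm36686_poll_delay_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t size)
{
	struct cm36686_data *cm36686 = dev_get_drvdata(dev);
	long long new_delay;
	int err;

	/* Parse the delay in nanoseconds, mirroring the show format. */
	err = kstrtoll(buf, 10, &new_delay);
	if (err < 0)
		return err;

	cm36686->light_poll_delay = ns_to_ktime(new_delay);
	return size;
}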
Beispiel #27
static int audpcm_in_open(struct inode *inode, struct file *file)
{
	struct audio_in *audio = &the_audio_in;
	int rc;
	int len = 0;
	unsigned long ionflag = 0;
	ion_phys_addr_t addr = 0;
	struct ion_handle *handle = NULL;
	struct ion_client *client = NULL;

	int encid;
	struct timespec ts;
	struct rtc_time tm;

	mutex_lock(&audio->lock);
	if (audio->opened) {
		rc = -EBUSY;
		goto done;
	}

	audio->mode = MSM_AUD_ENC_MODE_TUNNEL;
	audio->samp_rate = RPC_AUD_DEF_SAMPLE_RATE_11025;
	audio->samp_rate_index = AUDREC_CMD_SAMP_RATE_INDX_11025;
	audio->channel_mode = AUDREC_CMD_STEREO_MODE_MONO;
	audio->buffer_size = MONO_DATA_SIZE;
	audio->enc_type = AUDREC_CMD_TYPE_0_INDEX_WAV | audio->mode;

	rc = audmgr_open(&audio->audmgr);
	if (rc)
		goto done;
	encid = audpreproc_aenc_alloc(audio->enc_type, &audio->module_name,
			&audio->queue_ids);
	if (encid < 0) {
		MM_AUD_ERR("No free encoder available\n");
		rc = -ENODEV;
		goto done;
	}
	audio->enc_id = encid;

	rc = msm_adsp_get(audio->module_name, &audio->audrec,
			   &audrec_adsp_ops, audio);
	if (rc) {
		audpreproc_aenc_free(audio->enc_id);
		goto done;
	}

	audio->dsp_cnt = 0;
	audio->stopped = 0;

	audpcm_in_flush(audio);

	client = msm_ion_client_create(UINT_MAX, "Audio_PCM_in_client");
	if (IS_ERR_OR_NULL(client)) {
		MM_ERR("Unable to create ION client\n");
		rc = -ENOMEM;
		goto client_create_error;
	}
	audio->client = client;

	MM_DBG("allocating mem sz = %d\n", DMASZ);
	handle = ion_alloc(client, DMASZ, SZ_4K,
		ION_HEAP(ION_AUDIO_HEAP_ID));
	if (IS_ERR_OR_NULL(handle)) {
		MM_ERR("Unable to create allocate O/P buffers\n");
		rc = -ENOMEM;
		goto output_buff_alloc_error;
	}

	audio->output_buff_handle = handle;

	rc = ion_phys(client, handle, &addr, &len);
	if (rc) {
		MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
		rc = -ENOMEM;
		goto output_buff_get_phys_error;
	} else {
		MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
	}
	audio->phys = (int32_t)addr;

	rc = ion_handle_get_flags(client, handle, &ionflag);
	if (rc) {
		MM_ERR("could not get flags for the handle\n");
		rc = -ENOMEM;
		goto output_buff_get_flags_error;
	}

	audio->data = ion_map_kernel(client, handle, ionflag);
	if (IS_ERR(audio->data)) {
		MM_ERR("could not map read buffers,freeing instance 0x%08x\n",
				(int)audio);
		rc = -ENOMEM;
		goto output_buff_map_error;
	}
	MM_DBG("read buf: phy addr 0x%08x kernel addr 0x%08x\n",
		audio->phys, (int)audio->data);

	file->private_data = audio;
	audio->opened = 1;
	rc = 0;
done:
	mutex_unlock(&audio->lock);
	getnstimeofday(&ts);
	rtc_time_to_tm(ts.tv_sec, &tm);
	pr_aud_info1("[ATS][start_recording][successful] at %lld \
		(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n",
		ktime_to_ns(ktime_get()),
		tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
	return rc;
output_buff_map_error:
output_buff_get_phys_error:
output_buff_get_flags_error:
	ion_free(client, audio->output_buff_handle);
output_buff_alloc_error:
	ion_client_destroy(client);
client_create_error:
	msm_adsp_put(audio->audrec);
	audpreproc_aenc_free(audio->enc_id);
	mutex_unlock(&audio->lock);
	return rc;
}
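
The error path of this open() uses the kernel's usual goto-ladder unwind: each label releases exactly the resources acquired before the failing step, in reverse order of acquisition. A minimal self-contained sketch of the same pattern, with plain allocations standing in for the driver's ION and ADSP resources:

#include <linux/slab.h>

/* Acquire two resources; on failure release only what was taken. */
static int open_with_unwind(void **out_a, void **out_b)
{
	void *a, *b;

	a = kzalloc(64, GFP_KERNEL);	/* first resource */
	if (!a)
		return -ENOMEM;		/* nothing to unwind yet */

	b = kzalloc(128, GFP_KERNEL);	/* second resource */
	if (!b)
		goto err_free_a;	/* unwind the first only */

	*out_a = a;
	*out_b = b;
	return 0;

err_free_a:
	kfree(a);
	return -ENOMEM;
}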
static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.10, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable);
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#ifdef CONFIG_SCHED_HMP
	P(sched_small_task);
	P(sched_upmigrate);
	P(sched_downmigrate);
	P(sched_init_task_load_windows);
	P(sched_init_task_load_pelt);
	P(min_capacity);
	P(max_capacity);
	P(sched_use_pelt);
	P(sched_ravg_window);
#endif
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}
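
SPLIT_NS, consumed by the PN() macros above, is defined elsewhere in the file. A sketch of its usual definition in kernel/sched/debug.c, which splits a nanosecond count into an integral millisecond part and a six-digit remainder to match the "%Ld.%06ld" format (reproduced from memory, so treat it as indicative):

#include <asm/div64.h>

static unsigned long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);
	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;
	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)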
Beispiel #29
/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	ktime_t  kt1, kt2;
	s64 idle_time_ns;
	s64 idle_time;


	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return acpi_idle_enter_c1(dev, state);

	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		if (dev->safe_state) {
			dev->last_state = dev->safe_state;
			return dev->safe_state->enter(dev, dev->safe_state);
		} else {
			local_irq_disable();
			acpi_safe_halt();
			local_irq_enable();
			return 0;
		}
	}

	local_irq_disable();

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();

		if (unlikely(need_resched())) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return 0;
		}
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before the busmaster disable, as we might need to
	 * access the HPET!
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	kt1 = ktime_get_real();
	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	acpi_idle_do_entry(cx);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		spin_unlock(&c3_lock);
	}
	kt2 = ktime_get_real();
	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
	idle_time = idle_time_ns;
	do_div(idle_time, NSEC_PER_USEC);

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(idle_time_ns);

	local_irq_enable();
	if (cx->entry_method != ACPI_CSTATE_FFH)
		current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	cx->time += idle_time;
	return idle_time;
}
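
The idle-time bookkeeping above converts nanoseconds to microseconds with do_div() rather than a plain "/" because a 64-bit division on 32-bit targets would otherwise pull in a libgcc helper; do_div() divides its first argument in place and returns the remainder. A minimal usage sketch (the helper name is illustrative):

#include <asm/div64.h>
#include <linux/time.h>

static s64 idle_ns_to_us(s64 ns)
{
	u64 t = ns;

	do_div(t, NSEC_PER_USEC);	/* t now holds microseconds */
	return t;
}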
static u32 get_ftt_frequency_poll(struct ftt_charger_device *ftt_pdev)
{
	register u32 count_pulse = 0;
	register u8 prev, curr = 0;
	register u32 count = 0;
	u32 freq = 0;
	u32 rest = 0;
	u64 s_time = 0, e_time = 0;
	u32 d_time = 0;
	/*
	 * Declared outside the #if: the DPRINT at the bottom of the
	 * function prints resent_sampling even when FTT_FREQ_CORRECTION
	 * is disabled, which would otherwise fail to compile.
	 */
	u32 resent_sampling = 0;
#if FTT_FREQ_CORRECTION
	u32 sampling_per_freq = 0;
	u32 sampling_per_time = 0;
	u32 pre_pulse = 0;
	u32 post_pulse = 0;
	u32 resent_time = 0;
#endif /* FTT_FREQ_CORRECTION */
	unsigned long ftt_frequency_lock_flags;
	u32 smp_cnt = 0;

	spin_lock_irqsave(&ftt_pdev->ftt_frequency_lock, ftt_frequency_lock_flags);

	smp_cnt = ftt_pdev->fct.ftt_count[FTT_COUNT_FREQUENCY_SAMPLE_COUNT];
	s_time = ktime_to_ns(ktime_get());
	for (count = 0; count < smp_cnt; count++) {
		prev = curr;
		curr = gpio_get_value(ftt_pdev->ftt_gpio);
#if FTT_FREQ_CORRECTION
		post_pulse++;
#endif /* FTT_FREQ_CORRECTION */
		if (curr != prev) {
			count_pulse++;
#if FTT_FREQ_CORRECTION
			post_pulse = 0;
#endif /* FTT_FREQ_CORRECTION */
		}
#if FTT_FREQ_CORRECTION
		if (!count_pulse)
			pre_pulse++;
#endif /* FTT_FREQ_CORRECTION */
	}
	e_time = ktime_to_ns(ktime_get());
	d_time = e_time - s_time;

#if FTT_FREQ_CORRECTION
	if (count_pulse/2 >= 1) {
		sampling_per_freq = (smp_cnt - pre_pulse - post_pulse) / count_pulse;
		sampling_per_time = d_time / smp_cnt;
		resent_sampling = (count_pulse % 2) * sampling_per_freq + pre_pulse + post_pulse;
		resent_time = resent_sampling * sampling_per_time;
		d_time -= resent_time;
	}
#endif /* FTT_FREQ_CORRECTION */

	count_pulse = count_pulse / 2;

	if (d_time) {
		freq = (u32)(FTT_1_SEC / d_time * count_pulse);
		rest = (FTT_1_SEC % d_time) * count_pulse / d_time;
		freq += rest;
	}

#if FTT_FREQ_CORRECTION_TABLE
	freq += ftt_frequency_correction(ftt_pdev, freq);
#endif /* FTT_FREQ_CORRECTION_TABLE */

	/* Guarded: every division in the trace uses d_time, which can be 0 */
	if (d_time)
		DPRINT(FTT_LOG_SAMPLING,
		       "FREQ smpl=%5uc/ms cnt= %3u delta=%4u.%03uus %3ukHz %6uHz rs=%2u cor=%d\n",
		       (1000 * 1000 * smp_cnt) / d_time, count_pulse,
		       d_time / 1000, d_time % 1000,
		       get_ftt_freqb_to_freqk(freq), freq, resent_sampling,
		       ftt_frequency_correction(ftt_pdev, freq));

	spin_unlock_irqrestore(&ftt_pdev->ftt_frequency_lock, ftt_frequency_lock_flags);
	return freq;
}
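
The frequency math in this poll loop stays in integer arithmetic: the quotient term counts whole GPIO periods per second and the remainder term recovers the fraction lost to integer truncation. A self-contained sketch of the same arithmetic, assuming FTT_1_SEC is one second expressed in nanoseconds (the constant's definition is not shown in this excerpt):

static u32 pulses_to_hz(u32 pulses, u32 delta_ns)
{
	const u64 one_sec_ns = 1000000000ULL;	/* assumed FTT_1_SEC */
	u32 hz;

	if (!delta_ns)
		return 0;	/* mirror the d_time guard above */

	hz  = (u32)(one_sec_ns / delta_ns) * pulses;	/* whole periods */
	hz += (u32)((one_sec_ns % delta_ns) * pulses / delta_ns); /* remainder */
	return hz;
}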