Ejemplo n.º 1
0
/**
 * iwl_scan_cancel_timeout - Cancel any currently executing HW scan
 * @ms: amount of time to wait (in milliseconds) for scan to abort
 *
 * Caller must hold priv->mutex.
 */
void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(ms);

	lockdep_assert_held(&priv->mutex);

	IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");

	iwl_do_scan_abort(priv);

	/* Poll for the hardware scan bit to clear, up to the deadline. */
	while (time_before_eq(jiffies, deadline)) {
		if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
			/*
			 * STATUS_SCAN_HW is now clear: the device finished,
			 * but the background work will run at best as soon
			 * as we release the mutex.  Since we need to be able
			 * to issue a new scan right after this function
			 * returns, run the completion here.  The
			 * STATUS_SCAN_COMPLETE bit will then be cleared and
			 * prevent the background work from "completing" a
			 * possible new scan.
			 */
			iwl_process_scan_complete(priv);
			return;
		}
		msleep(20);
	}
	/* Timed out: leave completion to the background work. */
}
Ejemplo n.º 2
0
/*
 * nvidia_tlbflush - flush the chipset write buffers and GART TLB.
 *
 * If a write-buffer-coherency mask is configured, set the mask bits in the
 * NVIDIA_1_WBC register and poll until the hardware clears them, bounded to
 * three seconds.  Then touch the aperture to force the TLB entries to be
 * re-fetched.
 */
static void nvidia_tlbflush(struct agp_memory *mem)
{
	unsigned long end;
	u32 wbc_reg, temp;
	int i;

	/* flush chipset write buffers, if this chipset has them */
	if (nvidia_private.wbc_mask) {
		pci_read_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, &wbc_reg);
		wbc_reg |= nvidia_private.wbc_mask;
		pci_write_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, wbc_reg);

		end = jiffies + 3*HZ;
		do {
			pci_read_config_dword(nvidia_private.dev_1,
					NVIDIA_1_WBC, &wbc_reg);
			if (time_before_eq(end, jiffies)) {
				printk(KERN_ERR PFX
				    "TLB flush took more than 3 seconds.\n");
				/*
				 * Bail out instead of spinning (and logging)
				 * forever if the hardware never clears the
				 * mask bits.
				 */
				break;
			}
		} while (wbc_reg & nvidia_private.wbc_mask);
	}

	/*
	 * flush TLB entries: read back every aperture page, twice.  The
	 * value read is deliberately discarded; only the access matters.
	 */
	for (i = 0; i < 32 + 1; i++)
		temp = readl(nvidia_private.aperture+(i * PAGE_SIZE / sizeof(u32)));
	for (i = 0; i < 32 + 1; i++)
		temp = readl(nvidia_private.aperture+(i * PAGE_SIZE / sizeof(u32)));
}
/*
 * br_fdb_cleanup - age out dynamic forwarding-database entries.
 *
 * Walks every hash bucket under the bridge hash write lock and drops any
 * non-static entry whose ageing timer is at or before the bridge timeout.
 */
void br_fdb_cleanup(struct net_bridge *br)
{
	unsigned long expiry_cutoff;
	int bucket;

	expiry_cutoff = __timeout(br);

	write_lock_bh(&br->hash_lock);
	for (bucket = 0; bucket < BR_HASH_SIZE; bucket++) {
		struct net_bridge_fdb_entry *entry, *next;

		/* Save the successor before possibly unlinking the entry. */
		for (entry = br->hash[bucket]; entry != NULL; entry = next) {
			next = entry->next_hash;
			if (!entry->is_static &&
			    time_before_eq(entry->ageing_timer, expiry_cutoff)) {
				__hash_unlink(entry);
				br_fdb_put(entry);
			}
		}
	}
	write_unlock_bh(&br->hash_lock);
}
Ejemplo n.º 4
0
/*
 * adapter_reset - hard-reset the 3c505 adapter and restart reception.
 *
 * Drains any pending command bytes, NAKs the outstanding PCB, then pulses
 * the ATTN and FLSH control lines with 10ms settles before restoring the
 * original host control register and re-arming receive.
 */
static inline void adapter_reset(struct net_device *dev)
{
	unsigned long timeout;
	elp_device *adapter = netdev_priv(dev);
	unsigned char orig_hcr = adapter->hcr_val;	/* restored at the end */

	outb_control(0, dev);

	/* ACRF set means the adapter still has command bytes for us. */
	if (inb_status(dev->base_addr) & ACRF) {
		do {
			/* Consume one byte, then give the adapter up to
			 * 2*HZ/100 jiffies (~20ms) to raise ACRF again. */
			inb_command(dev->base_addr);
			timeout = jiffies + 2*HZ/100;
			while (time_before_eq(jiffies, timeout) && !(inb_status(dev->base_addr) & ACRF));
		} while (inb_status(dev->base_addr) & ACRF);
		set_hsf(dev, HSF_PCB_NAK);
	}
	/* Reset pulse sequence; each step settles for 10ms. */
	outb_control(adapter->hcr_val | ATTN | DIR, dev);
	mdelay(10);
	outb_control(adapter->hcr_val & ~ATTN, dev);
	mdelay(10);
	outb_control(adapter->hcr_val | FLSH, dev);
	mdelay(10);
	outb_control(adapter->hcr_val & ~FLSH, dev);
	mdelay(10);

	outb_control(orig_hcr, dev);
	if (!start_receive(dev, &adapter->tx_pcb))
		pr_err("%s: start receive command failed\n", dev->name);
}
Ejemplo n.º 5
0
/*
 * remove_35mm_do_work - deferred handling of a 3.5mm headset unplug event.
 *
 * Runs from a workqueue.  Holds a wake lock while processing, debounces
 * rapid insert/remove/insert sequences, tears down mic bias, and notifies
 * the framework through the switch class device.
 * NOTE(review): relies on the file-scope `hi` and `pd` state; presumably
 * serialized by the workqueue — confirm against the queueing sites.
 */
static void remove_35mm_do_work(struct work_struct *work)
{
	/* Keep the system awake for ~2.5s while we process the unplug. */
	wake_lock_timeout(&hi->headset_wake_lock, 2.5*HZ);

	H2W_DBG("");
	/*To solve the insert, remove, insert headset problem*/
	if (time_before_eq(jiffies, hi->insert_jiffies))
		msleep(800);

	/* A re-insert arrived while we slept: treat the jack as stable. */
	if (hi->is_ext_insert) {
		H2WI("Skip 3.5mm headset plug out!!!");
		if (hi->is_hpin_stable)
			*(hi->is_hpin_stable) = 1;
		return;
	}

	pr_info("3.5mm_headset plug out\n");

	if (pd->key_event_disable != NULL)
		pd->key_event_disable();

	/* Mic bias is only needed while a headset with a mic is present. */
	if (hi->mic_bias_state) {
		turn_mic_bias_on(0);
		hi->mic_bias_state = 0;
	}
	hi->ext_35mm_status = 0;
	if (hi->is_hpin_stable)
		*(hi->is_hpin_stable) = 0;

	/* Notify framework via switch class */
	mutex_lock(&hi->mutex_lock);
	switch_set_state(&hi->hs_change, hi->ext_35mm_status);
	mutex_unlock(&hi->mutex_lock);
}
Ejemplo n.º 6
0
/*
 * xmm_power_runtime_resume - wake the XMM modem for USB runtime resume.
 *
 * Only valid in the L2 / L2-to-L0 USB power states.  If the AP is waking
 * the CP, first delay until at least 100ms after the last suspend (the
 * modem may fail to resume otherwise), then pulse SLAVE_WAKEUP and wait
 * up to 200ms per attempt for HOST_WAKEUP to drop, retrying up to
 * MAX_RESUME_RETRY_TIME times.
 */
void xmm_power_runtime_resume(void)
{
	int retry = 0;
	unsigned long least_time;
	unsigned long tio;

	if (xmm_curr_power_state != XMM_POW_S_USB_L2 &&
	    xmm_curr_power_state != XMM_POW_S_USB_L2_TO_L0) {
		pr_err("xmm_power: Invalid state %d for runtime resume.\n",
			xmm_curr_power_state);
		return;
	}

	/* HOST_WAKEUP high means this is the AP waking the CP. */
	if (!gpio_get_value(GPIO_HOST_WAKEUP))
		return;

	/* If resume just after suspending, the modem may fail
	   to resume, check this case and delay for awhile */
	least_time = xmm_last_suspend + msecs_to_jiffies(100);
	while (time_before_eq(jiffies, least_time)) {
		pr_info("xmm_power: suspending delayed\n");
		msleep(10);
	}

	while (retry < MAX_RESUME_RETRY_TIME) {
		gpio_set_value(GPIO_SLAVE_WAKEUP, 1);

		/* Poll HOST_WAKEUP for up to 200ms. */
		tio = jiffies + msecs_to_jiffies(200);
		while (time_before_eq(jiffies, tio)) {
			msleep(10);
			if (gpio_get_value(GPIO_HOST_WAKEUP) == 0)
				break;
		}

		if (gpio_get_value(GPIO_HOST_WAKEUP) == 0)
			break;

		/* Timed out: drop SLAVE_WAKEUP and retry. */
		pr_err("xmm_power: Wait for resume USB timeout, retry\n");
		gpio_set_value(GPIO_SLAVE_WAKEUP, 0);
		msleep(10);
		retry++;
	}

	if (retry == MAX_RESUME_RETRY_TIME) {
		pr_err("xmm_power: Wait for resume USB timeout, No Method to Resolve\n");
	}
}
Ejemplo n.º 7
0
/*
 * ds1374_set_alarm - program the DS1374 watchdog/alarm counter.
 *
 * Converts the requested absolute alarm time into a delta from the current
 * RTC time, disables any existing alarm, writes the 3-byte countdown, and
 * re-enables the alarm interrupt if requested.  Requires a wired IRQ.
 * Returns 0 on success or a negative errno.
 */
static int ds1374_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct ds1374 *ds1374 = i2c_get_clientdata(client);
	struct rtc_time now;
	unsigned long new_alarm, itime;
	int cr;
	int ret = 0;

	/* Without an interrupt line the alarm can never fire. */
	if (client->irq <= 0)
		return -EINVAL;

	ret = ds1374_read_time(dev, &now);
	if (ret < 0)
		return ret;

	rtc_tm_to_time(&alarm->time, &new_alarm);
	rtc_tm_to_time(&now, &itime);

	/* This can happen due to races, in addition to dates that are
	 * truly in the past.  To avoid requiring the caller to check for
	 * races, dates in the past are assumed to be in the recent past
	 * (i.e. not something that we'd rather the caller know about via
	 * an error), and the alarm is set to go off as soon as possible.
	 */
	if (time_before_eq(new_alarm, itime))
		new_alarm = 1;
	else
		new_alarm -= itime;	/* hardware counts down a delta */

	mutex_lock(&ds1374->mutex);

	ret = cr = i2c_smbus_read_byte_data(client, DS1374_REG_CR);
	if (ret < 0)
		goto out;

	/* Disable any existing alarm before setting the new one
	 * (or lack thereof). */
	cr &= ~DS1374_REG_CR_WACE;

	ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, cr);
	if (ret < 0)
		goto out;

	/* Write the 24-bit countdown value. */
	ret = ds1374_write_rtc(client, new_alarm, DS1374_REG_WDALM0, 3);
	if (ret)
		goto out;

	if (alarm->enabled) {
		/* Alarm mode (not watchdog), with interrupt enabled. */
		cr |= DS1374_REG_CR_WACE | DS1374_REG_CR_AIE;
		cr &= ~DS1374_REG_CR_WDALM;

		ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, cr);
	}

out:
	mutex_unlock(&ds1374->mutex);
	return ret;
}
Ejemplo n.º 8
0
/*
 * __nat25_has_expired - test whether a NAT25 database entry has aged out.
 * Returns 1 when the entry's ageing timer is at or before the timeout
 * threshold for this adapter, 0 otherwise.
 */
static __inline__ int  __nat25_has_expired(_adapter *priv,
				struct nat25_network_db_entry *fdb)
{
	return time_before_eq(fdb->ageing_timer, __nat25_timeout(priv)) ? 1 : 0;
}
/*
 * remove_35mm_do_work - deferred handling of a 3.5mm headset unplug.
 *
 * Runs from a workqueue.  Debounces fast insert/remove/insert sequences,
 * tears down mic bias and the Metrico test headset, releases any pressed
 * button, and updates the switch device state according to the cable type.
 * NOTE(review): operates on the file-scope `hi` state; presumably
 * serialized by the workqueue — confirm at the queueing sites.
 */
static void remove_35mm_do_work(struct work_struct *work)
{
	int state;

	if (hi->is_wake_lock_ready)
		wake_lock_timeout(&hi->headset_wake_lock, 2.5*HZ);

	H2W_DBG("");
	/*To solve the insert, remove, insert headset problem*/
	if (time_before_eq(jiffies, hi->insert_jiffies))
		msleep(800);
	/* A re-insert arrived while we slept: nothing to tear down. */
	if (hi->is_ext_insert) {
		H2WI("Skip 3.5mm headset plug out!!!");
		return;
	}

	printk(KERN_INFO "3.5mm_headset plug out\n");
	mutex_lock(&hi->mutex_lock);
	state = switch_get_state(&hi->sdev);

	if (hi->mic_bias_state) {
		turn_mic_bias_on(0);
		hi->mic_bias_state = 0;
	}

	/* For HW Metrico lab test */
	if (hi->metrico_status)
		enable_metrico_headset(0);

	microp_notify_unplug_mic();

	/* Release any button still reported as pressed. */
	if (atomic_read(&hi->btn_state))
		button_released(atomic_read(&hi->btn_state));
	hi->ext_35mm_status = HTC_35MM_UNPLUG;

	if (hi->key_int_shutdown_gpio)
		gpio_set_value(hi->key_int_shutdown_gpio, 0);

	if (hi->ext_mic_sel)
		gpio_direction_output(hi->ext_mic_sel, 0);

	/* Update the switch state bits for the cable type left behind. */
	if (hi->h2w_dev_type == H2W_TVOUT) {
		state &= ~(BIT_HEADSET | BIT_35MM_HEADSET);
		state |= BIT_HEADSET_NO_MIC;
		switch_set_state(&hi->sdev, state);
	} else if (hi->cable_in1 && !gpio_get_value(hi->cable_in1)) {
		/* An H2W cable is still attached: re-run its detection. */
		state &= ~BIT_35MM_HEADSET;
		switch_set_state(&hi->sdev, state);
		queue_delayed_work(detect_wq, &detect_h2w_work, H2W_NO_DELAY);
	} else {
		state &= ~(BIT_HEADSET | BIT_HEADSET_NO_MIC |
			BIT_35MM_HEADSET);
		switch_set_state(&hi->sdev, state);
	}

	mutex_unlock(&hi->mutex_lock);
}
Ejemplo n.º 10
0
/*
 * has_expired - test whether a forwarding-database entry has aged out.
 * Static entries never expire.  Returns 1 when expired, 0 otherwise.
 */
static __inline__ int has_expired(struct net_bridge *br,
				  struct net_bridge_fdb_entry *fdb)
{
	if (fdb->is_static)
		return 0;

	return time_before_eq(fdb->ageing_timer, __timeout(br)) ? 1 : 0;
}
Ejemplo n.º 11
0
/*
 * kb_wait - wait for the keyboard controller input buffer to drain.
 * Polls the status register for up to half a second, returning as soon
 * as bit 0x02 clears; gives up silently on timeout.
 */
static inline void kb_wait(void)
{
    unsigned long deadline = jiffies + HZ/2;

    do {
        if ((jazz_read_status() & 0x02) == 0)
            return;
    } while (time_before_eq(jiffies, deadline));
}
Ejemplo n.º 12
0
/*
 * remove_detect_work_func - deferred handling of a 3.5mm headset removal.
 *
 * Runs from a workqueue.  Waits for the HPIN line to stabilize after a
 * recent insert, then powers down the 3.5mm hardware, releases any pressed
 * button, and clears the 3.5mm/FM bits in the switch device state.
 */
static void remove_detect_work_func(struct work_struct *work)
{
    int state;

    wake_lock_timeout(&hi->hs_wake_lock, HS_WAKE_LOCK_TIMEOUT);

    HS_DBG();

    /* Inserted less than a second ago: let the HPIN line settle. */
    if (time_before_eq(jiffies, hi->insert_jiffies + HZ)) {
        HS_LOG("Waiting for HPIN stable");
        msleep(HS_DELAY_SEC - HS_DELAY_REMOVE);
    }

    /* A re-insert arrived while we slept: nothing to tear down. */
    if (hi->is_ext_insert) {
        HS_LOG("Headset has been inserted");
        return;
    }

    set_35mm_hw_state(0);

    /* For the HW Metrico lab test headset. */
    if (hi->metrico_status)
        enable_metrico_headset(0);

    if (atomic_read(&hi->btn_state))
        button_released(atomic_read(&hi->btn_state));
    hi->ext_35mm_status = HTC_35MM_UNPLUG;

    mutex_lock(&hi->mutex_lock);

    state = switch_get_state(&hi->sdev);
    if (!(state & MASK_35MM_HEADSET)) {
        HS_LOG("Headset has been removed");
        mutex_unlock(&hi->mutex_lock);
        return;
    }

#if 0
    if (hi->cable_in1 && !gpio_get_value(hi->cable_in1)) {
        state &= ~BIT_35MM_HEADSET;
        switch_set_state(&hi->sdev, state);
        queue_delayed_work(detect_wq, &detect_h2w_work,
                           HS_DELAY_ZERO_JIFFIES);
    } else {
        state &= ~(MASK_35MM_HEADSET | MASK_FM_ATTRIBUTE);
        switch_set_state(&hi->sdev, state);
    }
#else
    state &= ~(MASK_35MM_HEADSET | MASK_FM_ATTRIBUTE);
    switch_set_state(&hi->sdev, state);
#endif

    HS_LOG_TIME("Remove 3.5mm headset");

    mutex_unlock(&hi->mutex_lock);
}
Ejemplo n.º 13
0
/**
 * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
 *
 * Decrease the counter of wakeup events being processed after it was
 * increased by pm_wakeup_event().  Runs in timer context; only acts when
 * the recorded expiry is set and has actually been reached.
 */
static void pm_wakeup_timer_fn(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&events_lock, flags);
	if (events_timer_expires) {
		if (time_before_eq(events_timer_expires, jiffies)) {
			events_in_progress--;
			events_timer_expires = 0;
		}
	}
	spin_unlock_irqrestore(&events_lock, flags);
}
Ejemplo n.º 14
0
/*
 * temac_indirect_busywait - wait for the TEMAC indirect register interface.
 * Polls the hard-ACS-ready bit, sleeping 1ms between reads, for roughly
 * two jiffies.  Returns 0 when ready, -ETIMEDOUT (with a WARN) otherwise.
 */
int temac_indirect_busywait(struct temac_local *lp)
{
	unsigned long deadline = jiffies + 2;

	for (;;) {
		if (temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)
			return 0;
		if (time_before_eq(deadline, jiffies)) {
			WARN_ON(1);
			return -ETIMEDOUT;
		}
		msleep(1);
	}
}
/* Wait till MDIO interface is ready to accept a new transaction.*/
int axienet_mdio_wait_until_ready(struct axienet_local *lp)
{
	unsigned long end = jiffies + 2;
	while (!(axienet_ior(lp, XAE_MDIO_MCR_OFFSET) &
		 XAE_MDIO_MCR_READY_MASK)) {
		if (time_before_eq(end, jiffies)) {
			WARN_ON(1);
			return -ETIMEDOUT;
		}
		udelay(1);
	}
	return 0;
}
Ejemplo n.º 16
0
/*
 * mdfld_dsi_h8c7_power_off - power down the h8c7 DSI video-mode panel.
 *
 * Sends a SHUT_DOWN special packet, then the display-off and sleep-in MCS
 * commands, busy-waiting after each to satisfy the panel's timing
 * requirements (~10ms after display off, ~150ms after sleep in).
 * Returns 0 on success or a negative errno from the packet sender.
 */
static int mdfld_dsi_h8c7_power_off(struct mdfld_dsi_config *dsi_config)
{
	struct mdfld_dsi_pkg_sender *sender =
		mdfld_dsi_get_pkg_sender(dsi_config);
	unsigned long wait_timeout;
	int err;

	PSB_DEBUG_ENTRY("Turn off video mode TMD panel...\n");

	if (!sender) {
		DRM_ERROR("Failed to get DSI packet sender\n");
		return -EINVAL;
	}

	/*send SHUT_DOWN packet*/
	err = mdfld_dsi_send_dpi_spk_pkg_hs(sender,
				MDFLD_DSI_DPI_SPK_SHUT_DOWN);
	if (err) {
		DRM_ERROR("Failed to send turn off packet\n");
		return err;
	}

	/* FIXME disable CABC later*/

	/*set display off*/
	mdfld_dsi_send_mcs_long_hs(sender, h8c7_set_display_off, 4, 0);
	/* busy-wait ~10ms (HZ/100) for the panel to latch display off */
	wait_timeout = jiffies + (HZ / 100);
	while (time_before_eq(jiffies, wait_timeout))
		cpu_relax();

	/* sleep in and wait for 150ms. */
	mdfld_dsi_send_mcs_long_hs(sender, h8c7_enter_sleep_mode, 4, 0);
	wait_timeout = jiffies + (3 * HZ / 20);
	while (time_before_eq(jiffies, wait_timeout))
		cpu_relax();

	return 0;
}
/*
 * hs_hpin_stable - report whether the HPIN line has settled.
 * Returns 0 while still inside the 1.2s unstable window after the last
 * recorded HPIN event, 1 once that window has passed.
 */
int hs_hpin_stable(void)
{
	unsigned long stable_after;

	HS_DBG();

	/* The line is considered unstable for 1.2s after the last event. */
	stable_after = hi->hpin_jiffies + (unsigned long)(1.2 * HZ);

	if (time_before_eq(jiffies, stable_after))
		return 0;

	return 1;
}
Ejemplo n.º 18
0
/*
 * gr_chroot_shmat - decide whether a chrooted task may attach a SHM segment.
 *
 * Denies attachment when the segment's creator (or, failing that, its last
 * attacher) does not share the current task's root, per the
 * GRKERNSEC_CHROOT_SHMAT policy.  Returns 1 to allow, 0 to deny.
 */
int
gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
		const time_t shm_createtime)
{
#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
	struct pid *pid = NULL;
	time_t starttime;

	if (unlikely(!grsec_enable_chroot_shmat))
		return 1;

	if (likely(!proc_is_chrooted(current)))
		return 1;

	read_lock(&tasklist_lock);

	pid = find_vpid(shm_cprid);
	if (pid) {
		struct task_struct *p;

		/* pid_task() may return NULL (e.g. a zombie pid with no
		 * task); the original code dereferenced it unconditionally. */
		p = pid_task(pid, PIDTYPE_PID);
		if (p) {
			task_lock(p);
			starttime = p->start_time.tv_sec;
			/* Only trust the creator check if the task really is
			 * the creator (started before the segment existed). */
			if (unlikely(!have_same_root(current, p) &&
				     time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime))) {
				task_unlock(p);
				read_unlock(&tasklist_lock);
				gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
				return 0;
			}
			task_unlock(p);
		}
	} else {
		/* Creator is gone: fall back to the last attaching pid. */
		pid = find_vpid(shm_lapid);
		if (pid) {
			struct task_struct *p;

			p = pid_task(pid, PIDTYPE_PID);
			if (p) {
				task_lock(p);
				if (unlikely(!have_same_root(current, p))) {
					task_unlock(p);
					read_unlock(&tasklist_lock);
					gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
					return 0;
				}
				task_unlock(p);
			}
		}
	}

	read_unlock(&tasklist_lock);
#endif
	return 1;
}
Ejemplo n.º 19
0
/*
 * Check whether the inode attributes are still valid.
 *
 * Cached attributes are trusted only for non-root inodes whose validity
 * time (fi->i_time) has not yet passed; otherwise fetch fresh attributes
 * with a 'getattr' request.  The root inode is always refetched — this is
 * probably being too cautious.
 */
static int fuse_revalidate(struct dentry *entry)
{
	struct inode *inode = entry->d_inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (!fuse_allow_task(fc, current))
		return -EACCES;

	/* Root inode, or validity window expired: go to the server. */
	if (get_node_id(inode) == FUSE_ROOT_ID ||
	    !time_before_eq(jiffies, fi->i_time))
		return fuse_do_getattr(inode);

	return 0;
}
Ejemplo n.º 20
0
void
gr_handle_crash(struct task_struct *task, const int sig)
{
	struct acl_subject_label *curr;
	struct acl_subject_label *curr2;
	struct task_struct *tsk, *tsk2;
	const struct cred *cred;
	const struct cred *cred2;

	if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
		return;

	if (unlikely(!gr_acl_is_enabled()))
		return;

	curr = task->acl;

	if (!(curr->resmask & (1 << GR_CRASH_RES)))
		return;

	if (time_before_eq(curr->expires, get_seconds())) {
		curr->expires = 0;
		curr->crashes = 0;
	}

	curr->crashes++;

	if (!curr->expires)
		curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;

	if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
	    time_after(curr->expires, get_seconds())) {
		rcu_read_lock();
		cred = __task_cred(task);
		if (cred->uid && proc_is_setxid(cred)) {
			gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
			spin_lock(&gr_uid_lock);
			gr_insert_uid(cred->uid, curr->expires);
			spin_unlock(&gr_uid_lock);
			curr->expires = 0;
			curr->crashes = 0;
			read_lock(&tasklist_lock);
			do_each_thread(tsk2, tsk) {
				cred2 = __task_cred(tsk);
				if (tsk != task && cred2->uid == cred->uid)
					gr_fake_force_sig(SIGKILL, tsk);
			} while_each_thread(tsk2, tsk);
Ejemplo n.º 21
0
/*
 * shaper_kick - transmit every queued frame whose scheduled clock is due.
 *
 * Walks the send queue in order, firing each frame whose shapeclock falls
 * within SHAPER_BURST of now, and re-arms the timer for the first frame
 * that is not yet due.
 */
static void shaper_kick(struct shaper *shaper)
{
	struct sk_buff *skb;

	/*
	 *	Walk the list (may be empty)
	 */

	while((skb=skb_peek(&shaper->sendq))!=NULL)
	{
		/*
		 *	Each packet due to go out by now (within an error
		 *	of SHAPER_BURST) gets kicked onto the link
		 */

		if(sh_debug)
			printk("Clock = %ld, jiffies = %ld\n", SHAPERCB(skb)->shapeclock, jiffies);
		if(time_before_eq(SHAPERCB(skb)->shapeclock, jiffies + SHAPER_BURST))
		{
			/*
			 *	Pull the frame and get interrupts back on.
			 */

			skb_unlink(skb, &shaper->sendq);
			/* Push the recovery point forward past this frame's
			 * transmission window, but never move it backwards. */
			if (shaper->recovery <
			    SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen)
				shaper->recovery = SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen;
			/*
			 *	Pass on to the physical target device via
			 *	our low level packet thrower.
			 */

			SHAPERCB(skb)->shapepend=0;
			shaper_queue_xmit(shaper, skb);	/* Fire */
		}
		else
			break;	/* queue is time-ordered; the rest is not due */
	}

	/*
	 *	Next kick.
	 */

	if(skb!=NULL)
		mod_timer(&shaper->timer, SHAPERCB(skb)->shapeclock);
}
Ejemplo n.º 22
0
/*
 * is_audio_jack_pin_stable - report whether the jack detect pin has settled.
 * Returns 0 while still within one second of the last recorded HPIN event,
 * 1 otherwise (including when no jack info has been registered yet).
 */
int is_audio_jack_pin_stable(void)
{
	unsigned long flags = 0;
	unsigned long hpin_time;

	if (!pjack_info)
		return 1;

	/* Snapshot the timestamp under the lock, compare outside it. */
	spin_lock_irqsave(&pjack_info->spin_lock, flags);
	hpin_time = pjack_info->hpin_jiffies;
	spin_unlock_irqrestore(&pjack_info->spin_lock, flags);

	return time_before_eq(jiffies, hpin_time + JIFFIES_1_SEC) ? 0 : 1;
}
Ejemplo n.º 23
0
/*
 * gr_chroot_shmat - decide whether a chrooted task may attach a SHM segment.
 *
 * Denies attachment when the segment's creator (or, on pid reuse, its last
 * attacher) does not share the current task's root, per the
 * GRKERNSEC_CHROOT_SHMAT policy.  Returns 1 to allow, 0 to deny.
 */
int
gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
		const time_t shm_createtime)
{
#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
	struct task_struct *p;
	time_t starttime;

	if (unlikely(!grsec_enable_chroot_shmat))
		return 1;

	if (likely(!proc_is_chrooted(current)))
		return 1;

	rcu_read_lock();
	read_lock(&tasklist_lock);

	if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
		starttime = p->start_time.tv_sec;
		/* Task started before the segment was created, so it really
		 * is the creator (not a reused pid): its root decides. */
		if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
			if (have_same_root(current, p)) {
				goto allow;
			} else {
				read_unlock(&tasklist_lock);
				rcu_read_unlock();
				gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
				return 0;
			}
		}
		/* creator exited, pid reuse, fall through to next check */
	}
	/* Fall back to the last pid that attached the segment. */
	if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
		if (unlikely(!have_same_root(current, p))) {
			read_unlock(&tasklist_lock);
			rcu_read_unlock();
			gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
			return 0;
		}
	}

allow:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();
#endif
	return 1;
}
Ejemplo n.º 24
0
/**
 * iwl_scan_cancel_timeout - Cancel any currently executing HW scan
 * @ms: amount of time to wait (in milliseconds) for scan to abort
 *
 * Caller must hold priv->mutex.  Returns nonzero if STATUS_SCAN_HW is
 * still set after the wait (i.e. the abort did not complete in time).
 */
int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(ms);

	lockdep_assert_held(&priv->mutex);

	IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");

	iwl_do_scan_abort(priv);

	/* Poll for the hardware scan bit to clear, sleeping 20ms between
	 * checks, until the deadline passes or the bit drops. */
	for (; time_before_eq(jiffies, deadline); msleep(20)) {
		if (!test_bit(STATUS_SCAN_HW, &priv->status))
			break;
	}

	return test_bit(STATUS_SCAN_HW, &priv->status);
}
Ejemplo n.º 25
0
/*
 * gr_log_start - begin composing a grsecurity log line.
 *
 * Applies flood control to alert-level (non-audit) messages: after
 * CONFIG_GRKERNSEC_FLOODBURST alerts within CONFIG_GRKERNSEC_FLOODTIME
 * seconds, further alerts are suppressed until the window resets.  On
 * success, formats the log prefix (level, source IP, role/subject) into
 * the appropriate buffer.  Returns FLOODING when suppressed, otherwise
 * NO_FLOODING.
 */
static int gr_log_start(int audit)
{
	char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
	char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
	char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
	unsigned long curr_secs = get_seconds();

	/* Audit messages are never flood-limited. */
	if (audit == GR_DO_AUDIT)
		goto set_fmt;

	if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
		/* Window expired (or first alert): start a new window. */
		grsec_alert_wtime = curr_secs;
		grsec_alert_fyet = 0;
	} else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
		    && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
		/* Still inside the window and under the burst limit. */
		grsec_alert_fyet++;
	} else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
		/* Limit just reached: announce the suppression once. */
		grsec_alert_wtime = curr_secs;
		grsec_alert_fyet++;
		printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
		return FLOODING;
	}
	else return FLOODING;

set_fmt:
#endif
	memset(buf, 0, PAGE_SIZE);
	/* Build the prefix; fmt is a writable scratch buffer, not a literal. */
	if (current->signal->curr_ip && gr_acl_is_enabled()) {
		sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
		snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
	} else if (current->signal->curr_ip) {
		sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
		snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
	} else if (gr_acl_is_enabled()) {
		sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
		snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
	} else {
		sprintf(fmt, "%s%s", loglevel, "grsec: ");
		strcpy(buf, fmt);
	}

	return NO_FLOODING;
}
Ejemplo n.º 26
0
/*
 * musb_tx_zlp_qmu - send a zero-length packet on a TX endpoint via PIO.
 *
 * Temporarily disables DMA on the endpoint, sets TXPKTRDY, and polls for
 * up to one second for the hardware to clear it (packet sent), then
 * re-enables DMA for the QMU.
 */
void musb_tx_zlp_qmu(struct musb *musb, u32 ep_num)
{
	/* sent ZLP through PIO */
	void __iomem *epio = musb->endpoints[ep_num].regs;
	void __iomem *mbase = musb->mregs;
	unsigned long timeout = jiffies + HZ;
	int is_timeout = 1;
	u16 csr;

	QMU_WARN("TX ZLP direct sent\n");
	musb_ep_select(mbase, ep_num);

	/* disable dma for pio */
	csr = musb_readw(epio, MUSB_TXCSR);
	csr &= ~MUSB_TXCSR_DMAENAB;
	musb_writew(epio, MUSB_TXCSR, csr);

	/* TXPKTRDY */
	csr = musb_readw(epio, MUSB_TXCSR);
	csr |= MUSB_TXCSR_TXPKTRDY;
	musb_writew(epio, MUSB_TXCSR, csr);

	/* wait ZLP sent */
	while (time_before_eq(jiffies, timeout)) {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & MUSB_TXCSR_TXPKTRDY)) {
			is_timeout = 0;
			break;
		}
		/* Tight register poll for up to 1s; be polite to the CPU. */
		cpu_relax();
	}

	/* re-enable dma for qmu */
	csr = musb_readw(epio, MUSB_TXCSR);
	csr |= MUSB_TXCSR_DMAENAB;
	musb_writew(epio, MUSB_TXCSR, csr);

	/* Only report success when the packet actually went out; the
	 * original code logged "sent done" even after a timeout. */
	if (is_timeout)
		QMU_ERR("TX ZLP sent fail???\n");
	else
		QMU_WARN("TX ZLP sent done\n");
}
Ejemplo n.º 27
0
// Poll the hardware-ready callback until it reports ready or the requested
// number of jiffies elapses, yielding the CPU between polls with a backoff
// that grows from CONTROLLER_COMMON_TIMEOUT_MIN up to
// CONTROLLER_COMMON_TIMEOUT_MAX jiffies.  Returns EINKFB_SUCCESS when the
// hardware became ready, EINKFB_FAILURE on timeout.
static int einkfb_schedule_timeout_guts(unsigned long hardware_timeout, einkfb_hardware_ready_t hardware_ready, void *data, bool interruptible)
{
    unsigned long start_time = jiffies, stop_time = start_time + hardware_timeout,
        timeout = CONTROLLER_COMMON_TIMEOUT_MIN;
    int result = EINKFB_SUCCESS;

    // Ask the hardware whether it's ready or not.  And, if it's not ready, start yielding
    // the CPU for CONTROLLER_COMMON_TIMEOUT_MIN jiffies, increasing the yield time up to
    // CONTROLLER_COMMON_TIMEOUT_MAX jiffies.  Time out after the requested number of
    // of jiffies has occurred.
    //
    while ( !(*hardware_ready)(data) && time_before_eq(jiffies, stop_time) )
    {
        // The original `timeout = min(timeout++, MAX)` modified `timeout`
        // twice with no sequence point (undefined behavior, and a double
        // increment if min() is a macro); increment it exactly once.
        timeout = min(timeout + 1, (unsigned long)CONTROLLER_COMMON_TIMEOUT_MAX);

        if ( interruptible )
            schedule_timeout_interruptible(timeout);
        else
            schedule_timeout(timeout);
    }

    if ( time_after(jiffies, stop_time) )
    {
       einkfb_print_crit("Timed out waiting for the hardware to become ready!\n");
       result = EINKFB_FAILURE;
    }
    else
    {
        // For debugging purposes, dump the time it took for the hardware to
        // become ready if it was more than CONTROLLER_COMMON_TIMEOUT_MAX.
        //
        stop_time = jiffies - start_time;

        if ( CONTROLLER_COMMON_TIMEOUT_MAX < stop_time )
            einkfb_debug("Timeout time = %ld\n", stop_time);
    }

    return ( result );
}
Ejemplo n.º 28
0
/*
 * Process delayed final ACKs that we haven't subsumed into a subsequent call.
 *
 * For each channel flagged for a final ACK: retransmit the ACK if its due
 * time has passed, otherwise remember the earliest pending due time.  If
 * time moved past the earliest pending deadline while scanning, rescan;
 * otherwise re-arm the connection timer for that deadline.
 */
static void rxrpc_process_delayed_final_acks(struct rxrpc_connection *conn)
{
	unsigned long j = jiffies, next_j;
	unsigned int channel;
	bool set;

again:
	/* Far-future sentinel; pulled earlier by any pending channel. */
	next_j = j + LONG_MAX;
	set = false;
	for (channel = 0; channel < RXRPC_MAXCALLS; channel++) {
		struct rxrpc_channel *chan = &conn->channels[channel];
		unsigned long ack_at;

		if (!test_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags))
			continue;

		smp_rmb(); /* vs rxrpc_disconnect_client_call */
		ack_at = READ_ONCE(chan->final_ack_at);

		if (time_before(j, ack_at)) {
			/* Not due yet: track the earliest pending deadline. */
			if (time_before(ack_at, next_j)) {
				next_j = ack_at;
				set = true;
			}
			continue;
		}

		/* Due now; test-and-clear races with the call path. */
		if (test_and_clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel,
				       &conn->flags))
			rxrpc_conn_retransmit_call(conn, NULL, channel);
	}

	/* Time may have advanced past the next deadline while we scanned. */
	j = jiffies;
	if (time_before_eq(next_j, j))
		goto again;
	if (set)
		rxrpc_reduce_conn_timer(conn, next_j);
}
Ejemplo n.º 29
0
/*
 * Insert a blocked lock into the global list
 *
 * The list is kept sorted by absolute expiry time (b_when), with
 * NLM_NEVER entries collected at the tail.  `when` arrives as a relative
 * timeout and is converted to an absolute jiffies value here.
 */
static void
nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
{
	struct nlm_block **bp, *b;

	dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
	/* Re-inserting: take it off the list first. */
	if (block->b_queued)
		nlmsvc_remove_block(block);
	bp = &nlm_blocked;
	if (when != NLM_NEVER) {
		/* Convert to absolute time, saturating at NLM_NEVER. */
		if ((when += jiffies) > NLM_NEVER)
			when = NLM_NEVER;
		/* Walk to the first entry that expires after us. */
		while ((b = *bp) && time_before_eq(b->b_when,when) && b->b_when != NLM_NEVER)
			bp = &b->b_next;
	} else
		/* Never-expiring blocks go at the very end. */
		while ((b = *bp))
			bp = &b->b_next;

	/* Splice in via the pointer-to-pointer cursor. */
	block->b_queued = 1;
	block->b_when = when;
	block->b_next = b;
	*bp = block;
}
Ejemplo n.º 30
0
/*
 * button_irq_handler - headset-button interrupt handler with debounce.
 *
 * Cancels the pending debounce work if key events arrive too close
 * together, masks and re-arms its own IRQ with inverted polarity (so the
 * release edge is caught next), and schedules the debounced GPIO read.
 */
static irqreturn_t button_irq_handler(int irq, void *dev_id)
{
	unsigned int irq_mask = IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW;

	HS_DBG();

	/* Two events inside the unstable window: treat as bounce and
	 * cancel the previously queued button work. */
	if(time_before_eq(jiffies, last_key_jiffies+unstable_jiffies) && last_key_jiffies != 0) {
		queue_delayed_work(button_wq, &cancel_button_work, HS_JIFFIES_ZERO);
		HS_LOG("The KEY event is unstable,remove debounce.");
	}

	/* Mask the IRQ now; re-enabled later by hs_key_irq_enable work. */
	disable_irq_nosync(hi->key_irq);
	queue_delayed_work(button_wq, &hs_key_irq_enable, irq_delay);

	/* Flip trigger polarity to catch the opposite edge next time. */
	hi->key_irq_type ^= irq_mask;
	irq_set_irq_type(hi->key_irq, hi->key_irq_type);

	wake_lock_timeout(&hi->hs_wake_lock, HS_WAKE_LOCK_TIMEOUT);
	queue_delayed_work(button_wq, &button_gpio_work, unstable_jiffies);
	last_key_jiffies = jiffies;

	return IRQ_HANDLED;
}