void hvm_pci_intx_assert(
        struct domain *d, unsigned int device, unsigned int intx)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int gsi;

    ASSERT((device <= 31) && (intx <= 3));

    if ( __test_and_set_bit(device * 4 + intx, &hvm_irq->pci_intx.i) )
        return;
    gsi = hvm_pci_intx_gsi(device, intx);
    if ( ++hvm_irq->gsi_assert_count[gsi] == 1 )
        viosapic_set_irq(d, gsi, 1);
}
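Every example on this page uses __test_and_set_bit(), the non-atomic bitops variant: it sets bit nr in the bitmap at addr and returns the previous value of that bit, so "claim this slot only if it was free" takes a single call. Because it is non-atomic, callers must provide their own serialization (a spinlock, a mutex, or a single-threaded context). A minimal user-space sketch of the semantics, not the kernel implementation:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Mirrors the kernel contract: set the bit, return its old value. */
static int test_and_set_bit_sketch(unsigned int nr, unsigned long *addr)
{
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	unsigned long *word = addr + nr / BITS_PER_LONG;
	int old = (*word & mask) != 0;

	*word |= mask;
	return old;
}

int main(void)
{
	unsigned long map[2] = { 0, 0 };

	printf("first claim : %d\n", test_and_set_bit_sketch(5, map)); /* 0 */
	printf("second claim: %d\n", test_and_set_bit_sketch(5, map)); /* 1 */
	return 0;
}

In the snippet above the same pattern appears as an early-out: if the INTx wire was already asserted, the bit was already set and the function returns without touching the GSI counters.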
Example 2
void hvm_isa_irq_assert(
    struct domain *d, unsigned int isa_irq)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);

    ASSERT(isa_irq <= 15);

    spin_lock(&d->arch.hvm_domain.irq_lock);

    if ( !__test_and_set_bit(isa_irq, &hvm_irq->isa_irq.i) &&
            (hvm_irq->gsi_assert_count[gsi]++ == 0) )
        assert_irq(d, gsi, isa_irq);

    spin_unlock(&d->arch.hvm_domain.irq_lock);
}
Example 3
int __ipipe_spin_trylock_irqsave(ipipe_spinlock_t *lock,
				 unsigned long *x)
{
	unsigned long flags;
	int s;

	flags = hard_local_irq_save();
	if (!arch_spin_trylock(&lock->arch_lock)) {
		hard_local_irq_restore(flags);
		return 0;
	}
	s = __test_and_set_bit(IPIPE_STALL_FLAG, &__ipipe_current_context->status);
	*x = arch_mangle_irq_bits(s, flags);

	return 1;
}
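A hedged sketch of how a caller typically consumes this trylock variant; the matching release primitive __ipipe_spin_unlock_irqrestore() is an assumption (named by analogy with the I-pipe API), not taken from the snippet:

/* Sketch only: try to enter a critical section, backing off on contention. */
static int try_do_work(ipipe_spinlock_t *lock)
{
	unsigned long flags;

	if (!__ipipe_spin_trylock_irqsave(lock, &flags))
		return 0;	/* lock contended: caller retries later */

	/* ... critical section: hard IRQs off, stall bit recorded in flags ... */

	__ipipe_spin_unlock_irqrestore(lock, flags);	/* assumed counterpart */
	return 1;
}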
Example 4
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	unsigned long irqflags;
	void *ret = NULL;
	int pos;

	write_lock_irqsave(&pmb_rwlock, irqflags);

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (unlikely(pos < 0)) {
			ret = ERR_PTR(pos);
			goto out;
		}
	} else {
		if (__test_and_set_bit(entry, pmb_map)) {
			ret = ERR_PTR(-ENOSPC);
			goto out;
		}

		pos = entry;
	}

	write_unlock_irqrestore(&pmb_rwlock, irqflags);

	pmbe = &pmb_entry_list[pos];

	memset(pmbe, 0, sizeof(struct pmb_entry));

	spin_lock_init(&pmbe->lock);

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;
	pmbe->entry	= pos;

	return pmbe;

out:
	write_unlock_irqrestore(&pmb_rwlock, irqflags);
	return ret;
}
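The release path implied by pmb_alloc() is the mirror image: clear the same pmb_map bit under the same write lock. A sketch using only names from the snippet; the function shape is an assumption, not the driver's actual free routine:

/* Sketch: return a PMB slot claimed by pmb_alloc() to the bitmap. */
static void pmb_free_slot_sketch(struct pmb_entry *pmbe)
{
	unsigned long irqflags;

	write_lock_irqsave(&pmb_rwlock, irqflags);
	__clear_bit(pmbe->entry, pmb_map);
	write_unlock_irqrestore(&pmb_rwlock, irqflags);
}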
Example 5
static enum hrtimer_restart gpio_keypad_timer_func(struct hrtimer *timer)
{
	int out, in;
	int key_index;
	int gpio;
	struct gpio_kp *kp = container_of(timer, struct gpio_kp, timer);
	struct gpio_event_matrix_info *mi = kp->keypad_info;
	unsigned gpio_keypad_flags = mi->flags;
	unsigned polarity = !!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH);

	out = kp->current_output;
	if (out == mi->noutputs) {
		out = 0;
		kp->last_key_state_changed = kp->key_state_changed;
		kp->key_state_changed = 0;
		kp->some_keys_pressed = 0;
	} else {
		key_index = out * mi->ninputs;
		for (in = 0; in < mi->ninputs; in++, key_index++) {
			gpio = mi->input_gpios[in];
			if (gpio_get_value(gpio) ^ !polarity) {
				if (kp->some_keys_pressed < 3)
					kp->some_keys_pressed++;
				kp->key_state_changed |= !__test_and_set_bit(
						key_index, kp->keys_pressed);
			} else
				kp->key_state_changed |= __test_and_clear_bit(
						key_index, kp->keys_pressed);
		}
		gpio = mi->output_gpios[out];
		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
#ifdef ROW_SCAN   //ZTE_KB_ZFJ_20110325
		{
			gpio_set_value(gpio, !polarity);
			gpio_direction_input(gpio);
		}
#else
			gpio_set_value(gpio, !polarity);
#endif	
		else
			gpio_direction_input(gpio);
		out++;
	}
	kp->current_output = out;
	/* Tail restored from the stock gpio_matrix scan path (see Example 16);
	 * the ZTE copy was truncated here and may differ in its ROW_SCAN paths. */
	if (out < mi->noutputs) {
		gpio = mi->output_gpios[out];
		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
			gpio_set_value(gpio, polarity);
		else
			gpio_direction_output(gpio, polarity);
		hrtimer_start(timer, mi->settle_time, HRTIMER_MODE_REL);
		return HRTIMER_NORESTART;
	}
	if (gpio_keypad_flags & GPIOKPF_DEBOUNCE) {
		if (kp->key_state_changed) {
			hrtimer_start(&kp->timer, mi->debounce_delay,
				      HRTIMER_MODE_REL);
			return HRTIMER_NORESTART;
		}
		kp->key_state_changed = kp->last_key_state_changed;
	}
	if (kp->key_state_changed) {
		if (gpio_keypad_flags & GPIOKPF_REMOVE_SOME_PHANTOM_KEYS)
			remove_phantom_keys(kp);
		key_index = 0;
		for (out = 0; out < mi->noutputs; out++)
			for (in = 0; in < mi->ninputs; in++, key_index++)
				report_key(kp, key_index, out, in);
	}
	if (!kp->use_irq || kp->some_keys_pressed) {
		hrtimer_start(timer, mi->poll_time, HRTIMER_MODE_REL);
		return HRTIMER_NORESTART;
	}

	/* No keys are pressed, reenable interrupt */
	for (out = 0; out < mi->noutputs; out++) {
		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
			gpio_set_value(mi->output_gpios[out], polarity);
		else
			gpio_direction_output(mi->output_gpios[out], polarity);
	}
	for (in = 0; in < mi->ninputs; in++)
		enable_irq(gpio_to_irq(mi->input_gpios[in]));
	wake_unlock(&kp->wake_lock);
	return HRTIMER_NORESTART;
}
Example 6
/*
 * Function called when an ioctl is performed on the event dev entry.
 * It uploads an effect to the device
 */
static int iforce_upload_effect(struct input_dev *dev, struct ff_effect *effect, struct ff_effect *old)
{
	struct iforce *iforce = input_get_drvdata(dev);
	struct iforce_core_effect *core_effect = &iforce->core_effects[effect->id];
	int ret;

	if (__test_and_set_bit(FF_CORE_IS_USED, core_effect->flags)) {
		/* Check the effect is not already being updated */
		if (test_bit(FF_CORE_UPDATE, core_effect->flags))
			return -EAGAIN;
	}

/*
 * Upload the effect
 */
	switch (effect->type) {

		case FF_PERIODIC:
			ret = iforce_upload_periodic(iforce, effect, old);
			break;

		case FF_CONSTANT:
			ret = iforce_upload_constant(iforce, effect, old);
			break;

		case FF_SPRING:
		case FF_DAMPER:
			ret = iforce_upload_condition(iforce, effect, old);
			break;

		default:
			return -EINVAL;
	}

	if (ret == 0) {
		/* A packet was sent, forbid new updates until we are notified
		 * that the packet was updated
		 */
		set_bit(FF_CORE_UPDATE, core_effect->flags);
	}
	return ret;
}
Example 7
static void __hvm_pci_intx_assert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int gsi, link, isa_irq;

    ASSERT((device <= 31) && (intx <= 3));

    if ( __test_and_set_bit(device*4 + intx, &hvm_irq->pci_intx.i) )
        return;

    gsi = hvm_pci_intx_gsi(device, intx);
    if ( hvm_irq->gsi_assert_count[gsi]++ == 0 )
        assert_gsi(d, gsi);

    link    = hvm_pci_intx_link(device, intx);
    isa_irq = hvm_irq->pci_link.route[link];
    if ( (hvm_irq->pci_link_assert_count[link]++ == 0) && isa_irq &&
            (hvm_irq->gsi_assert_count[isa_irq]++ == 0) )
        assert_irq(d, isa_irq, isa_irq);
}
Example 8
/**
 * media_entity_graph_walk_next - Get the next entity in the graph
 * @graph: Media graph structure
 *
 * Perform a depth-first traversal of the given media entities graph.
 *
 * The graph structure must have been previously initialized with a call to
 * media_entity_graph_walk_start().
 *
 * Return the next entity in the graph or NULL if the whole graph has been
 * traversed.
 */
struct media_entity *
media_entity_graph_walk_next(struct media_entity_graph *graph)
{
	if (stack_top(graph) == NULL)
		return NULL;

	/*
	 * Depth first search. Push entity to stack and continue from
	 * top of the stack until no more entities on the level can be
	 * found.
	 */
	while (link_top(graph) < stack_top(graph)->num_links) {
		struct media_entity *entity = stack_top(graph);
		struct media_link *link = &entity->links[link_top(graph)];
		struct media_entity *next;

		/* The link is not enabled so we do not follow. */
		if (!(link->flags & MEDIA_LNK_FL_ENABLED)) {
			link_top(graph)++;
			continue;
		}

		/* Get the entity at the other end of the link. */
		next = media_entity_other(entity, link);
		if (WARN_ON(next->id >= MEDIA_ENTITY_ENUM_MAX_ID))
			return NULL;

		/* Has the entity already been visited? */
		if (__test_and_set_bit(next->id, graph->entities)) {
			link_top(graph)++;
			continue;
		}

		/* Push the new entity to stack and start over. */
		link_top(graph)++;
		stack_push(graph, next);
	}

	return stack_pop(graph);
}
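A hedged usage sketch of the walk API: media_entity_graph_walk_start() is the initializer named in the kernel-doc above; the loop shape and the counting helper are assumptions for illustration:

/* Sketch: count every entity reachable from 'start' over enabled links. */
static unsigned int count_connected_entities(struct media_entity *start)
{
	struct media_entity_graph graph;
	struct media_entity *entity;
	unsigned int n = 0;

	media_entity_graph_walk_start(&graph, start);
	while ((entity = media_entity_graph_walk_next(&graph)) != NULL)
		n++;	/* 'start' itself is visited first */

	return n;
}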
Example 9
int __opal_async_get_token(void)
{
	unsigned long flags;
	int token;

	spin_lock_irqsave(&opal_async_comp_lock, flags);
	token = find_first_bit(opal_async_complete_map, opal_max_async_tokens);
	if (token >= opal_max_async_tokens) {
		token = -EBUSY;
		goto out;
	}

	if (__test_and_set_bit(token, opal_async_token_map)) {
		token = -EBUSY;
		goto out;
	}

	__clear_bit(token, opal_async_complete_map);

out:
	spin_unlock_irqrestore(&opal_async_comp_lock, flags);
	return token;
}
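The token returned here indexes two bitmaps: opal_async_token_map marks tokens in use, opal_async_complete_map marks tokens whose OPAL call has completed. A hedged sketch of the expected lifecycle; the release helper opal_async_release_token() is an assumption, not shown in the snippet:

/* Sketch: claim a token, run an async OPAL call, give the token back. */
static int opal_async_op_sketch(void)
{
	int token = __opal_async_get_token();

	if (token < 0)
		return token;	/* -EBUSY: every token is in flight */

	/* ... issue the OPAL call with 'token' and wait for completion ... */

	opal_async_release_token(token);	/* assumed counterpart */
	return 0;
}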
Example 10
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	unsigned int i, id, cpu = smp_processor_id();
	unsigned long *map;

	/* No lockless fast path .. yet */
	raw_spin_lock(&context_lock);

	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
		cpu, next, next->context.active, next->context.id);

#ifdef CONFIG_SMP
	/* Mark us active and the previous one not anymore */
	next->context.active++;
	if (prev) {
		pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
		WARN_ON(prev->context.active < 1);
		prev->context.active--;
	}

 again:
#endif /* CONFIG_SMP */

	/* If we already have a valid assigned context, skip all that */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT)) {
#ifdef DEBUG_MAP_CONSISTENCY
		if (context_mm[id] != next)
			pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
			       next, id, id, context_mm[id]);
#endif
		goto ctxt_ok;
	}

	/* We really don't have a context, let's try to acquire one */
	id = next_context;
	if (id > last_context)
		id = first_context;
	map = context_map;

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
#endif /* CONFIG_SMP */
		if (no_selective_tlbil)
			id = steal_all_contexts();
		else
			id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* We know there's at least one free context, try to find it */
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, last_context+1, id);
		if (id > last_context)
			id = first_context;
	}
 stolen:
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;
	pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);

	context_check_map();
 ctxt_ok:

	/* If that context got marked stale on this CPU, then flush the
	 * local TLB for it and unmark it before we use it
	 */
	if (test_bit(id, stale_map[cpu])) {
		pr_hardcont(" | stale flush %d [%d..%d]",
			    id, cpu_first_thread_sibling(cpu),
			    cpu_last_thread_sibling(cpu));

		local_flush_tlb_mm(next);

		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
		for (i = cpu_first_thread_sibling(cpu);
		     i <= cpu_last_thread_sibling(cpu); i++) {
			if (stale_map[i])
				__clear_bit(id, stale_map[i]);
		}
	}

	/* Flick the MMU and release lock */
	pr_hardcont(" -> %d\n", id);
	set_context(id, next->pgd);
	raw_spin_unlock(&context_lock);
}
Example 11
/*
 * __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
 * interrupt protection log is maintained here for each domain. Hw
 * interrupts are masked on entry.
 */
void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
{
	struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
	struct ipipe_domain *this_domain, *next_domain;
	struct list_head *head, *pos;
	int m_ack, s = -1;

	/*
	 * Software-triggered IRQs do not need any ack.  The contents
	 * of the register frame should only be used when processing
	 * the timer interrupt, but not for handling any other
	 * interrupt.
	 */
	m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);
	this_domain = __ipipe_current_domain;

	if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control)))
		head = &this_domain->p_link;
	else {
		head = __ipipe_pipeline.next;
		next_domain = list_entry(head, struct ipipe_domain, p_link);
		if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
			if (!m_ack && next_domain->irqs[irq].acknowledge != NULL)
				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
			if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
				s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);
			__ipipe_dispatch_wired(next_domain, irq);
			goto out;
		}
	}

	/* Ack the interrupt. */

	pos = head;
	while (pos != &__ipipe_pipeline) {
		next_domain = list_entry(pos, struct ipipe_domain, p_link);
		if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
			__ipipe_set_irq_pending(next_domain, irq);
			if (!m_ack && next_domain->irqs[irq].acknowledge != NULL) {
				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
				m_ack = 1;
			}
		}
		if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
			break;
		pos = next_domain->p_link.next;
	}

	/*
	 * Now walk the pipeline, yielding control to the highest
	 * priority domain that has pending interrupt(s) or
	 * immediately to the current domain if the interrupt has been
	 * marked as 'sticky'. This search does not go beyond the
	 * current domain in the pipeline. We also enforce the
	 * additional root stage lock (blackfin-specific).
	 */
	if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
		s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);

	/*
	 * If the interrupt preempted the head domain, then do not
	 * even try to walk the pipeline, unless an interrupt is
	 * pending for it.
	 */
	if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
	    ipipe_head_cpudom_var(irqpend_himask) == 0)
		goto out;

	__ipipe_walk_pipeline(head);
out:
	if (!s)
		__clear_bit(IPIPE_STALL_FLAG, &p->status);
}
Example 12
static int push_pxx_to_hypervisor(struct acpi_processor *_pr)
{
	int ret = 0;
	struct xen_platform_op op = {
		.cmd			= XENPF_set_processor_pminfo,
		.interface_version	= XENPF_INTERFACE_VERSION,
		.u.set_pminfo.id	= _pr->acpi_id,
		.u.set_pminfo.type	= XEN_PM_PX,
	};
	struct xen_processor_performance *dst_perf;
	struct xen_processor_px *dst_states = NULL;

	dst_perf = &op.u.set_pminfo.perf;

	dst_perf->platform_limit = _pr->performance_platform_limit;
	dst_perf->flags |= XEN_PX_PPC;
	xen_copy_pct_data(&(_pr->performance->control_register),
			  &dst_perf->control_register);
	xen_copy_pct_data(&(_pr->performance->status_register),
			  &dst_perf->status_register);
	dst_perf->flags |= XEN_PX_PCT;
	dst_states = xen_copy_pss_data(_pr, dst_perf);
	if (!IS_ERR_OR_NULL(dst_states)) {
		set_xen_guest_handle(dst_perf->states, dst_states);
		dst_perf->flags |= XEN_PX_PSS;
	}
	if (!xen_copy_psd_data(_pr, dst_perf))
		dst_perf->flags |= XEN_PX_PSD;

	if (dst_perf->flags != (XEN_PX_PSD | XEN_PX_PSS | XEN_PX_PCT | XEN_PX_PPC)) {
		pr_warn(DRV_NAME "ACPI CPU%u missing some P-state data (%x), skipping.\n",
			_pr->acpi_id, dst_perf->flags);
		ret = -ENODEV;
		goto err_free;
	}

	if (!no_hypercall)
		ret = HYPERVISOR_dom0_op(&op);

	if (!ret) {
		struct acpi_processor_performance *perf;
		unsigned int i;

		perf = _pr->performance;
		pr_debug("ACPI CPU%u - P-states uploaded.\n", _pr->acpi_id);
		for (i = 0; i < perf->state_count; i++) {
			pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
			(i == perf->state ? '*' : ' '), i,
			(u32) perf->states[i].core_frequency,
			(u32) perf->states[i].power,
			(u32) perf->states[i].transition_latency);
		}
	} else if (ret != -EINVAL)
		pr_warn(DRV_NAME "(_PXX): Hypervisor error (%d) for ACPI CPU%u\n",
		       ret, _pr->acpi_id);
err_free:
	if (!IS_ERR_OR_NULL(dst_states))
		kfree(dst_states);

	return ret;
}
static int upload_pm_data(struct acpi_processor *_pr)
{
	int err = 0;

	mutex_lock(&acpi_ids_mutex);
	if (__test_and_set_bit(_pr->acpi_id, acpi_ids_done)) {
		mutex_unlock(&acpi_ids_mutex);
		return -EBUSY;
	}
	if (_pr->flags.power)
		err = push_cxx_to_hypervisor(_pr);

	if (_pr->performance && _pr->performance->states)
		err |= push_pxx_to_hypervisor(_pr);

	mutex_unlock(&acpi_ids_mutex);
	return err;
}
Example 13
/* Insert the logic block into the volume info */
static int ubi_add_peb_to_vol(struct ubi_scan_info *ubi,
			      struct ubi_vid_hdr *vh, u32 vol_id,
			      u32 pnum, u32 lnum)
{
	struct ubi_vol_info *vi = ubi->volinfo + vol_id;
	u32 *ltp;

	/*
	 * If the volume is larger than expected, yell and give up :(
	 */
	if (lnum >= UBI_MAX_VOL_LEBS) {
		ubi_warn("Vol: %u LEB %d > %d", vol_id, lnum, UBI_MAX_VOL_LEBS);
		return -EINVAL;
	}

	ubi_dbg("SC: Add PEB %u to Vol %u as LEB %u fnd %d sc %d",
		pnum, vol_id, lnum, !!test_bit(lnum, vi->found),
		!!test_bit(pnum, ubi->scanned));

	/* Points to the translation entry */
	ltp = vi->lebs_to_pebs + lnum;

	/* If the block is already assigned, check sqnum */
	if (__test_and_set_bit(lnum, vi->found)) {
		u32 cur_pnum = *ltp;
		struct ubi_vid_hdr *cur = ubi->blockinfo + cur_pnum;

		/*
		 * If the current block has not yet been scanned, we
		 * need to do that. The other block might be stale or
		 * the current block corrupted and the FM not yet
		 * updated.
		 */
		if (!test_bit(cur_pnum, ubi->scanned)) {
			/*
			 * If the scan fails, we use the valid block
			 */
			if (ubi_rescan_fm_vid_hdr(ubi, cur, cur_pnum, vol_id,
						  lnum)) {
				*ltp = pnum;
				return 0;
			}
		}

		/*
		 * Should not happen ....
		 */
		if (test_bit(cur_pnum, ubi->corrupt)) {
			*ltp = pnum;
			return 0;
		}

		ubi_dbg("Vol %u LEB %u PEB %u->sqnum %llu NPEB %u->sqnum %llu",
			vol_id, lnum, cur_pnum, be64_to_cpu(cur->sqnum), pnum,
			be64_to_cpu(vh->sqnum));

		/*
		 * Compare sqnum and take the newer one
		 */
		if (be64_to_cpu(cur->sqnum) < be64_to_cpu(vh->sqnum))
			*ltp = pnum;
	} else {
		*ltp = pnum;
		if (lnum > vi->last_block)
			vi->last_block = lnum;
	}

	return 0;
}
Example 14
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned int id, cpu = smp_processor_id();
	unsigned long *map;

	/* No lockless fast path .. yet */
	atomic_spin_lock(&context_lock);

#ifndef DEBUG_STEAL_ONLY
	pr_devel("[%d] activating context for mm @%p, active=%d, id=%d\n",
		 cpu, next, next->context.active, next->context.id);
#endif

#ifdef CONFIG_SMP
	/* Mark us active and the previous one not anymore */
	next->context.active++;
	if (prev) {
#ifndef DEBUG_STEAL_ONLY
		pr_devel(" old context %p active was: %d\n",
			 prev, prev->context.active);
#endif
		WARN_ON(prev->context.active < 1);
		prev->context.active--;
	}

 again:
#endif /* CONFIG_SMP */

	/* If we already have a valid assigned context, skip all that */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT))
		goto ctxt_ok;

	/* We really don't have a context, let's try to acquire one */
	id = next_context;
	if (id > last_context)
		id = first_context;
	map = context_map;

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
#endif /* CONFIG_SMP */
		id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* We know there's at least one free context, try to find it */
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, last_context+1, id);
		if (id > last_context)
			id = first_context;
	}
 stolen:
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;

#ifndef DEBUG_STEAL_ONLY
	pr_devel("[%d] picked up new id %d, nrf is now %d\n",
		 cpu, id, nr_free_contexts);
#endif

	context_check_map();
 ctxt_ok:

	/* If that context got marked stale on this CPU, then flush the
	 * local TLB for it and unmark it before we use it
	 */
	if (test_bit(id, stale_map[cpu])) {
		pr_devel("[%d] flushing stale context %d for mm @%p !\n",
			 cpu, id, next);
		local_flush_tlb_mm(next);

		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
		__clear_bit(id, stale_map[cpu]);
	}

	/* Flick the MMU and release lock */
	set_context(id, next->pgd);
	atomic_spin_unlock(&context_lock);
}
Example 15
int rt2x00mac_add_interface(struct ieee80211_hw *hw,
			    struct ieee80211_if_init_conf *conf)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct rt2x00_intf *intf = vif_to_intf(conf->vif);
	struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON);
	struct queue_entry *entry = NULL;
	unsigned int i;

	/*
	 * Don't allow interfaces to be added
	 * when the device has disappeared.
	 */
	if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags) ||
	    !test_bit(DEVICE_STARTED, &rt2x00dev->flags))
		return -ENODEV;

	switch (conf->type) {
	case IEEE80211_IF_TYPE_AP:
		/*
		 * We don't support mixed combinations of
		 * sta and ap interfaces.
		 */
		if (rt2x00dev->intf_sta_count)
			return -ENOBUFS;

		/*
		 * Check if we exceeded the maximum amount
		 * of supported interfaces.
		 */
		if (rt2x00dev->intf_ap_count >= rt2x00dev->ops->max_ap_intf)
			return -ENOBUFS;

		break;
	case IEEE80211_IF_TYPE_STA:
	case IEEE80211_IF_TYPE_IBSS:
		/*
		 * We don't support mixed combinations of
		 * sta and ap interfaces.
		 */
		if (rt2x00dev->intf_ap_count)
			return -ENOBUFS;

		/*
		 * Check if we exceeded the maximum amount
		 * of supported interfaces.
		 */
		if (rt2x00dev->intf_sta_count >= rt2x00dev->ops->max_sta_intf)
			return -ENOBUFS;

		break;
	default:
		return -EINVAL;
	}

	/*
	 * Loop through all beacon queues to find a free
	 * entry. Since there are as many beacon entries
	 * as the maximum number of interfaces, this search
	 * shouldn't fail.
	 */
	for (i = 0; i < queue->limit; i++) {
		entry = &queue->entries[i];
		if (!__test_and_set_bit(ENTRY_BCN_ASSIGNED, &entry->flags))
			break;
	}

	if (unlikely(i == queue->limit))
		return -ENOBUFS;

	/*
	 * We are now absolutely sure the interface can be created,
	 * increase interface count and start initialization.
	 */

	if (conf->type == IEEE80211_IF_TYPE_AP)
		rt2x00dev->intf_ap_count++;
	else
		rt2x00dev->intf_sta_count++;

	spin_lock_init(&intf->lock);
	spin_lock_init(&intf->seqlock);
	intf->beacon = entry;

	if (conf->type == IEEE80211_IF_TYPE_AP)
		memcpy(&intf->bssid, conf->mac_addr, ETH_ALEN);
	memcpy(&intf->mac, conf->mac_addr, ETH_ALEN);

	/*
	 * The MAC address must be configured after the device
	 * has been initialized. Otherwise the device can reset
	 * the MAC registers.
	 */
	rt2x00lib_config_intf(rt2x00dev, intf, conf->type, intf->mac, NULL);

	/*
	 * Some filters depend on the current working mode. We can force
	 * an update during the next configure_filter() run by mac80211 by
	 * resetting the current packet_filter state.
	 */
	rt2x00dev->packet_filter = 0;

	return 0;
}
Example 16
static enum hrtimer_restart gpio_keypad_timer_func(struct hrtimer *timer)
{
	int out, in;
	int key_index;
	int gpio;
	struct gpio_kp *kp = container_of(timer, struct gpio_kp, timer);
	struct gpio_event_matrix_info *mi = kp->keypad_info;
	unsigned gpio_keypad_flags = mi->flags;
#ifdef CONFIG_ZTE_PLATFORM
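	/* Note: !GPIOKPF_ACTIVE_HIGH evaluates to 0, so the AND below is
	 * always 0 and polarity is forced to 0 (active-low) on ZTE builds. */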
	unsigned polarity = !!(gpio_keypad_flags & !GPIOKPF_ACTIVE_HIGH);
#else
	unsigned polarity = !!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH);
#endif

	out = kp->current_output;
	if (out == mi->noutputs) {
		out = 0;
		kp->last_key_state_changed = kp->key_state_changed;
		kp->key_state_changed = 0;
		kp->some_keys_pressed = 0;
	} else {
		key_index = out * mi->ninputs;
		for (in = 0; in < mi->ninputs; in++, key_index++) {
			gpio = mi->input_gpios[in];
			if (gpio_get_value(gpio) ^ !polarity) {
				if (kp->some_keys_pressed < 3)
					kp->some_keys_pressed++;
				kp->key_state_changed |= !__test_and_set_bit(
						key_index, kp->keys_pressed);
			} else
				kp->key_state_changed |= __test_and_clear_bit(
						key_index, kp->keys_pressed);
		}
		gpio = mi->output_gpios[out];
		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
			gpio_set_value(gpio, !polarity);
		else
			gpio_direction_input(gpio);
		out++;
	}
	kp->current_output = out;
	if (out < mi->noutputs) {
		gpio = mi->output_gpios[out];
		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
			gpio_set_value(gpio, polarity);
		else
			gpio_direction_output(gpio, polarity);
		hrtimer_start(timer, mi->settle_time, HRTIMER_MODE_REL);
		return HRTIMER_NORESTART;
	}
	if (gpio_keypad_flags & GPIOKPF_DEBOUNCE) {
		if (kp->key_state_changed) {
			hrtimer_start(&kp->timer, mi->debounce_delay,
				      HRTIMER_MODE_REL);
			return HRTIMER_NORESTART;
		}
		kp->key_state_changed = kp->last_key_state_changed;
	}
	if (kp->key_state_changed) {
		if (gpio_keypad_flags & GPIOKPF_REMOVE_SOME_PHANTOM_KEYS)
			remove_phantom_keys(kp);
		key_index = 0;
		for (out = 0; out < mi->noutputs; out++)
			for (in = 0; in < mi->ninputs; in++, key_index++)
				report_key(kp, key_index, out, in);
	}
	if (!kp->use_irq || kp->some_keys_pressed) {
		hrtimer_start(timer, mi->poll_time, HRTIMER_MODE_REL);
		return HRTIMER_NORESTART;
	}

	/* No keys are pressed, reenable interrupt */
	for (out = 0; out < mi->noutputs; out++) {
		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
			gpio_set_value(mi->output_gpios[out], polarity);
		else
			gpio_direction_output(mi->output_gpios[out], polarity);
	}
	for (in = 0; in < mi->ninputs; in++)
		enable_irq(gpio_to_irq(mi->input_gpios[in]));
	wake_unlock(&kp->wake_lock);
	return HRTIMER_NORESTART;
}
Example 17
static enum hrtimer_restart gpio_keypad_timer_func(struct hrtimer *timer)
{
	int out, in;
	int key_index;
	int gpio;
	struct gpio_kp *kp = container_of(timer, struct gpio_kp, timer);
	struct gpio_event_matrix_info *mi = kp->keypad_info;
	unsigned gpio_keypad_flags = mi->flags;
	unsigned polarity = !!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH);

	if (kp->timer_starter == ES209RA_KEYPAD_STARTER_IRQ_HANDLER) {
		for (out = 0; out < mi->noutputs; out++)
			gpio_keypad_direction_input(mi->output_gpios[out]);

		kp->timer_starter = ES209RA_KEYPAD_STARTER_OTHER;
	}

	out = kp->current_output;
	if (out == mi->noutputs) {
		out = 0;
		kp->last_key_state_changed = kp->key_state_changed;
		kp->key_state_changed = 0;
		kp->some_keys_pressed = 0;
	} else {
		key_index = out * mi->ninputs;
		for (in = 0; in < mi->ninputs; in++, key_index++) {
			gpio = mi->input_gpios[in];
			if (gpio_get_value(gpio) ^ !polarity) {
				if (kp->some_keys_pressed < 3)
					kp->some_keys_pressed++;
				kp->key_state_changed |= !__test_and_set_bit(
						key_index, kp->keys_pressed);
			} else
				kp->key_state_changed |= __test_and_clear_bit(
						key_index, kp->keys_pressed);
		}
		gpio = mi->output_gpios[out];
		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
			gpio_set_value(gpio, !polarity);
		else
			gpio_keypad_direction_input(gpio);
		out++;
	}
	kp->current_output = out;
	if (out < mi->noutputs) {
		gpio = mi->output_gpios[out];
		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
			gpio_set_value(gpio, polarity);
		else
			gpio_keypad_direction_output(gpio, polarity);
		hrtimer_start(timer, mi->settle_time, HRTIMER_MODE_REL);
		return HRTIMER_NORESTART;
	}
	if (gpio_keypad_flags & GPIOKPF_DEBOUNCE) {
		if (kp->key_state_changed) {
			hrtimer_start(&kp->timer, mi->debounce_delay,
				      HRTIMER_MODE_REL);
			return HRTIMER_NORESTART;
		}
		kp->key_state_changed = kp->last_key_state_changed;
	}
	if (kp->key_state_changed) {
		if (gpio_keypad_flags & GPIOKPF_REMOVE_SOME_PHANTOM_KEYS)
			remove_phantom_keys(kp);
		key_index = 0;
		for (out = 0; out < mi->noutputs; out++)
			for (in = 0; in < mi->ninputs; in++, key_index++)
				report_key(kp, key_index, out, in);
	}

	if (kp->use_irq && kp->some_keys_pressed) {
		if ((kp->keys_pressed_time / NSEC_PER_SEC) >=
		    ES209RA_KEYPAD_PRESS_TIMEOUT) {
			/*
			 * Pretend that all keys were released and notify.
			 * This is necessary to suspend.
			 */
			key_index = 0;
			for (out = 0; out < mi->noutputs; out++) {
				for (in = 0;
				     in < mi->ninputs;
				     in++, key_index++) {
					__clear_bit(key_index,
						    kp->keys_pressed);
					report_key(kp, key_index, out, in);
				}
			}

			/* Run timeout process. */
			schedule_work(&kp->work);
		} else {
			kp->keys_pressed_time =
				kp->keys_pressed_time +
				(mi->poll_time.tv.sec * NSEC_PER_SEC) +
				mi->poll_time.tv.nsec;

			hrtimer_start(timer, mi->poll_time, HRTIMER_MODE_REL);
		}

		return HRTIMER_NORESTART;
	}

	if (!kp->use_irq) {
		hrtimer_start(timer, mi->poll_time, HRTIMER_MODE_REL);
		return HRTIMER_NORESTART;
	}

	/* No keys are pressed, reenable interrupt */
	for (out = 0; out < mi->noutputs; out++) {
		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
			gpio_set_value(mi->output_gpios[out], polarity);
		else
			gpio_keypad_direction_output(mi->output_gpios[out],
						     polarity);
	}
	for (in = 0; in < mi->ninputs; in++)
		enable_irq(gpio_to_irq(mi->input_gpios[in]));
	wake_unlock(&kp->wake_lock);
	return HRTIMER_NORESTART;
}
Example 18
static enum hrtimer_restart gpio_keypad_timer_func(struct hrtimer *timer)
{
	int out, in;
	int key_index;
	int gpio;
	struct gpio_kp *kp = container_of(timer, struct gpio_kp, timer);
	struct gpio_event_matrix_info *mi = kp->keypad_info;
	unsigned gpio_keypad_flags = mi->flags;
	unsigned polarity = !!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH);
#if defined(SLEEP_STATE_SKIP_LONGKEY)
	unsigned int irq;
#endif
	out = kp->current_output;
	if (out == mi->noutputs) {
		out = 0;
		kp->last_key_state_changed = kp->key_state_changed;
		kp->key_state_changed = 0;
		kp->some_keys_pressed = 0;
	} else {
		key_index = out * mi->ninputs;
		for (in = 0; in < mi->ninputs; in++, key_index++) {
			gpio = mi->input_gpios[in];
			if (gpio_get_value(gpio) ^ !polarity) {
				if (kp->some_keys_pressed < 3)
					kp->some_keys_pressed++;
				kp->key_state_changed |= !__test_and_set_bit(
						key_index, kp->keys_pressed);
			} else
				kp->key_state_changed |= __test_and_clear_bit(
						key_index, kp->keys_pressed);
		}
		gpio = mi->output_gpios[out];
		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
			gpio_set_value(gpio, !polarity);
		else
			gpio_direction_input(gpio);
		out++;
	}
	kp->current_output = out;
	if (out < mi->noutputs) {
		gpio = mi->output_gpios[out];
		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
			gpio_set_value(gpio, polarity);
		else
			gpio_direction_output(gpio, polarity);
		hrtimer_start(timer, mi->settle_time, HRTIMER_MODE_REL);
		return HRTIMER_NORESTART;
	}
	if (gpio_keypad_flags & GPIOKPF_DEBOUNCE) {
		if (kp->key_state_changed) {
			hrtimer_start(&kp->timer, mi->debounce_delay,
				      HRTIMER_MODE_REL);
			return HRTIMER_NORESTART;
		}
		kp->key_state_changed = kp->last_key_state_changed;
	}

#if defined(SLEEP_STATE_SKIP_LONGKEY)
	if (long_key_state == LONG_KEY_CHECK_ACTIVE) {
		/* printk("key event (key_press_count:%d, current_pressed_key:%d)\n",
		 *	  key_press_count, current_pressed_key);
		 * GOPEACE iskim ED25 -- remove log for google security approval */
		if (key_press_count++ > MAX_KEY_PRESS_COUNT) {
			key_press_count = 0;

			if (lcd_wake_flag == 0) {	/* LCD off */
				gpio_set_value(33, 0);	/* keyscan_0 */
				gpio_direction_output(33, 0);

				gpio_set_value(34, 0);	/* keyscan_1 */
				gpio_direction_output(34, 0);

				/* gpio_set_value(35, 0); */		/* keyscan_2 */
				/* gpio_direction_output(35, 0); */

				if (current_pressed_key == KEY_VOLUMEDOWN) {
					printk("******** volume down long key **********\n");
					irq = gpio_to_irq(42);	/* keysense_0 */
					set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
				}
				if (current_pressed_key == KEY_VOLUMEUP) {
					printk("******** volume up long key **********\n");
					irq = gpio_to_irq(41);	/* keysense_1 */
					set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
				}
				if (current_pressed_key == KEY_END /* || current_pressed_key == KEY_HOME */) {
					printk("******** power long key **********\n");
					irq = gpio_to_irq(40);	/* keysense_2 */
					set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
				}

				for (in = 0; in < mi->ninputs; in++)
					enable_irq(gpio_to_irq(mi->input_gpios[in]));

				wake_unlock(&kp->wake_lock);

				long_key_state = LONG_KEY_PRESSED;

				return HRTIMER_NORESTART;
			}
		}
	}
#endif

	if (kp->key_state_changed) {
		if (gpio_keypad_flags & GPIOKPF_REMOVE_SOME_PHANTOM_KEYS)
			remove_phantom_keys(kp);
		key_index = 0;
		for (out = 0; out < mi->noutputs; out++)
			for (in = 0; in < mi->ninputs; in++, key_index++)
				report_key(kp, key_index, out, in);
	}
	if (!kp->use_irq || kp->some_keys_pressed) {
		hrtimer_start(timer, mi->poll_time, HRTIMER_MODE_REL);
		return HRTIMER_NORESTART;
	}

	/* No keys are pressed, reenable interrupt */
	for (out = 0; out < mi->noutputs; out++) {
		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
			gpio_set_value(mi->output_gpios[out], polarity);
		else
			gpio_direction_output(mi->output_gpios[out], polarity);
	}
	for (in = 0; in < mi->ninputs; in++)
		enable_irq(gpio_to_irq(mi->input_gpios[in]));
	wake_unlock(&kp->wake_lock);
	return HRTIMER_NORESTART;
}
Example 19
File: bitmap.c Project: tliu526/a5
/**
 * wufs_new_inode: (utility function)
 * Allocate a new inode within a particular directory.
 * Returns error code by reference.
 */
struct inode *wufs_new_inode(const struct inode *dir, int *error)
{
  /* given parent directory, determine the device */
  struct super_block *sb = dir->i_sb;
  struct wufs_sb_info *sbi = wufs_sb(sb);

  /* 
   * allocate a new vfs inode (see linux/fs/inode.c)
   * this calls (indirectly) wufs_alloc_inode (see inode.c)
   */
  struct inode *inode = new_inode(sb);
  struct buffer_head * bh;
  int i;

  /* compute the number of map bits that occur in each block of imap */
  int bits_per_block = 8 * sb->s_blocksize;
  unsigned long ino;

  /* verify that vfs could create an inode */
  if (!inode) { *error = -ENOMEM; return NULL; }

  /* set sentinel values for failed lookup */
  ino = bits_per_block;
  bh = NULL;
  *error = -ENOSPC;
  
  /* lock down bitmap */
  spin_lock(&bitmap_lock);
  for (i = 0; i < sbi->sbi_imap_bcnt; i++) {
    bh = sbi->sbi_imap[i];
    ino = find_first_zero_bit((unsigned long*)bh->b_data, bits_per_block);
    if (ino < bits_per_block) {
      /* found an available inode index */
      break;
    }
  }

  /* 
   * At this point, i is the block number, bh is its buffer_head, and ino,
   * if a reasonable value, is the distance within that block
   * First, some sanity checking:
   */
  if (!bh || ino >= bits_per_block) {
    spin_unlock(&bitmap_lock);

    /* iput is the mechanism for getting vfs to destroy an inode */
    iput(inode);
    return NULL;
  }
  /* we're still locked...set the bit */
  if (__test_and_set_bit(ino, (unsigned long*)bh->b_data)) {
    /* for some reason, the bit was set - shouldn't happen, of course */
    spin_unlock(&bitmap_lock);
    printk("wufs_new_inode: bit already set\n");

    /* iput is the mechanism for getting vfs to destroy an inode */
    iput(inode);
    return NULL;
  }
  spin_unlock(&bitmap_lock);

  /* great - bitmap is set; write it out */
  mark_buffer_dirty(bh);

  /* now compute the actual inode *number* */
  ino += i * bits_per_block + 1;

  /* sanity check */
  if (!ino || ino > sbi->sbi_inodes) {
    printk("wufs_new_inode: attempted to allocate illegal inode number %lu\n",
	   ino);
    iput(inode);
    return NULL;
  }

  /* fill out vfs inode fields */
  inode->i_uid = current_fsuid(); /* see <linux/cred.h> */
  inode->i_gid = (dir->i_mode & S_ISGID) ? dir->i_gid : current_fsgid();
  inode->i_ino = ino;

  /*
   * remember: we can't call time(2), so we grab kernel time
   * (see ~/linux/kernel/timekeeping.c, get_seconds)
   */
  inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;

  /* initialize all data & size fields */
  inode->i_blocks = 0;
  for (i = 0; i < WUFS_INODE_BPTRS; i++) {
    wufs_i(inode)->ini_data[i] = 0;
  }

  /* insert this into the inode hash table (for fast lookup)
   * (see <linux/fs.h> and linux/fs/inode.c)
   */
  insert_inode_hash(inode);

  /* flush the inode changes to disk */
  mark_inode_dirty(inode);

  /* made it: clear pessimistic error reference */
  *error = 0;
  return inode;
}
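A hedged sketch of a typical consumer of wufs_new_inode(); the caller name and shape are hypothetical, only the error-by-reference contract comes from the function above:

/* Sketch: allocate an inode for a new directory entry. */
static struct inode *wufs_mknod_sketch(struct inode *dir, umode_t mode)
{
	int error;
	struct inode *inode = wufs_new_inode(dir, &error);

	if (!inode)
		return ERR_PTR(error);	/* -ENOMEM or -ENOSPC */

	inode->i_mode = mode;
	mark_inode_dirty(inode);
	return inode;	/* caller links it into the directory */
}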
Example 20
/*
 * Parse authenticated attributes.
 */
int pkcs7_sig_note_authenticated_attr(void *context, size_t hdrlen,
				      unsigned char tag,
				      const void *value, size_t vlen)
{
	struct pkcs7_parse_context *ctx = context;
	struct pkcs7_signed_info *sinfo = ctx->sinfo;
	enum OID content_type;

	pr_devel("AuthAttr: %02x %zu [%*ph]\n", tag, vlen, (unsigned)vlen, value);

	switch (ctx->last_oid) {
	case OID_contentType:
		if (__test_and_set_bit(sinfo_has_content_type, &sinfo->aa_set))
			goto repeated;
		content_type = look_up_OID(value, vlen);
		if (content_type != ctx->msg->data_type) {
			pr_warn("Mismatch between global data type (%d) and sinfo %u (%d)\n",
				ctx->msg->data_type, sinfo->index,
				content_type);
			return -EBADMSG;
		}
		return 0;

	case OID_signingTime:
		if (__test_and_set_bit(sinfo_has_signing_time, &sinfo->aa_set))
			goto repeated;
		/* Should we check that the signing time is consistent
		 * with the signer's X.509 cert?
		 */
		return x509_decode_time(&sinfo->signing_time,
					hdrlen, tag, value, vlen);

	case OID_messageDigest:
		if (__test_and_set_bit(sinfo_has_message_digest, &sinfo->aa_set))
			goto repeated;
		if (tag != ASN1_OTS)
			return -EBADMSG;
		sinfo->msgdigest = value;
		sinfo->msgdigest_len = vlen;
		return 0;

	case OID_smimeCapabilites:
		if (__test_and_set_bit(sinfo_has_smime_caps, &sinfo->aa_set))
			goto repeated;
		if (ctx->msg->data_type != OID_msIndirectData) {
			pr_warn("S/MIME Caps only allowed with Authenticode\n");
			return -EKEYREJECTED;
		}
		return 0;

		/* Microsoft SpOpusInfo seems to contain cont[0] 16-bit BE
		 * char URLs and cont[1] 8-bit char URLs.
		 *
		 * Microsoft StatementType seems to contain a list of OIDs that
		 * are also used as extendedKeyUsage types in X.509 certs.
		 */
	case OID_msSpOpusInfo:
		if (__test_and_set_bit(sinfo_has_ms_opus_info, &sinfo->aa_set))
			goto repeated;
		goto authenticode_check;
	case OID_msStatementType:
		if (__test_and_set_bit(sinfo_has_ms_statement_type, &sinfo->aa_set))
			goto repeated;
	authenticode_check:
		if (ctx->msg->data_type != OID_msIndirectData) {
			pr_warn("Authenticode AuthAttrs only allowed with Authenticode\n");
			return -EKEYREJECTED;
		}
		/* I'm not sure how to validate these */
		return 0;
	default:
		return 0;
	}

repeated:
	/* We permit max one item per AuthenticatedAttribute and no repeats */
	pr_warn("Repeated/multivalue AuthAttrs not permitted\n");
	return -EKEYREJECTED;
}
Example 21
static enum hrtimer_restart gpio_keypad_timer_func(struct hrtimer *timer)
{
	int out, in;
	int pout;	// multikey
	int key_index;
	int gpio;
	unsigned int irq;
	struct gpio_kp *kp = container_of(timer, struct gpio_kp, timer);
	struct gpio_event_matrix_info *mi = kp->keypad_info;
	unsigned gpio_keypad_flags = mi->flags;
	unsigned polarity = !!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH);

	out = kp->current_output;
	if (out == mi->noutputs) {
		out = 0;
		kp->last_key_state_changed = kp->key_state_changed;
		kp->key_state_changed = 0;
		kp->some_keys_pressed = 0;
	} else {
		key_index = out * mi->ninputs;
		for (in = 0; in < mi->ninputs; in++, key_index++) {
			gpio = mi->input_gpios[in];
			if (gpio_get_value(gpio) ^ !polarity) {
				if (kp->some_keys_pressed < 3)
					kp->some_keys_pressed++;
				kp->key_state_changed |= !__test_and_set_bit(
						key_index, kp->keys_pressed);
			} else
				kp->key_state_changed |= __test_and_clear_bit(
						key_index, kp->keys_pressed);
		}
		gpio = mi->output_gpios[out];
		/* multikey */
		for (pout = 0; pout < mi->noutputs; pout++) {
			if (out != pout)
				gpio_direction_output(mi->output_gpios[pout], !polarity);
		}

		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
			gpio_set_value(gpio, !polarity);
		else
			gpio_direction_input(gpio);
		out++;
	}
	kp->current_output = out;
	if (out < mi->noutputs) {
		gpio = mi->output_gpios[out];
		/* multikey */
		for (pout = 0; pout < mi->noutputs; pout++)
			if (out != pout)
				gpio_direction_input(mi->output_gpios[pout]);

		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
			gpio_set_value(gpio, polarity);
		else
			gpio_direction_output(gpio, polarity);
		hrtimer_start(timer, mi->settle_time, HRTIMER_MODE_REL);
		return HRTIMER_NORESTART;
	}
	if (gpio_keypad_flags & GPIOKPF_DEBOUNCE) {
		if (kp->key_state_changed) {
			hrtimer_start(&kp->timer, mi->debounce_delay,
				      HRTIMER_MODE_REL);
			return HRTIMER_NORESTART;
		}
		kp->key_state_changed = kp->last_key_state_changed;
	}

#if defined(CONFIG_MACH_COOPER_BASE_KOR)
	if (long_key_state == LONG_KEY_CHECK_ACTIVE) {
		if (key_press_count++ > MAX_KEY_PRESS_COUNT) {
			gpio_set_value(GPIO_KEY_SCAN, 0);	/* kbc0 */
			gpio_direction_output(GPIO_KEY_SCAN, 0);

#if defined(CONFIG_MACH_JUNO_SKT) || defined(CONFIG_MACH_JUNO_KT)
			gpio_set_value(GPIO_KEY_SCAN1, 0);	/* kbc1 */
			gpio_direction_output(GPIO_KEY_SCAN1, 0);
#endif

			if (!gpio_get_value(GPIO_VOLUME_UP) && !gpio_get_value(GPIO_VOLUME_DOWN)) {
				irq = gpio_to_irq(GPIO_VOLUME_UP);
				set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
				irq = gpio_to_irq(GPIO_VOLUME_DOWN);
				set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
			} else if (!gpio_get_value(GPIO_VOLUME_UP)) {	/* volume-up */
				irq = gpio_to_irq(GPIO_VOLUME_UP);
				set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
			} else if (!gpio_get_value(GPIO_VOLUME_DOWN)) {	/* volume-down */
				irq = gpio_to_irq(GPIO_VOLUME_DOWN);
				set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
			}

			for (in = 0; in < mi->ninputs; in++)
				enable_irq(gpio_to_irq(mi->input_gpios[in]));
			wake_unlock(&kp->wake_lock);

			key_press_count = 0;
			long_key_state = LONG_KEY_PRESSED;

			return HRTIMER_NORESTART;
		}
	}
#endif

	if (kp->key_state_changed) {
		if (gpio_keypad_flags & GPIOKPF_REMOVE_SOME_PHANTOM_KEYS)
			remove_phantom_keys(kp);
		key_index = 0;
		for (out = 0; out < mi->noutputs; out++)
			for (in = 0; in < mi->ninputs; in++, key_index++)
				report_key(kp, key_index, out, in);
	}
	if (!kp->use_irq || kp->some_keys_pressed) {
		hrtimer_start(timer, mi->poll_time, HRTIMER_MODE_REL);
		return HRTIMER_NORESTART;
	}

	/* No keys are pressed, reenable interrupt */
	for (out = 0; out < mi->noutputs; out++) {
		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
			gpio_set_value(mi->output_gpios[out], polarity);
		else
			gpio_direction_output(mi->output_gpios[out], polarity);
	}
	for (in = 0; in < mi->ninputs; in++)
		enable_irq(gpio_to_irq(mi->input_gpios[in]));
	wake_unlock(&kp->wake_lock);
	return HRTIMER_NORESTART;
}
Example 22
static enum hrtimer_restart gpio_keypad_timer_func(struct hrtimer *timer)
{
	int out, in;
	int key_index;
	int gpio;
	struct gpio_kp *kp = container_of(timer, struct gpio_kp, timer);
	struct gpio_event_matrix_info *mi = kp->keypad_info;
	unsigned gpio_keypad_flags = mi->flags;
	unsigned polarity = !!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH);

	out = kp->current_output;
	if (out == mi->noutputs) {
		out = 0;
		kp->last_key_state_changed = kp->key_state_changed;
		kp->key_state_changed = 0;
		kp->some_keys_pressed = 0;
	} else {
		key_index = out * mi->ninputs;
#if defined (CONFIG_MACH_ROOKIE) || defined(CONFIG_MACH_ESCAPE) || defined(CONFIG_MACH_GIO)
		for (in = 0; in < mi->ninputs-2; in++, key_index++) {
			gpio = mi->input_gpios[in];
			if (gpio_get_value(gpio) ^ !polarity) {
				if (kp->some_keys_pressed < 3)
					kp->some_keys_pressed++;
#if defined (CONFIG_MACH_ROOKIE) //MB ysahn 2011.04.17 - Main Key Overlap Handling
				if(key_index == BACK_KEY_INDEX && test_bit(HOME_KEY_INDEX,kp->keys_pressed))
				{
					kp->key_state_changed |= __test_and_set_bit(HOME_KEY_INDEX, kp->keys_pressed);
				}
				else if(key_index == HOME_KEY_INDEX && test_bit(BACK_KEY_INDEX,kp->keys_pressed))
				{
					kp->key_state_changed |= __test_and_set_bit(BACK_KEY_INDEX, kp->keys_pressed);
				}
				else
#endif //MB ysahn 2011.04.17 - Main Key Overlap Handling
				{
					kp->key_state_changed |= !__test_and_set_bit(key_index, kp->keys_pressed);
					//printk("kp->keys_pressed[0] : %d\n",in,kp->keys_pressed[0]);
	 				//printk("key event (key gpio:%d pressed, !polarity : %d)\n", gpio, !polarity);
				}
			} else
				kp->key_state_changed |= __test_and_clear_bit(key_index, kp->keys_pressed);
		}
		gpio = mi->output_gpios[out];

		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
		{
			gpio_set_value(gpio, !polarity);
		}
		else
			gpio_direction_input(gpio);

		if (out == 0) {	/* for volume up/down key */
			static unsigned int volume_pressed = 0;

			for (; in < mi->ninputs; in++, key_index++) {
				gpio = mi->input_gpios[in];
				if (gpio_get_value(gpio) == 0) {	/* pressed */
					/* only this key */
					if (volume_pressed == (1 << in) || !volume_pressed) {
						if (kp->some_keys_pressed < 3)
							kp->some_keys_pressed++;
						kp->key_state_changed |= !__test_and_set_bit(key_index, kp->keys_pressed);
						volume_pressed |= (1 << in);
						/* printk("key event (key gpio:%d, pressed)\n", gpio); */
					}
				} else {
					kp->key_state_changed |= __test_and_clear_bit(key_index, kp->keys_pressed);
					volume_pressed &= ~(1 << in);
				}
			}
		}
#else
		for (in = 0; in < mi->ninputs; in++, key_index++) {
			gpio = mi->input_gpios[in];
			if (gpio_get_value(gpio) ^ !polarity) {
				if (kp->some_keys_pressed < 3)
					kp->some_keys_pressed++;
				kp->key_state_changed |= !__test_and_set_bit(
						key_index, kp->keys_pressed);
 				//printk("key event (key gpio:%d pressed, polarity : %d)\n", gpio, polarity);
			} else
				kp->key_state_changed |= __test_and_clear_bit(
						key_index, kp->keys_pressed);
		}
		gpio = mi->output_gpios[out];
		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
			gpio_set_value(gpio, !polarity);
		else
			gpio_direction_input(gpio);
#endif
		out++;
	}
	kp->current_output = out;

	if (out < mi->noutputs) {
		gpio = mi->output_gpios[out];
		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
			gpio_set_value(gpio, polarity);
		else
			gpio_direction_output(gpio, polarity);
		hrtimer_start(timer, mi->settle_time, HRTIMER_MODE_REL);
		return HRTIMER_NORESTART;
	}
	if (gpio_keypad_flags & GPIOKPF_DEBOUNCE) {
		if (kp->key_state_changed) {
			hrtimer_start(&kp->timer, mi->debounce_delay,
				      HRTIMER_MODE_REL);
			return HRTIMER_NORESTART;
		}
		kp->key_state_changed = kp->last_key_state_changed;
	}
	if (kp->key_state_changed) {
		if (gpio_keypad_flags & GPIOKPF_REMOVE_SOME_PHANTOM_KEYS)
			remove_phantom_keys(kp);
		key_index = 0;
		for (out = 0; out < mi->noutputs; out++)
			for (in = 0; in < mi->ninputs; in++, key_index++)
			{
				report_key(kp, key_index, out, in);
			}
	}
	if (!kp->use_irq || kp->some_keys_pressed) {
		hrtimer_start(timer, mi->poll_time, HRTIMER_MODE_REL);
		return HRTIMER_NORESTART;
	}

	/* No keys are pressed, reenable interrupt */
	for (out = 0; out < mi->noutputs; out++) {
		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
			gpio_set_value(mi->output_gpios[out], polarity);
		else
			gpio_direction_output(mi->output_gpios[out], polarity);
	}
	for (in = 0; in < mi->ninputs; in++)
		enable_irq(gpio_to_irq(mi->input_gpios[in]));
	wake_unlock(&kp->wake_lock);
	return HRTIMER_NORESTART;
}
Example 23
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned int i, id, cpu = smp_processor_id();
	unsigned long *map;

	/* No lockless fast path .. yet */
	raw_spin_lock(&context_lock);

	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
		cpu, next, next->context.active, next->context.id);

#ifdef CONFIG_SMP
	/* Mark us active and the previous one not anymore */
	next->context.active++;
	if (prev) {
		pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
		WARN_ON(prev->context.active < 1);
		prev->context.active--;
	}

 again:
#endif /* CONFIG_SMP */

	/* If we already have a valid assigned context, skip all that */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT)) {
#ifdef DEBUG_MAP_CONSISTENCY
		if (context_mm[id] != next)
			pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
			       next, id, id, context_mm[id]);
#endif
		goto ctxt_ok;
	}

	/* We really don't have a context, let's try to acquire one */
	id = next_context;
	if (id > last_context)
		id = first_context;
	map = context_map;

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
#endif /* CONFIG_SMP */
		id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* We know there's at least one free context, try to find it */
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, last_context+1, id);
		if (id > last_context)
			id = first_context;
	}
 stolen:
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;
	pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);

	context_check_map();
 ctxt_ok:

	/* If that context got marked stale on this CPU, then flush the
	 * local TLB for it and unmark it before we use it
	 */
	if (test_bit(id, stale_map[cpu])) {
		pr_hardcont(" | stale flush %d [%d..%d]",
			    id, cpu_first_thread_sibling(cpu),
			    cpu_last_thread_sibling(cpu));

		local_flush_tlb_mm(next);

		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
		for (i = cpu_first_thread_sibling(cpu);
		     i <= cpu_last_thread_sibling(cpu); i++) {
			__clear_bit(id, stale_map[i]);
		}
	}

	/* Flick the MMU and release lock */
	pr_hardcont(" -> %d\n", id);
	set_context(id, next->pgd);
	raw_spin_unlock(&context_lock);
}