Example #1
static int qos_del_qdisc(ASF_uint32_t  ulVsgId,
			ASFQOSDeleteQdisc_t *qdisc)
{
	struct  asf_qdisc *root;
	uint32_t i;
	int	bLockFlag;

	root = qdisc->dev->asf_qdisc;
	if (!root) {
		asf_err("Qdisc not exists.\n");
		return ASFQOS_FAILURE;
	}

	if (qdisc->qdisc_type != ASF_QDISC_TBF) {
		asf_err("Unsupported Qdisc Delete Command.\n");
		return ASFQOS_SUCCESS;
	}
	ASF_RCU_READ_LOCK(bLockFlag);
	/* If root Qdisc is TBF then simply delete Shaper */
	if (root->qdisc_type == ASF_QDISC_TBF) {
		root->dev->asf_qdisc = NULL;
		kfree(root->priv);
		del_timer(&(root->timer));
		/* NAPI */
		napi_disable(&(root->qos_napi));
		netif_napi_del(&(root->qos_napi));

		if (root->pShaper)
			kfree(root->pShaper);
		for (i = 0; i < ASF_MAX_IFACES; i++) {
			if (qdisc_in_use[i] == root) {
				spin_lock(&cnt_lock);
				qdisc_in_use[i] = NULL;
				qdisc_cnt--;
				spin_unlock(&cnt_lock);
				asf_debug("Deleted Qdisc at index %d, qdisc_cnt %d\n",
					i, qdisc_cnt);
				break;
			}
		}
		kfree(root);
	} else {
		/* If Root is not TBF , it means we have
		configured any SCHEDULER over SHAPER */
		if (qdisc->parent != ROOT_ID) {
			/* Should be Queue Level Shaper */
			switch (root->qdisc_type) {
			case ASF_QDISC_PRIO:
			case ASF_QDISC_PRIO_DRR:
			case ASF_QDISC_DRR:
			{
				struct  asf_prio_sched_data *root_priv;
				/* Find out the Queue, to which shaper need to apply */
				i = qdisc->parent & MINOR_ID;
				i--; /* Index value */

				/* Deleting Per Queue Shaper */
				root_priv = root->priv;
				if (root_priv->q[i].shaper) {
					kfree(root_priv->q[i].shaper);
					root_priv->q[i].shaper = NULL;
				}

			}
			break;
			default:
				asf_err("Ohh.., Unsupported Parent Qdisc\n");
			}

			ASF_RCU_READ_UNLOCK(bLockFlag);
			return ASFQOS_SUCCESS;
		}
		/* ELSE Request for Deleting Root TBF Shaper */
		/* Delete Inactive Shaper Qdisc */
		for (i = 0; i < ASF_MAX_IFACES; i++) {
			struct  asf_qdisc *x = qdisc_in_use[i];
			if (!x)
				continue;
			if ((x->handle == qdisc->handle) &&
					(x->dev == qdisc->dev)) {
				spin_lock(&cnt_lock);
				qdisc_in_use[i] = NULL;
				qdisc_cnt--;
				spin_unlock(&cnt_lock);
				asf_debug("Deleted Qdisc at index %d, qdisc_cnt %d\n",
					i, qdisc_cnt);
				kfree(x);
				break;
			}
		}
		/* Now delete Child Qdisc */
		qos_flush_qdisc(ulVsgId, qdisc);
	}
	ASF_RCU_READ_UNLOCK(bLockFlag);
	return ASFQOS_SUCCESS;
}
Example #2
void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt,
			 size_t len, struct ieee80211_rx_status *rx_status)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee802_11_elems elems;
	struct sta_info *sta;
	enum plink_event event;
	enum plink_frame_type ftype;
	size_t baselen;
	u8 ie_len;
	u8 *baseaddr;
	__le16 plid, llid, reason;

	/* need action_code, aux */
	if (len < IEEE80211_MIN_ACTION_SIZE + 3)
		return;

	if (is_multicast_ether_addr(mgmt->da)) {
		mpl_dbg("Mesh plink: ignore frame from multicast address");
		return;
	}

	baseaddr = mgmt->u.action.u.plink_action.variable;
	baselen = (u8 *) mgmt->u.action.u.plink_action.variable - (u8 *) mgmt;
	if (mgmt->u.action.u.plink_action.action_code == PLINK_CONFIRM) {
		baseaddr += 4;
		baselen += 4;
	}
	ieee802_11_parse_elems(baseaddr, len - baselen, &elems);
	if (!elems.peer_link) {
		mpl_dbg("Mesh plink: missing necessary peer link ie\n");
		return;
	}

	ftype = *((u8 *)PLINK_GET_FRAME_SUBTYPE(elems.peer_link));
	ie_len = elems.peer_link_len;
	if ((ftype == PLINK_OPEN && ie_len != 3) ||
	    (ftype == PLINK_CONFIRM && ie_len != 5) ||
	    (ftype == PLINK_CLOSE && ie_len != 5 && ie_len != 7)) {
		mpl_dbg("Mesh plink: incorrect plink ie length\n");
		return;
	}

	if (ftype != PLINK_CLOSE && (!elems.mesh_id || !elems.mesh_config)) {
		mpl_dbg("Mesh plink: missing necessary ie\n");
		return;
	}
	/* Note the lines below are correct, the llid in the frame is the plid
	 * from the point of view of this host.
	 */
	memcpy(&plid, PLINK_GET_LLID(elems.peer_link), 2);
	if (ftype == PLINK_CONFIRM || (ftype == PLINK_CLOSE && ie_len == 7))
		memcpy(&llid, PLINK_GET_PLID(elems.peer_link), 2);

	rcu_read_lock();

	sta = sta_info_get(local, mgmt->sa);
	if (!sta && ftype != PLINK_OPEN) {
		mpl_dbg("Mesh plink: cls or cnf from unknown peer\n");
		rcu_read_unlock();
		return;
	}

	if (sta && sta->plink_state == PLINK_BLOCKED) {
		rcu_read_unlock();
		return;
	}

	/* Now we will figure out the appropriate event... */
	event = PLINK_UNDEFINED;
	if (ftype != PLINK_CLOSE && (!mesh_matches_local(&elems, sdata))) {
		switch (ftype) {
		case PLINK_OPEN:
			event = OPN_RJCT;
			break;
		case PLINK_CONFIRM:
			event = CNF_RJCT;
			break;
		case PLINK_CLOSE:
			/* avoid warning */
			break;
		}
		spin_lock_bh(&sta->lock);
	} else if (!sta) {
		/* ftype == PLINK_OPEN */
		u32 rates;

		rcu_read_unlock();

		if (!mesh_plink_free_count(sdata)) {
			mpl_dbg("Mesh plink error: no more free plinks\n");
			return;
		}

		rates = ieee80211_sta_get_rates(local, &elems, rx_status->band);
		sta = mesh_plink_alloc(sdata, mgmt->sa, rates);
		if (!sta) {
			mpl_dbg("Mesh plink error: plink table full\n");
			return;
		}
		if (sta_info_insert_rcu(sta)) {
			rcu_read_unlock();
			return;
		}
		event = OPN_ACPT;
		spin_lock_bh(&sta->lock);
	} else {
		spin_lock_bh(&sta->lock);
		switch (ftype) {
		case PLINK_OPEN:
			if (!mesh_plink_free_count(sdata) ||
			    (sta->plid && sta->plid != plid))
				event = OPN_IGNR;
			else
				event = OPN_ACPT;
			break;
		case PLINK_CONFIRM:
			if (!mesh_plink_free_count(sdata) ||
			    (sta->llid != llid || sta->plid != plid))
				event = CNF_IGNR;
			else
				event = CNF_ACPT;
			break;
		case PLINK_CLOSE:
			if (sta->plink_state == PLINK_ESTAB)
				/* Do not check for llid or plid. This does not
				 * follow the standard but since multiple plinks
				 * per sta are not supported, it is necessary in
				 * order to avoid a livelock when MP A sees an
				 * establish peer link to MP B but MP B does not
				 * see it. This can be caused by a timeout in
				 * B's peer link establishment or B being
				 * restarted.
				 */
				event = CLS_ACPT;
			else if (sta->plid != plid)
				event = CLS_IGNR;
			else if (ie_len == 7 && sta->llid != llid)
				event = CLS_IGNR;
			else
				event = CLS_ACPT;
			break;
		default:
			mpl_dbg("Mesh plink: unknown frame subtype\n");
			spin_unlock_bh(&sta->lock);
			rcu_read_unlock();
			return;
		}
	}

	mpl_dbg("Mesh plink (peer, state, llid, plid, event): %pM %d %d %d %d\n",
		mgmt->sa, sta->plink_state,
		le16_to_cpu(sta->llid), le16_to_cpu(sta->plid),
		event);
	reason = 0;
	switch (sta->plink_state) {
		/* spin_unlock as soon as state is updated at each case */
	case PLINK_LISTEN:
		switch (event) {
		case CLS_ACPT:
			mesh_plink_fsm_restart(sta);
			spin_unlock_bh(&sta->lock);
			break;
		case OPN_ACPT:
			sta->plink_state = PLINK_OPN_RCVD;
			sta->plid = plid;
			get_random_bytes(&llid, 2);
			sta->llid = llid;
			mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
			spin_unlock_bh(&sta->lock);
			mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->sta.addr, llid,
					    0, 0);
			mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr,
					    llid, plid, 0);
			break;
		default:
			spin_unlock_bh(&sta->lock);
			break;
		}
		break;

	case PLINK_OPN_SNT:
		switch (event) {
		case OPN_RJCT:
		case CNF_RJCT:
			reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION);
		case CLS_ACPT:
			if (!reason)
				reason = cpu_to_le16(MESH_CLOSE_RCVD);
			sta->reason = reason;
			sta->plink_state = PLINK_HOLDING;
			if (!mod_plink_timer(sta,
					     dot11MeshHoldingTimeout(sdata)))
				sta->ignore_plink_timer = true;

			llid = sta->llid;
			spin_unlock_bh(&sta->lock);
			mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
					    plid, reason);
			break;
		case OPN_ACPT:
			/* retry timer is left untouched */
			sta->plink_state = PLINK_OPN_RCVD;
			sta->plid = plid;
			llid = sta->llid;
			spin_unlock_bh(&sta->lock);
			mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
					    plid, 0);
			break;
		case CNF_ACPT:
			sta->plink_state = PLINK_CNF_RCVD;
			if (!mod_plink_timer(sta,
					     dot11MeshConfirmTimeout(sdata)))
				sta->ignore_plink_timer = true;

			spin_unlock_bh(&sta->lock);
			break;
		default:
			spin_unlock_bh(&sta->lock);
			break;
		}
		break;

	case PLINK_OPN_RCVD:
		switch (event) {
		case OPN_RJCT:
		case CNF_RJCT:
			reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION);
		case CLS_ACPT:
			if (!reason)
				reason = cpu_to_le16(MESH_CLOSE_RCVD);
			sta->reason = reason;
			sta->plink_state = PLINK_HOLDING;
			if (!mod_plink_timer(sta,
					     dot11MeshHoldingTimeout(sdata)))
				sta->ignore_plink_timer = true;

			llid = sta->llid;
			spin_unlock_bh(&sta->lock);
			mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
					    plid, reason);
			break;
		case OPN_ACPT:
			llid = sta->llid;
			spin_unlock_bh(&sta->lock);
			mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
					    plid, 0);
			break;
		case CNF_ACPT:
			del_timer(&sta->plink_timer);
			sta->plink_state = PLINK_ESTAB;
			mesh_plink_inc_estab_count(sdata);
			spin_unlock_bh(&sta->lock);
			mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
				sta->sta.addr);
			break;
		default:
			spin_unlock_bh(&sta->lock);
			break;
		}
		break;

	case PLINK_CNF_RCVD:
		switch (event) {
		case OPN_RJCT:
		case CNF_RJCT:
			reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION);
		case CLS_ACPT:
			if (!reason)
				reason = cpu_to_le16(MESH_CLOSE_RCVD);
			sta->reason = reason;
			sta->plink_state = PLINK_HOLDING;
			if (!mod_plink_timer(sta,
					     dot11MeshHoldingTimeout(sdata)))
				sta->ignore_plink_timer = true;

			llid = sta->llid;
			spin_unlock_bh(&sta->lock);
			mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
					    plid, reason);
			break;
		case OPN_ACPT:
			del_timer(&sta->plink_timer);
			sta->plink_state = PLINK_ESTAB;
			mesh_plink_inc_estab_count(sdata);
			spin_unlock_bh(&sta->lock);
			mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
				sta->sta.addr);
			mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
					    plid, 0);
			break;
		default:
			spin_unlock_bh(&sta->lock);
			break;
		}
		break;

	case PLINK_ESTAB:
		switch (event) {
		case CLS_ACPT:
			reason = cpu_to_le16(MESH_CLOSE_RCVD);
			sta->reason = reason;
			__mesh_plink_deactivate(sta);
			sta->plink_state = PLINK_HOLDING;
			llid = sta->llid;
			mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
			spin_unlock_bh(&sta->lock);
			mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
					    plid, reason);
			break;
		case OPN_ACPT:
			llid = sta->llid;
			spin_unlock_bh(&sta->lock);
			mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
					    plid, 0);
			break;
		default:
			spin_unlock_bh(&sta->lock);
			break;
		}
		break;
	case PLINK_HOLDING:
		switch (event) {
		case CLS_ACPT:
			if (del_timer(&sta->plink_timer))
				sta->ignore_plink_timer = 1;
			mesh_plink_fsm_restart(sta);
			spin_unlock_bh(&sta->lock);
			break;
		case OPN_ACPT:
		case CNF_ACPT:
		case OPN_RJCT:
		case CNF_RJCT:
			llid = sta->llid;
			reason = sta->reason;
			spin_unlock_bh(&sta->lock);
			mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr,
					    llid, plid, reason);
			break;
		default:
			spin_unlock_bh(&sta->lock);
		}
		break;
	default:
		/* should not get here, PLINK_BLOCKED is dealt with at the
		 * beginning of the function
		 */
		spin_unlock_bh(&sta->lock);
		break;
	}

	rcu_read_unlock();
}
Example #3
void irlmp_stop_idle_timer(struct lap_cb *self) 
{
	/* If timer is activated, kill it! */
	del_timer(&self->idle_timer);
}
Example #4
void
hfc2bds0_interrupt(struct IsdnCardState *cs, u_char val)
{
	u_char exval;
	struct BCState *bcs;
	int count = 15;
	long flags;

	if (cs->debug & L1_DEB_ISAC)
		debugl1(cs, "HFCD irq %x %s", val,
			test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags) ?
			"locked" : "unlocked");
	val &= cs->hw.hfcD.int_m1;
	if (val & 0x40) { /* TE state machine irq */
		exval = cs->readisac(cs, HFCD_STATES) & 0xf;
		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "ph_state chg %d->%d", cs->ph_state,
				exval);
		cs->ph_state = exval;
		sched_event_D(cs, D_L1STATECHANGE);
		val &= ~0x40;
	}
	while (val) {
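		/* Drain the pending interrupt causes; bits latched in int_s1
		 * while the hardware lock was held are reprocessed below,
		 * bounded by 'count' iterations. */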
		save_flags(flags);
		cli();
		if (test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
			cs->hw.hfcD.int_s1 |= val;
			restore_flags(flags);
			return;
		}
		if (cs->hw.hfcD.int_s1 & 0x18) {
			exval = val;
			val =  cs->hw.hfcD.int_s1;
			cs->hw.hfcD.int_s1 = exval;
		}	
		if (val & 0x08) {
			if (!(bcs=Sel_BCS(cs, 0))) {
				if (cs->debug)
					debugl1(cs, "hfcd spurious 0x08 IRQ");
			} else 
				main_rec_2bds0(bcs);
		}
		if (val & 0x10) {
			if (!(bcs=Sel_BCS(cs, 1))) {
				if (cs->debug)
					debugl1(cs, "hfcd spurious 0x10 IRQ");
			} else 
				main_rec_2bds0(bcs);
		}
		if (val & 0x01) {
			if (!(bcs=Sel_BCS(cs, 0))) {
				if (cs->debug)
					debugl1(cs, "hfcd spurious 0x01 IRQ");
			} else {
				if (bcs->tx_skb) {
					if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
						hfc_fill_fifo(bcs);
						test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
					} else
						debugl1(cs,"fill_data %d blocked", bcs->channel);
				} else {
					if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
						if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
							hfc_fill_fifo(bcs);
							test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
						} else
							debugl1(cs,"fill_data %d blocked", bcs->channel);
					} else {
						hfc_sched_event(bcs, B_XMTBUFREADY);
					}
				}
			}
		}
		if (val & 0x02) {
			if (!(bcs=Sel_BCS(cs, 1))) {
				if (cs->debug)
					debugl1(cs, "hfcd spurious 0x02 IRQ");
			} else {
				if (bcs->tx_skb) {
					if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
						hfc_fill_fifo(bcs);
						test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
					} else
						debugl1(cs,"fill_data %d blocked", bcs->channel);
				} else {
					if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
						if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
							hfc_fill_fifo(bcs);
							test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
						} else
							debugl1(cs,"fill_data %d blocked", bcs->channel);
					} else {
						hfc_sched_event(bcs, B_XMTBUFREADY);
					}
				}
			}
		}
		if (val & 0x20) {	/* receive dframe */
			receive_dmsg(cs);
		}
		if (val & 0x04) {	/* dframe transmitted */
			if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
				del_timer(&cs->dbusytimer);
			if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
				sched_event_D(cs, D_CLEARBUSY);
			if (cs->tx_skb)
				if (cs->tx_skb->len) {
					if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
						hfc_fill_dfifo(cs);
						test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
					} else {
						debugl1(cs, "hfc_fill_dfifo irq blocked");
					}
					goto afterXPR;
				} else {
					dev_kfree_skb(cs->tx_skb, FREE_WRITE);
					cs->tx_cnt = 0;
					cs->tx_skb = NULL;
				}
			if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
				cs->tx_cnt = 0;
				if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
					hfc_fill_dfifo(cs);
					test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
				} else {
					debugl1(cs, "hfc_fill_dfifo irq blocked");
				}
			} else
				sched_event_D(cs, D_XMTBUFREADY);
		}
      afterXPR:
		if (cs->hw.hfcD.int_s1 && count--) {
			val = cs->hw.hfcD.int_s1;
			cs->hw.hfcD.int_s1 = 0;
			if (cs->debug & L1_DEB_ISAC)
				debugl1(cs, "HFCD irq %x loop %d", val, 15-count);
		} else
			val = 0;
		restore_flags(flags);
	}
}
Example #5
static void ace_fsm_dostate(struct ace_device *ace)
{
	struct request *req;
	u32 status;
	u16 val;
	int count;

#if defined(DEBUG)
	dev_dbg(ace->dev, "fsm_state=%i, id_req_count=%i\n",
		ace->fsm_state, ace->id_req_count);
#endif

	/* Verify that there is actually a CF in the slot. If not, then
	 * bail out back to the idle state and wake up all the waiters */
	status = ace_in32(ace, ACE_STATUS);
	if ((status & ACE_STATUS_CFDETECT) == 0) {
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->media_change = 1;
		set_capacity(ace->gd, 0);
		dev_info(ace->dev, "No CF in slot\n");

		/* Drop all pending requests */
		while ((req = elv_next_request(ace->queue)) != NULL)
			end_request(req, 0);

		/* Drop back to IDLE state and notify waiters */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->id_result = -EIO;
		while (ace->id_req_count) {
			complete(&ace->id_completion);
			ace->id_req_count--;
		}
	}

	switch (ace->fsm_state) {
	case ACE_FSM_STATE_IDLE:
		/* See if there is anything to do */
		if (ace->id_req_count || ace_get_next_request(ace->queue)) {
			ace->fsm_iter_num++;
			ace->fsm_state = ACE_FSM_STATE_REQ_LOCK;
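			/* (Re)arm the one-second stall watchdog for this request */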
			mod_timer(&ace->stall_timer, jiffies + HZ);
			if (!timer_pending(&ace->stall_timer))
				add_timer(&ace->stall_timer);
			break;
		}
		del_timer(&ace->stall_timer);
		ace->fsm_continue_flag = 0;
		break;

	case ACE_FSM_STATE_REQ_LOCK:
		if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
			/* Already have the lock, jump to next state */
			ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
			break;
		}

		/* Request the lock */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_LOCKREQ);
		ace->fsm_state = ACE_FSM_STATE_WAIT_LOCK;
		break;

	case ACE_FSM_STATE_WAIT_LOCK:
		if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
			/* got the lock; move to next state */
			ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
			break;
		}

		/* wait a bit for the lock */
		ace_fsm_yield(ace);
		break;

	case ACE_FSM_STATE_WAIT_CFREADY:
		status = ace_in32(ace, ACE_STATUS);
		if (!(status & ACE_STATUS_RDYFORCFCMD) ||
		    (status & ACE_STATUS_CFBSY)) {
			/* CF card isn't ready; it needs to be polled */
			ace_fsm_yield(ace);
			break;
		}

		/* Device is ready for command; determine what to do next */
		if (ace->id_req_count)
			ace->fsm_state = ACE_FSM_STATE_IDENTIFY_PREPARE;
		else
			ace->fsm_state = ACE_FSM_STATE_REQ_PREPARE;
		break;

	case ACE_FSM_STATE_IDENTIFY_PREPARE:
		/* Send identify command */
		ace->fsm_task = ACE_TASK_IDENTIFY;
		ace->data_ptr = &ace->cf_id;
		ace->data_count = ACE_BUF_PER_SECTOR;
		ace_out(ace, ACE_SECCNTCMD, ACE_SECCNTCMD_IDENTIFY);

		/* As per datasheet, put config controller in reset */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);

		/* irq handler takes over from this point; wait for the
		 * transfer to complete */
		ace->fsm_state = ACE_FSM_STATE_IDENTIFY_TRANSFER;
		ace_fsm_yieldirq(ace);
		break;

	case ACE_FSM_STATE_IDENTIFY_TRANSFER:
		/* Check that the sysace is ready to receive data */
		status = ace_in32(ace, ACE_STATUS);
		if (status & ACE_STATUS_CFBSY) {
			dev_dbg(ace->dev, "CFBSY set; t=%i iter=%i dc=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				ace->data_count);
			ace_fsm_yield(ace);
			break;
		}
		if (!(status & ACE_STATUS_DATABUFRDY)) {
			ace_fsm_yield(ace);
			break;
		}

		/* Transfer the next buffer */
		ace->reg_ops->datain(ace);
		ace->data_count--;

		/* If there are still buffers to be transferred, jump out here */
		if (ace->data_count != 0) {
			ace_fsm_yieldirq(ace);
			break;
		}

		/* transfer finished; kick state machine */
		dev_dbg(ace->dev, "identify finished\n");
		ace->fsm_state = ACE_FSM_STATE_IDENTIFY_COMPLETE;
		break;

	case ACE_FSM_STATE_IDENTIFY_COMPLETE:
		ace_fix_driveid(&ace->cf_id);
		ace_dump_mem(&ace->cf_id, 512);	/* Debug: Dump out disk ID */

		if (ace->data_result) {
			/* Error occurred, disable the disk */
			ace->media_change = 1;
			set_capacity(ace->gd, 0);
			dev_err(ace->dev, "error fetching CF id (%i)\n",
				ace->data_result);
		} else {
			ace->media_change = 0;

			/* Record disk parameters */
			set_capacity(ace->gd, ace->cf_id.lba_capacity);
			dev_info(ace->dev, "capacity: %i sectors\n",
				 ace->cf_id.lba_capacity);
		}

		/* We're done, drop to IDLE state and notify waiters */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->id_result = ace->data_result;
		while (ace->id_req_count) {
			complete(&ace->id_completion);
			ace->id_req_count--;
		}
		break;

	case ACE_FSM_STATE_REQ_PREPARE:
		req = ace_get_next_request(ace->queue);
		if (!req) {
			ace->fsm_state = ACE_FSM_STATE_IDLE;
			break;
		}

		/* Okay, it's a data request, set it up for transfer */
		dev_dbg(ace->dev,
			"request: sec=%llx hcnt=%lx, ccnt=%x, dir=%i\n",
			(unsigned long long) req->sector, req->hard_nr_sectors,
			req->current_nr_sectors, rq_data_dir(req));

		ace->req = req;
		ace->data_ptr = req->buffer;
		ace->data_count = req->current_nr_sectors * ACE_BUF_PER_SECTOR;
		ace_out32(ace, ACE_MPULBA, req->sector & 0x0FFFFFFF);

		count = req->hard_nr_sectors;
		if (rq_data_dir(req)) {
			/* Kick off write request */
			dev_dbg(ace->dev, "write data\n");
			ace->fsm_task = ACE_TASK_WRITE;
			ace_out(ace, ACE_SECCNTCMD,
				count | ACE_SECCNTCMD_WRITE_DATA);
		} else {
			/* Kick off read request */
			dev_dbg(ace->dev, "read data\n");
			ace->fsm_task = ACE_TASK_READ;
			ace_out(ace, ACE_SECCNTCMD,
				count | ACE_SECCNTCMD_READ_DATA);
		}

		/* As per datasheet, put config controller in reset */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);

		/* Move to the transfer state.  The systemace will raise
		 * an interrupt once there is something to do
		 */
		ace->fsm_state = ACE_FSM_STATE_REQ_TRANSFER;
		if (ace->fsm_task == ACE_TASK_READ)
			ace_fsm_yieldirq(ace);	/* wait for data ready */
		break;

	case ACE_FSM_STATE_REQ_TRANSFER:
		/* Check that the sysace is ready to receive data */
		status = ace_in32(ace, ACE_STATUS);
		if (status & ACE_STATUS_CFBSY) {
			dev_dbg(ace->dev,
				"CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				ace->req->current_nr_sectors * 16,
				ace->data_count, ace->in_irq);
			ace_fsm_yield(ace);	/* need to poll CFBSY bit */
			break;
		}
		if (!(status & ACE_STATUS_DATABUFRDY)) {
			dev_dbg(ace->dev,
				"DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				ace->req->current_nr_sectors * 16,
				ace->data_count, ace->in_irq);
			ace_fsm_yieldirq(ace);
			break;
		}

		/* Transfer the next buffer */
		if (ace->fsm_task == ACE_TASK_WRITE)
			ace->reg_ops->dataout(ace);
		else
			ace->reg_ops->datain(ace);
		ace->data_count--;

		/* If there are still buffers to be transferred, jump out here */
		if (ace->data_count != 0) {
			ace_fsm_yieldirq(ace);
			break;
		}

		/* bio finished; is there another one? */
		if (__blk_end_request(ace->req, 0,
					blk_rq_cur_bytes(ace->req))) {
			/* dev_dbg(ace->dev, "next block; h=%li c=%i\n",
			 *      ace->req->hard_nr_sectors,
			 *      ace->req->current_nr_sectors);
			 */
			ace->data_ptr = ace->req->buffer;
			ace->data_count = ace->req->current_nr_sectors * 16;
			ace_fsm_yieldirq(ace);
			break;
		}

		ace->fsm_state = ACE_FSM_STATE_REQ_COMPLETE;
		break;

	case ACE_FSM_STATE_REQ_COMPLETE:
		ace->req = NULL;

		/* Finished request; go to idle state */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		break;

	default:
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		break;
	}
}
Example #6
static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
	unsigned int qlen;
	struct red_parms *p = NULL;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;
	if (opt->nla_len >= nla_attr_size(sizeof(*ctl_v1)))
		ctl_v1 = nla_data(opt);
	if (ctl->divisor &&
	    (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
		return -EINVAL;
	if (ctl_v1 && ctl_v1->qth_min) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return -ENOMEM;
	}
	sch_tree_lock(sch);
	if (ctl->quantum) {
		q->quantum = ctl->quantum;
		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	}
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->flows)
		q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
	if (ctl->divisor) {
		q->divisor = ctl->divisor;
		q->maxflows = min_t(u32, q->maxflows, q->divisor);
	}
	if (ctl_v1) {
		if (ctl_v1->depth)
			q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
		if (p) {
			swap(q->red_parms, p);
			red_set_parms(q->red_parms,
				      ctl_v1->qth_min, ctl_v1->qth_max,
				      ctl_v1->Wlog,
				      ctl_v1->Plog, ctl_v1->Scell_log,
				      NULL,
				      ctl_v1->max_P);
		}
		q->flags = ctl_v1->flags;
		q->headdrop = ctl_v1->headdrop;
	}
	if (ctl->limit) {
		q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows);
		q->maxflows = min_t(u32, q->maxflows, q->limit);
	}

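	/* Shrink the backlog until it fits the (possibly reduced) limit */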
	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit)
		sfq_drop(sch);
	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

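	/* Restart the perturbation timer with the new period, if one is set */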
	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		q->perturbation = net_random();
	}
	sch_tree_unlock(sch);
	kfree(p);
	return 0;
}
Example #7
static void stop_timer(void)
{
	del_timer(&system_off_timer);
}
Example #8
void
athr_gmac_timer_del(athr_gmac_t *mac)
{
   if(enable_timer)
      del_timer(&mac->mac_phy_timer);
}
Example #9
/* SCA: RCR+ must supply id, len and data
   SCN: RCR- must supply code, id, len and data
   STA: RTR must supply id
   SCJ: RUC must supply CP packet len and data */
static void ppp_cp_event(struct net_device *dev, u16 pid, u16 event, u8 code,
			 u8 id, unsigned int len, const void *data)
{
	int old_state, action;
	struct ppp *ppp = get_ppp(dev);
	struct proto *proto = get_proto(dev, pid);

	old_state = proto->state;
	BUG_ON(old_state >= STATES);
	BUG_ON(event >= EVENTS);

#if DEBUG_STATE
	printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) %s ...\n", dev->name,
	       proto_name(pid), event_names[event], state_names[proto->state]);
#endif

	action = cp_table[event][old_state];
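	/* The table entry packs the next state (STATE_MASK bits) together with
	 * the action flags (SCR, SCA, SCN, STR, STA, SCJ, ZRC, IRC) handled below. */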

	proto->state = action & STATE_MASK;
	if (action & (SCR | STR)) /* set Configure-Req/Terminate-Req timer */
		mod_timer(&proto->timer, proto->timeout =
			  jiffies + ppp->req_timeout * HZ);
	if (action & ZRC)
		proto->restart_counter = 0;
	if (action & IRC)
		proto->restart_counter = (proto->state == STOPPING) ?
			ppp->term_retries : ppp->cr_retries;

	if (action & SCR)	/* send Configure-Request */
		ppp_tx_cp(dev, pid, CP_CONF_REQ, proto->cr_id = ++ppp->seq,
			  0, NULL);
	if (action & SCA)	/* send Configure-Ack */
		ppp_tx_cp(dev, pid, CP_CONF_ACK, id, len, data);
	if (action & SCN)	/* send Configure-Nak/Reject */
		ppp_tx_cp(dev, pid, code, id, len, data);
	if (action & STR)	/* send Terminate-Request */
		ppp_tx_cp(dev, pid, CP_TERM_REQ, ++ppp->seq, 0, NULL);
	if (action & STA)	/* send Terminate-Ack */
		ppp_tx_cp(dev, pid, CP_TERM_ACK, id, 0, NULL);
	if (action & SCJ)	/* send Code-Reject */
		ppp_tx_cp(dev, pid, CP_CODE_REJ, ++ppp->seq, len, data);

	if (old_state != OPENED && proto->state == OPENED) {
		netdev_info(dev, "%s up\n", proto_name(pid));
		if (pid == PID_LCP) {
			netif_dormant_off(dev);
			ppp_cp_event(dev, PID_IPCP, START, 0, 0, 0, NULL);
			ppp_cp_event(dev, PID_IPV6CP, START, 0, 0, 0, NULL);
			ppp->last_pong = jiffies;
			mod_timer(&proto->timer, proto->timeout =
				  jiffies + ppp->keepalive_interval * HZ);
		}
	}
	if (old_state == OPENED && proto->state != OPENED) {
		netdev_info(dev, "%s down\n", proto_name(pid));
		if (pid == PID_LCP) {
			netif_dormant_on(dev);
			ppp_cp_event(dev, PID_IPCP, STOP, 0, 0, 0, NULL);
			ppp_cp_event(dev, PID_IPV6CP, STOP, 0, 0, 0, NULL);
		}
	}
	if (old_state != CLOSED && proto->state == CLOSED)
		del_timer(&proto->timer);

#if DEBUG_STATE
	printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) ... %s\n", dev->name,
	       proto_name(pid), event_names[event], state_names[proto->state]);
#endif
}
Example #10
static void  __exit jiffies_exit(void)
{
	del_timer(&jiffies_timer);
}
Example #11
static void
hfc4s8s_bh(struct work_struct *work)
{
	hfc4s8s_hw *hw = container_of(work, hfc4s8s_hw, tqueue);
	u_char b;
	struct hfc4s8s_l1 *l1p;
	volatile u_char *fifo_stat;
	int idx;

	/* handle layer 1 state changes */
	b = 1;
	l1p = hw->l1;
	while (b) {
		if ((b & hw->mr.r_irq_statech)) {
			/* reset l1 event */
			hw->mr.r_irq_statech &= ~b;
			if (l1p->enabled) {
				if (l1p->nt_mode) {
					u_char oldstate = l1p->l1_state;

					Write_hfc8(l1p->hw, R_ST_SEL,
						   l1p->st_num);
					l1p->l1_state =
					    Read_hfc8(l1p->hw,
						      A_ST_RD_STA) & 0xf;

					if ((oldstate == 3) &&
					    (l1p->l1_state != 3))
						l1p->d_if.ifc.l1l2(&l1p->d_if.ifc,
								   PH_DEACTIVATE | INDICATION,
								   NULL);

					if (l1p->l1_state != 2) {
						del_timer(&l1p->l1_timer);
						if (l1p->l1_state == 3) {
							l1p->d_if.ifc.l1l2(&l1p->d_if.ifc,
									   PH_ACTIVATE | INDICATION,
									   NULL);
						}
					} else {
						/* allow transition */
						Write_hfc8(hw, A_ST_WR_STA,
							   M_SET_G2_G3);
						mod_timer(&l1p->l1_timer,
							  jiffies +
							  L1_TIMER_T1);
					}
					printk(KERN_INFO
					       "HFC-4S/8S: NT ch %d l1 state %d -> %d\n",
					       l1p->st_num, oldstate,
					       l1p->l1_state);
				} else {
					u_char oldstate = l1p->l1_state;

					Write_hfc8(l1p->hw, R_ST_SEL,
						   l1p->st_num);
					l1p->l1_state =
					    Read_hfc8(l1p->hw,
						      A_ST_RD_STA) & 0xf;

					if (((l1p->l1_state == 3) &&
					     ((oldstate == 7) ||
					      (oldstate == 8))) ||
					    (timer_pending(&l1p->l1_timer) &&
					     (l1p->l1_state == 8))) {
						mod_timer(&l1p->l1_timer,
							  jiffies + L1_TIMER_T4);
					} else {
						if (l1p->l1_state == 7) {
							del_timer(&l1p->l1_timer);
							l1p->d_if.ifc.l1l2(&l1p->d_if.ifc,
									   PH_ACTIVATE | INDICATION,
									   NULL);
							tx_d_frame(l1p);
						}
						if (l1p->l1_state == 3) {
							if (oldstate != 3)
								l1p->d_if.ifc.l1l2(&l1p->d_if.ifc,
										   PH_DEACTIVATE | INDICATION,
										   NULL);
						}
					}
					printk(KERN_INFO
					       "HFC-4S/8S: TE %d ch %d l1 state %d -> %d\n",
					       l1p->hw->cardnum,
					       l1p->st_num, oldstate,
					       l1p->l1_state);
				}
			}
		}
		b <<= 1;
		l1p++;
	}

	/* now handle the fifos */
	idx = 0;
	fifo_stat = hw->mr.r_irq_fifo_blx;
	l1p = hw->l1;
	while (idx < hw->driver_data.max_st_ports) {

		if (hw->mr.timer_irq) {
			*fifo_stat |= hw->mr.fifo_rx_trans_enables[idx];
			if (hw->fifo_sched_cnt <= 0) {
				*fifo_stat |=
				    hw->mr.fifo_slow_timer_service[l1p->st_num];
			}
		}
		/* ignore fifo 6 (TX E fifo) */
		*fifo_stat &= 0xff - 0x40;

		while (*fifo_stat) {

			if (!l1p->nt_mode) {
				/* RX Fifo has data to read */
				if ((*fifo_stat & 0x20)) {
					*fifo_stat &= ~0x20;
					rx_d_frame(l1p, 0);
				}
				/* E Fifo has data to read */
				if ((*fifo_stat & 0x80)) {
					*fifo_stat &= ~0x80;
					rx_d_frame(l1p, 1);
				}
				/* TX Fifo completed send */
				if ((*fifo_stat & 0x10)) {
					*fifo_stat &= ~0x10;
					tx_d_frame(l1p);
				}
			}
			/* B1 RX Fifo has data to read */
			if ((*fifo_stat & 0x2)) {
				*fifo_stat &= ~0x2;
				rx_b_frame(l1p->b_ch);
			}
			/* B1 TX Fifo has send completed */
			if ((*fifo_stat & 0x1)) {
				*fifo_stat &= ~0x1;
				tx_b_frame(l1p->b_ch);
			}
			/* B2 RX Fifo has data to read */
			if ((*fifo_stat & 0x8)) {
				*fifo_stat &= ~0x8;
				rx_b_frame(l1p->b_ch + 1);
			}
			/* B2 TX Fifo has send completed */
			if ((*fifo_stat & 0x4)) {
				*fifo_stat &= ~0x4;
				tx_b_frame(l1p->b_ch + 1);
			}
		}
		fifo_stat++;
		l1p++;
		idx++;
	}

	if (hw->fifo_sched_cnt <= 0)
		hw->fifo_sched_cnt += (1 << (7 - TRANS_TIMER_MODE));
	hw->mr.timer_irq = 0;	/* clear requested timer irq */
}				/* hfc4s8s_bh */
Example #12
/**
 *  lbs_thread - handles the major jobs in the LBS driver.
 *  It handles all events generated by firmware, RX data received
 *  from firmware and TX data sent from kernel.
 *
 *  @data:	A pointer to &lbs_thread structure
 *  returns:	0
 */
static int lbs_thread(void *data)
{
	struct net_device *dev = data;
	struct lbs_private *priv = dev->ml_priv;
	wait_queue_t wait;

	lbs_deb_enter(LBS_DEB_THREAD);

	init_waitqueue_entry(&wait, current);

	for (;;) {
		int shouldsleep;
		u8 resp_idx;

		lbs_deb_thread("1: currenttxskb %p, dnld_sent %d\n",
				priv->currenttxskb, priv->dnld_sent);

		add_wait_queue(&priv->waitq, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irq(&priv->driver_lock);

		if (kthread_should_stop())
			shouldsleep = 0;	/* Bye */
		else if (priv->surpriseremoved)
			shouldsleep = 1;	/* We need to wait until we're _told_ to die */
		else if (priv->psstate == PS_STATE_SLEEP)
			shouldsleep = 1;	/* Sleep mode. Nothing we can do till it wakes */
		else if (priv->cmd_timed_out)
			shouldsleep = 0;	/* Command timed out. Recover */
		else if (!priv->fw_ready)
			shouldsleep = 1;	/* Firmware not ready. We're waiting for it */
		else if (priv->dnld_sent)
			shouldsleep = 1;	/* Something is en route to the device already */
		else if (priv->tx_pending_len > 0)
			shouldsleep = 0;	/* We've a packet to send */
		else if (priv->resp_len[priv->resp_idx])
			shouldsleep = 0;	/* We have a command response */
		else if (priv->cur_cmd)
			shouldsleep = 1;	/* Can't send a command; one already running */
		else if (!list_empty(&priv->cmdpendingq) &&
					!(priv->wakeup_dev_required))
			shouldsleep = 0;	/* We have a command to send */
		else if (kfifo_len(&priv->event_fifo))
			shouldsleep = 0;	/* We have an event to process */
		else
			shouldsleep = 1;	/* No command */

		if (shouldsleep) {
			lbs_deb_thread("sleeping, connect_status %d, "
				"psmode %d, psstate %d\n",
				priv->connect_status,
				priv->psmode, priv->psstate);
			spin_unlock_irq(&priv->driver_lock);
			schedule();
		} else
			spin_unlock_irq(&priv->driver_lock);

		lbs_deb_thread("2: currenttxskb %p, dnld_send %d\n",
			       priv->currenttxskb, priv->dnld_sent);

		set_current_state(TASK_RUNNING);
		remove_wait_queue(&priv->waitq, &wait);

		lbs_deb_thread("3: currenttxskb %p, dnld_sent %d\n",
			       priv->currenttxskb, priv->dnld_sent);

		if (kthread_should_stop()) {
			lbs_deb_thread("break from main thread\n");
			break;
		}

		if (priv->surpriseremoved) {
			lbs_deb_thread("adapter removed; waiting to die...\n");
			continue;
		}

		lbs_deb_thread("4: currenttxskb %p, dnld_sent %d\n",
		       priv->currenttxskb, priv->dnld_sent);

		/* Process any pending command response */
		spin_lock_irq(&priv->driver_lock);
		resp_idx = priv->resp_idx;
		if (priv->resp_len[resp_idx]) {
			spin_unlock_irq(&priv->driver_lock);
			lbs_process_command_response(priv,
				priv->resp_buf[resp_idx],
				priv->resp_len[resp_idx]);
			spin_lock_irq(&priv->driver_lock);
			priv->resp_len[resp_idx] = 0;
		}
		spin_unlock_irq(&priv->driver_lock);

		/* Process hardware events, e.g. card removed, link lost */
		spin_lock_irq(&priv->driver_lock);
		while (kfifo_len(&priv->event_fifo)) {
			u32 event;

			if (kfifo_out(&priv->event_fifo,
				(unsigned char *) &event, sizeof(event)) !=
				sizeof(event))
					break;
			spin_unlock_irq(&priv->driver_lock);
			lbs_process_event(priv, event);
			spin_lock_irq(&priv->driver_lock);
		}
		spin_unlock_irq(&priv->driver_lock);

		if (priv->wakeup_dev_required) {
			lbs_deb_thread("Waking up device...\n");
			/* Wake up device */
			if (priv->exit_deep_sleep(priv))
				lbs_deb_thread("Wakeup device failed\n");
			continue;
		}

		/* command timeout stuff */
		if (priv->cmd_timed_out && priv->cur_cmd) {
			struct cmd_ctrl_node *cmdnode = priv->cur_cmd;

			netdev_info(dev, "Timeout submitting command 0x%04x\n",
				    le16_to_cpu(cmdnode->cmdbuf->command));
			lbs_complete_command(priv, cmdnode, -ETIMEDOUT);

			/* Reset card, but only when it isn't in the process
			 * of being shutdown anyway. */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0))
			if (!dev->dismantle && priv->reset_card)
#else
			if (priv->reset_card)
#endif
				priv->reset_card(priv);
		}
		priv->cmd_timed_out = 0;

		if (!priv->fw_ready)
			continue;

		/* Check if we need to confirm Sleep Request received previously */
		if (priv->psstate == PS_STATE_PRE_SLEEP &&
		    !priv->dnld_sent && !priv->cur_cmd) {
			if (priv->connect_status == LBS_CONNECTED) {
				lbs_deb_thread("pre-sleep, currenttxskb %p, "
					"dnld_sent %d, cur_cmd %p\n",
					priv->currenttxskb, priv->dnld_sent,
					priv->cur_cmd);

				lbs_ps_confirm_sleep(priv);
			} else {
				/* workaround for firmware sending
				 * deauth/linkloss event immediately
				 * after sleep request; remove this
				 * after firmware fixes it
				 */
				priv->psstate = PS_STATE_AWAKE;
				netdev_alert(dev,
					     "ignore PS_SleepConfirm in non-connected state\n");
			}
		}

		/* The PS state is changed during processing of Sleep Request
		 * event above
		 */
		if ((priv->psstate == PS_STATE_SLEEP) ||
		    (priv->psstate == PS_STATE_PRE_SLEEP))
			continue;

		if (priv->is_deep_sleep)
			continue;

		/* Execute the next command */
		if (!priv->dnld_sent && !priv->cur_cmd)
			lbs_execute_next_command(priv);

		spin_lock_irq(&priv->driver_lock);
		if (!priv->dnld_sent && priv->tx_pending_len > 0) {
			int ret = priv->hw_host_to_card(priv, MVMS_DAT,
							priv->tx_pending_buf,
							priv->tx_pending_len);
			if (ret) {
				lbs_deb_tx("host_to_card failed %d\n", ret);
				priv->dnld_sent = DNLD_RES_RECEIVED;
			} else {
				mod_timer(&priv->tx_lockup_timer,
					  jiffies + (HZ * 5));
			}
			priv->tx_pending_len = 0;
			if (!priv->currenttxskb) {
				/* We can wake the queues immediately if we aren't
				   waiting for TX feedback */
				if (priv->connect_status == LBS_CONNECTED)
					netif_wake_queue(priv->dev);
				if (priv->mesh_dev &&
				    netif_running(priv->mesh_dev))
					netif_wake_queue(priv->mesh_dev);
			}
		}
		spin_unlock_irq(&priv->driver_lock);
	}

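	/* Thread is exiting: stop any driver timers that may still be armed */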
	del_timer(&priv->command_timer);
	del_timer(&priv->tx_lockup_timer);
	del_timer(&priv->auto_deepsleep_timer);

	lbs_deb_leave(LBS_DEB_THREAD);
	return 0;
}
Example #13
static void zero_resume(struct usb_composite_dev *cdev)
{
	DBG(cdev, "%s\n", __func__);
	del_timer(&autoresume_timer);
}
Example #14
static int qos_flush_qdisc(ASF_uint32_t  ulVsgId,
			ASFQOSDeleteQdisc_t *qdisc)
{
	struct  asf_qdisc *root;
	int	i;
	int	bLockFlag;

	/* Root Qdisc  */
	root = qdisc->dev->asf_qdisc;
	if (!root) {
		asf_err("Qdisc not exists.\n");
		return ASFQOS_SUCCESS;
	}

	ASF_RCU_READ_LOCK(bLockFlag);

	qdisc->dev->asf_qdisc = NULL;

	/* Destroying Shaper */
	switch (root->qdisc_type) {
	case ASF_QDISC_PRIO_DRR:
	case ASF_QDISC_DRR:
	case ASF_QDISC_PRIO:
	{
		struct  asf_prio_sched_data *root_priv;

		root_priv = root->priv;
		for (i = 0; i < ASF_PRIO_MAX; i++) {
			if (root_priv->q[i].shaper)
				kfree(root_priv->q[i].shaper);
		}

	}
	break;
	default:
		asf_err("Ohh.., Unsupported Parent Qdisc\n");
	}

	kfree(root->priv);
	del_timer(&(root->timer));
	/* NAPI */
	napi_disable(&(root->qos_napi));
	netif_napi_del(&(root->qos_napi));

	for (i = 0; i < ASF_MAX_IFACES; i++) {
		if (qdisc_in_use[i] == root) {
			spin_lock(&cnt_lock);
			qdisc_in_use[i] = NULL;
			qdisc_cnt--;
			spin_unlock(&cnt_lock);
			asf_debug("Deleted Qdisc at index %d, qdisc_cnt %d\n",
					i, qdisc_cnt);
			break;
		}
	}
	/* IF Port Shaper exists, Set it as Root Qdisc */
	for (i = 0; i < ASF_MAX_IFACES; i++) {
		struct  asf_qdisc *x = qdisc_in_use[i];
		if (x && (x->dev == qdisc->dev)) {
			/* Configure De-queue NAPI */
			netif_napi_add(x->dev, &(x->qos_napi),
					prio_tx_napi, qos_budget);
			napi_enable(&(x->qos_napi));

			setup_timer(&x->timer, timer_handler,
					(unsigned long)x);
			qdisc->dev->asf_qdisc = x;
			break;
		}
	}

	kfree(root);
	ASF_RCU_READ_UNLOCK(bLockFlag);

	return ASFQOS_SUCCESS;
}
Example #15
void slip_start(void)
{
    char buf[128];
    int disc, sencap;
    int res;

    rx_count = -1;

    if (dynamic_addrs && !force_dynamic) {
	if (debug&DEBUG_VERBOSE)
	    syslog(LOG_INFO,"Fetching IP addresses from SLIP server");
	if (dynamic_mode != DMODE_BOOTP) {
	    fail = 0;
	    failt.data = 0;
	    failt.function = slip_start_fail;
	    failt.expires = 30;	/* give 30 seconds for this to work */
	    add_timer(&failt);
	    if (dynamic_mode == DMODE_REMOTE || dynamic_mode == DMODE_REMOTE_LOCAL)
		if (!grab_addr(&remote_ip)) return;
	    if (dynamic_mode != DMODE_REMOTE)
		if (!grab_addr(&local_ip)) return;
	    if (dynamic_mode == DMODE_LOCAL_REMOTE)
		if (!grab_addr(&remote_ip)) return;
	    del_timer(&failt);
	    syslog(LOG_INFO,"New addresses: local %s, remote %s.",
		local_ip,remote_ip);
	}
    }

    if (ioctl(modem_fd, TIOCGETD, &start_disc) < 0)
	syslog(LOG_ERR,"Can't get line discipline on proxy device: %m"), die(1);

    /* change line discipline to SLIP and set the SLIP encapsulation */
    disc = N_SLIP;
    if ((link_iface = ioctl(modem_fd, TIOCSETD, &disc)) < 0) {
	if (errno == ENFILE) {
	   syslog(LOG_ERR,"No free slip device available for link."), die(1);
	} else if (errno == EEXIST) {
	    syslog(LOG_ERR,"Link device already in slip mode!?");
	} else if (errno == EINVAL) {
	    syslog(LOG_ERR,"SLIP not supported by kernel, can't build link.");
	    die(1);
	} else
	   syslog(LOG_ERR,"Can't set line discipline: %m"), die(1);
    }

    if (ioctl(modem_fd, SIOCSIFENCAP, &slip_encap) < 0)
	syslog(LOG_ERR,"Can't set encapsulation: %m"), die(1);

#ifdef SIOCSKEEPALIVE
    if (keepalive && (ioctl(modem_fd, SIOCSKEEPALIVE, &keepalive) < 0))
      syslog(LOG_ERR, "Can't set keepalive: %m (ignoring error)");
#endif

#ifdef SIOCSOUTFILL
    if (outfill && (ioctl(modem_fd, SIOCSOUTFILL, &outfill) < 0))
      syslog(LOG_ERR, "Can't set outfill: %m (ignoring error)");
#endif

    /* verify that it worked */
    if (ioctl(modem_fd, TIOCGETD, &disc) < 0)
	syslog(LOG_ERR,"Can't get line discipline: %m"), die(1);
    if (ioctl(modem_fd, SIOCGIFENCAP, &sencap) < 0)
	syslog(LOG_ERR,"Can't get encapsulation: %m"), die(1);

    if (disc != N_SLIP || sencap != slip_encap)
        syslog(LOG_ERR,"Couldn't set up the slip link correctly!"), die(1);

    if (debug&DEBUG_VERBOSE)
        syslog(LOG_INFO,"Slip link established on interface sl%d",
	    link_iface);

    /* mark the interface as up */
    if (netmask) {
        sprintf(buf,"%s sl%d %s pointopoint %s netmask %s mtu %d metric %d up",
	    PATH_IFCONFIG,link_iface,local_ip,remote_ip,netmask,mtu, metric);
    } else {
        sprintf(buf,"%s sl%d %s pointopoint %s mtu %d metric %d up",
	    PATH_IFCONFIG,link_iface,local_ip,remote_ip,mtu, metric);
    }
    res = system(buf);
    report_system_result(res,buf);

    /* Set the routing for the new slip interface */
    set_ptp("sl",link_iface,remote_ip,metric);

    /* run bootp if it is asked for */
    if (dynamic_addrs && dynamic_mode == DMODE_BOOTP && !force_dynamic) start_bootp();

    dead = 0;
}
Example #16
void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
{
	__u16 rsp_opcode = nci_opcode(skb->data);

	/* we got a rsp, stop the cmd timer */
	del_timer(&ndev->cmd_timer);

	pr_debug("NCI RX: MT=rsp, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
		 nci_pbf(skb->data),
		 nci_opcode_gid(rsp_opcode),
		 nci_opcode_oid(rsp_opcode),
		 nci_plen(skb->data));

	/* strip the nci control header */
	skb_pull(skb, NCI_CTRL_HDR_SIZE);

	switch (rsp_opcode) {
	case NCI_OP_CORE_RESET_RSP:
		nci_core_reset_rsp_packet(ndev, skb);
		break;

	case NCI_OP_CORE_INIT_RSP:
		nci_core_init_rsp_packet(ndev, skb);
		break;

	case NCI_OP_CORE_SET_CONFIG_RSP:
		nci_core_set_config_rsp_packet(ndev, skb);
		break;

	case NCI_OP_CORE_CONN_CREATE_RSP:
		nci_core_conn_create_rsp_packet(ndev, skb);
		break;

	case NCI_OP_CORE_CONN_CLOSE_RSP:
		nci_core_conn_close_rsp_packet(ndev, skb);
		break;

	case NCI_OP_RF_DISCOVER_MAP_RSP:
		nci_rf_disc_map_rsp_packet(ndev, skb);
		break;

	case NCI_OP_RF_DISCOVER_RSP:
		nci_rf_disc_rsp_packet(ndev, skb);
		break;

	case NCI_OP_RF_DISCOVER_SELECT_RSP:
		nci_rf_disc_select_rsp_packet(ndev, skb);
		break;

	case NCI_OP_RF_DEACTIVATE_RSP:
		nci_rf_deactivate_rsp_packet(ndev, skb);
		break;

	case NCI_OP_NFCEE_DISCOVER_RSP:
		nci_nfcee_discover_rsp_packet(ndev, skb);
		break;

	case NCI_OP_NFCEE_MODE_SET_RSP:
		nci_nfcee_mode_set_rsp_packet(ndev, skb);
		break;

	default:
		pr_err("unknown rsp opcode 0x%x\n", rsp_opcode);
		break;
	}

	kfree_skb(skb);

	/* trigger the next cmd */
	atomic_set(&ndev->cmd_cnt, 1);
	if (!skb_queue_empty(&ndev->cmd_q))
		queue_work(ndev->cmd_wq, &ndev->cmd_work);
}
Example #17
static void headsetobserve_work(struct work_struct *work)
{
	int level = 0;
	//int level2 = 0;
	struct rk_headset_pdata *pdata = headset_info->pdata;
	static unsigned int old_status = 0;
	DBG("---headsetobserve_work---\n");
	mutex_lock(&headset_info->mutex_lock[HEADSET]);

	level = read_gpio(pdata->Headset_gpio);
	if(level < 0)
		goto out;
	/*
	msleep(100);	
	level2 = read_gpio(pdata->Headset_gpio);
	if(level2 < 0)
		goto out;
	if(level2 != level)
		goto out;
	*/
	old_status = headset_info->headset_status;
	if(pdata->headset_in_type == HEADSET_IN_HIGH)
		headset_info->headset_status = level?HEADSET_IN:HEADSET_OUT;
	else
		headset_info->headset_status = level?HEADSET_OUT:HEADSET_IN;

	if(old_status == headset_info->headset_status)	{
		DBG("old_status == headset_info->headset_status\n");
		goto out;
	}
	
	DBG("(headset in is %s)headset status is %s\n",
		pdata->headset_in_type?"high level":"low level",
		headset_info->headset_status?"in":"out");
		
	if(headset_info->headset_status == HEADSET_IN)
	{
		headset_info->cur_headset_status = BIT_HEADSET_NO_MIC;
		if(pdata->headset_in_type == HEADSET_IN_HIGH)
			irq_set_irq_type(headset_info->irq[HEADSET],IRQF_TRIGGER_FALLING);
		else
			irq_set_irq_type(headset_info->irq[HEADSET],IRQF_TRIGGER_RISING);
		if (pdata->Hook_gpio != INVALID_GPIO) {
			/* restart the timer and wait for the switch to the headphone channel */
			del_timer(&headset_info->headset_timer);
			headset_info->headset_timer.expires = jiffies + 10;
			goto out;
		}
	}
	else if(headset_info->headset_status == HEADSET_OUT)
	{	
		headset_info->hook_status = HOOK_UP;
		if(headset_info->isHook_irq == enable)
		{
			DBG("disable headset_hook irq\n");
			headset_info->isHook_irq = disable;
			disable_irq(headset_info->irq[HOOK]);		
		}	
		headset_info->cur_headset_status = 0;//~(BIT_HEADSET|BIT_HEADSET_NO_MIC);
		//#if defined(CONFIG_SND_RK_SOC_RK2928) || defined(CONFIG_SOC_RK3028)
		//rk2928_codec_set_spk(HEADSET_OUT);
		//#endif						
		if(pdata->headset_in_type == HEADSET_IN_HIGH)
			irq_set_irq_type(headset_info->irq[HEADSET],IRQF_TRIGGER_RISING);
		else
			irq_set_irq_type(headset_info->irq[HEADSET],IRQF_TRIGGER_FALLING);
	}
	rk28_send_wakeup_key();
	switch_set_state(&headset_info->sdev, headset_info->cur_headset_status);	
	#if defined(CONFIG_SND_RK_SOC_RK2928) || defined(CONFIG_SND_RK29_SOC_RK610)
	if (headset_info->headset_status == HEADSET_OUT)
	{
		mdelay(200);
		rk2928_codec_set_spk(HEADSET_OUT);
		gpio_set_value(pdata->Sw_mic_gpio, headset_info->pdata->Main_mic_io_value);
	}
	#endif
	DBG("headset_info->cur_headset_status = %d\n",headset_info->cur_headset_status);

out:
	mutex_unlock(&headset_info->mutex_lock[HEADSET]);	
}
Example #18
static int ath_bluesleep_gpio_config(int on)
{
	int ret = 0;

	BT_INFO("%s config: %d", __func__, on);
	if (!on) {
		if (disable_irq_wake(bsi->host_wake_irq))
			BT_ERR("Couldn't disable hostwake IRQ wakeup mode\n");
		goto free_host_wake_irq;
	}

	ret = gpio_request(bsi->host_wake, "bt_host_wake");
	if (ret < 0) {
		BT_ERR("failed to request gpio pin %d, error %d\n",
			bsi->host_wake, ret);
		goto gpio_config_failed;
	}

	/* configure host_wake as input */
	ret = gpio_direction_input(bsi->host_wake);
	if (ret < 0) {
		BT_ERR("failed to config GPIO %d as input pin, err %d\n",
			bsi->host_wake, ret);
		goto gpio_host_wake;
	}

	ret = gpio_request(bsi->ext_wake, "bt_ext_wake");
	if (ret < 0) {
		BT_ERR("failed to request gpio pin %d, error %d\n",
			bsi->ext_wake, ret);
		goto gpio_host_wake;
	}

	ret = gpio_direction_output(bsi->ext_wake, 1);
	if (ret < 0) {
		BT_ERR("failed to config GPIO %d as output pin, err %d\n",
			bsi->ext_wake, ret);
		goto gpio_ext_wake;
	}

	gpio_set_value(bsi->ext_wake, 1);

	/* Initialize spinlock. */
	spin_lock_init(&rw_lock);

	/* Initialize timer */
	init_timer(&tx_timer);
	tx_timer.function = bluesleep_tx_timer_expire;
	tx_timer.data = 0;

	/* initialize host wake tasklet */
	tasklet_init(&hostwake_task, hostwake_interrupt, 0);

	if (bsi->irq_polarity == POLARITY_LOW) {
		ret = request_irq(bsi->host_wake_irq, bluesleep_hostwake_isr,
				IRQF_DISABLED | IRQF_TRIGGER_FALLING,
				"bluetooth hostwake", NULL);
	} else  {
		ret = request_irq(bsi->host_wake_irq, bluesleep_hostwake_isr,
				IRQF_DISABLED | IRQF_TRIGGER_RISING,
				"bluetooth hostwake", NULL);
	}
	if (ret  < 0) {
		BT_ERR("Couldn't acquire BT_HOST_WAKE IRQ");
		goto delete_timer;
	}

	ret = enable_irq_wake(bsi->host_wake_irq);
	if (ret < 0) {
		BT_ERR("Couldn't enable BT_HOST_WAKE as wakeup interrupt");
		goto free_host_wake_irq;
	}

	return 0;

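/* Error/teardown path: labels fall through so everything acquired up to the
 * failure point (wake IRQ, timer, GPIOs) is released in reverse order. */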
free_host_wake_irq:
	free_irq(bsi->host_wake_irq, NULL);
delete_timer:
	del_timer(&tx_timer);
gpio_ext_wake:
	gpio_free(bsi->ext_wake);
gpio_host_wake:
	gpio_free(bsi->host_wake);
gpio_config_failed:
	return ret;
}
Example #19
VOID
vCommandTimer (
    IN  HANDLE      hDeviceContext
    )
{
    PSDevice        pDevice = (PSDevice)hDeviceContext;
    PSMgmtObject    pMgmt = pDevice->pMgmt;
    PWLAN_IE_SSID   pItemSSID;
    PWLAN_IE_SSID   pItemSSIDCurr;
    CMD_STATUS      Status;
    UINT            ii;
    BYTE            byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
    struct sk_buff  *skb;


    if (pDevice->dwDiagRefCount != 0)
        return;
    if (pDevice->bCmdRunning != TRUE)
        return;

    spin_lock_irq(&pDevice->lock);

    switch ( pDevice->eCommandState ) {

        case WLAN_CMD_SCAN_START:

	pDevice->byReAssocCount = 0;
            if (pDevice->bRadioOff == TRUE) {
                s_bCommandComplete(pDevice);
                spin_unlock_irq(&pDevice->lock);
                return;
            }

            if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
                s_bCommandComplete(pDevice);
                CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, OP_MODE_AP);
                spin_unlock_irq(&pDevice->lock);
                return;
            }

            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState= WLAN_CMD_SCAN_START\n");
            pItemSSID = (PWLAN_IE_SSID)pMgmt->abyScanSSID;
            
            if (pDevice->iTDUsed[TYPE_AC0DMA] != 0){
                spin_unlock_irq(&pDevice->lock);
                vCommandTimerWait((HANDLE)pDevice, 10);
                return;
            };

            if (pMgmt->uScanChannel == 0 ) {
                pMgmt->uScanChannel = pDevice->byMinChannel;
                

            }
            if (pMgmt->uScanChannel > pDevice->byMaxChannel) {
                pMgmt->eScanState = WMAC_NO_SCANNING;

                
                
                CARDbSetChannel(pMgmt->pAdapter, pMgmt->uCurrChannel);
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Scanning, set back to channel: [%d]\n", pMgmt->uCurrChannel);
                if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
                    CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, OP_MODE_ADHOC);
                } else {
                    CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, OP_MODE_INFRASTRUCTURE);
                }
                vAdHocBeaconRestart(pDevice);
                s_bCommandComplete(pDevice);

            } else {

                 if (!ChannelValid(pDevice->byZoneType, pMgmt->uScanChannel)) {
                    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Invalid channel pMgmt->uScanChannel = %d \n",pMgmt->uScanChannel);
                    s_bCommandComplete(pDevice);
                    return;
                }

                if (pMgmt->uScanChannel == pDevice->byMinChannel) {
                    
                    pMgmt->abyScanBSSID[0] = 0xFF;
                    pMgmt->abyScanBSSID[1] = 0xFF;
                    pMgmt->abyScanBSSID[2] = 0xFF;
                    pMgmt->abyScanBSSID[3] = 0xFF;
                    pMgmt->abyScanBSSID[4] = 0xFF;
                    pMgmt->abyScanBSSID[5] = 0xFF;
                    pItemSSID->byElementID = WLAN_EID_SSID;
                    
                    
                    pMgmt->eScanState = WMAC_IS_SCANNING;

                }

                vAdHocBeaconStop(pDevice);

                if (CARDbSetChannel(pMgmt->pAdapter, pMgmt->uScanChannel) == TRUE) {
                    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"SCAN Channel: %d\n", pMgmt->uScanChannel);
                } else {
                    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"SET SCAN Channel Fail: %d\n", pMgmt->uScanChannel);
                }
                CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, OP_MODE_UNKNOWN);

      
	pMgmt->uScanChannel++;

	 if (!ChannelValid(pDevice->byZoneType, pMgmt->uScanChannel) &&
                        pMgmt->uScanChannel <= pDevice->byMaxChannel ){
                    pMgmt->uScanChannel=pDevice->byMaxChannel+1;
		 pMgmt->eCommandState = WLAN_CMD_SCAN_END;

                }


                if ((pMgmt->b11hEnable == FALSE) ||
                    (pMgmt->uScanChannel < CB_MAX_CHANNEL_24G)) {
                    s_vProbeChannel(pDevice);
                    spin_unlock_irq(&pDevice->lock);
                    vCommandTimerWait((HANDLE)pDevice, WCMD_ACTIVE_SCAN_TIME);
                    return;
                } else {
                    spin_unlock_irq(&pDevice->lock);
                    vCommandTimerWait((HANDLE)pDevice, WCMD_PASSIVE_SCAN_TIME);
                    return;
                }

            }

            break;

        case WLAN_CMD_SCAN_END:

            
            
            CARDbSetChannel(pMgmt->pAdapter, pMgmt->uCurrChannel);
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Scanning, set back to channel: [%d]\n", pMgmt->uCurrChannel);
            if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
                CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, OP_MODE_ADHOC);
            } else {
                CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, OP_MODE_INFRASTRUCTURE);
            }

            pMgmt->eScanState = WMAC_NO_SCANNING;
            vAdHocBeaconRestart(pDevice);

#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
	if(pMgmt->eScanType == WMAC_SCAN_PASSIVE)
			{
				union iwreq_data wrqu;
				memset(&wrqu, 0, sizeof(wrqu));
				wireless_send_event(pDevice->dev, SIOCGIWSCAN, &wrqu, NULL);
			}
#endif
            s_bCommandComplete(pDevice);
            break;

        case WLAN_CMD_DISASSOCIATE_START :
	pDevice->byReAssocCount = 0;
            if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
                (pMgmt->eCurrState != WMAC_STATE_ASSOC)) {
                s_bCommandComplete(pDevice);
                spin_unlock_irq(&pDevice->lock);
                return;
            } else {
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Send Disassociation Packet..\n");
                
                vMgrDisassocBeginSta((HANDLE)pDevice, pMgmt, pMgmt->abyCurrBSSID, (8), &Status);
                pDevice->bLinkPass = FALSE;
                
                pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
                pItemSSID->len = 0;
                memset(pItemSSID->abySSID, 0, WLAN_SSID_MAXLEN);
                pMgmt->eCurrState = WMAC_STATE_IDLE;
                pMgmt->sNodeDBTable[0].bActive = FALSE;

            }
            netif_stop_queue(pDevice->dev);
            pDevice->eCommandState = WLAN_DISASSOCIATE_WAIT;
            
            if (pDevice->iTDUsed[TYPE_TXDMA0] != 0){
                vCommandTimerWait((HANDLE)pDevice, 10);
                spin_unlock_irq(&pDevice->lock);
                return;
            };
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" CARDbRadioPowerOff\n");
	
           
            s_bCommandComplete(pDevice);
            break;

        case WLAN_DISASSOCIATE_WAIT :
            
            if (pDevice->iTDUsed[TYPE_TXDMA0] != 0){
                vCommandTimerWait((HANDLE)pDevice, 10);
                spin_unlock_irq(&pDevice->lock);
                return;
            };

           
            s_bCommandComplete(pDevice);
            break;

        case WLAN_CMD_SSID_START:
        	pDevice->byReAssocCount = 0;
            if (pDevice->bRadioOff == TRUE) {
                s_bCommandComplete(pDevice);
                spin_unlock_irq(&pDevice->lock);
                return;
            }

printk("chester-abyDesireSSID=%s\n",((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySSID);
                     
                              
            pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
            pItemSSIDCurr = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" cmd: desire ssid = %s\n", pItemSSID->abySSID);
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" cmd: curr ssid = %s\n", pItemSSIDCurr->abySSID);

            if (pMgmt->eCurrState == WMAC_STATE_ASSOC) {
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Cmd pMgmt->eCurrState == WMAC_STATE_ASSOC\n");
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pItemSSID->len =%d\n",pItemSSID->len);
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pItemSSIDCurr->len = %d\n",pItemSSIDCurr->len);
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" desire ssid = %s\n", pItemSSID->abySSID);
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" curr ssid = %s\n", pItemSSIDCurr->abySSID);
            }

            if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) ||
                ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)&& (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {

                if (pItemSSID->len == pItemSSIDCurr->len) {
                    if (memcmp(pItemSSID->abySSID, pItemSSIDCurr->abySSID, pItemSSID->len) == 0) {
                        s_bCommandComplete(pDevice);
                        spin_unlock_irq(&pDevice->lock);
                        return;
                    }
                }

                netif_stop_queue(pDevice->dev);
                pDevice->bLinkPass = FALSE;
            }
            
            pMgmt->eCurrState = WMAC_STATE_IDLE;
            pMgmt->eCurrMode = WMAC_MODE_STANDBY;
            PSvDisablePowerSaving((HANDLE)pDevice);
            BSSvClearNodeDBTable(pDevice, 0);

            vMgrJoinBSSBegin((HANDLE)pDevice, &Status);
            
            if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED)) {

		
                
                if (pMgmt->eCurrState>= WMAC_STATE_AUTH) {
                    vMgrDeAuthenBeginSta((HANDLE)pDevice, pMgmt, pMgmt->abyCurrBSSID, (3), &Status);
                }
                
                vMgrAuthenBeginSta((HANDLE)pDevice, pMgmt, &Status);
                if (Status == CMD_STATUS_SUCCESS) {
		pDevice->byLinkWaitCount = 0;
                    pDevice->eCommandState = WLAN_AUTHENTICATE_WAIT;
                    vCommandTimerWait((HANDLE)pDevice, AUTHENTICATE_TIMEOUT);
                    spin_unlock_irq(&pDevice->lock);
                    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Set eCommandState = WLAN_AUTHENTICATE_WAIT\n");
                    return;
                }
            }
            
            else if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
                if (pMgmt->eCurrState == WMAC_STATE_JOINTED) {
                    if (netif_queue_stopped(pDevice->dev)){
                        netif_wake_queue(pDevice->dev);
                    }
                    pDevice->bLinkPass = TRUE;

                    pMgmt->sNodeDBTable[0].bActive = TRUE;
                    pMgmt->sNodeDBTable[0].uInActiveCount = 0;
                    bClearBSSID_SCAN(pDevice);
                }
                else {
                    
                    vMgrCreateOwnIBSS((HANDLE)pDevice, &Status);
                    if (Status != CMD_STATUS_SUCCESS){
                        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " WLAN_CMD_IBSS_CREATE fail ! \n");
                    };
                    BSSvAddMulticastNode(pDevice);
                }
            }
            
            else if (pMgmt->eCurrMode == WMAC_MODE_STANDBY) {
                if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA ||
                    pMgmt->eConfigMode == WMAC_CONFIG_AUTO) {
                    
                    vMgrCreateOwnIBSS((HANDLE)pDevice, &Status);
                    if (Status != CMD_STATUS_SUCCESS){
                        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" WLAN_CMD_IBSS_CREATE fail ! \n");
                    };
                    BSSvAddMulticastNode(pDevice);
                    if (netif_queue_stopped(pDevice->dev)){
                        netif_wake_queue(pDevice->dev);
                    }
                    pDevice->bLinkPass = TRUE;
                }
                else {
                    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disconnect SSID none\n");
		  #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
                    
                        {
                  	union iwreq_data  wrqu;
                  	memset(&wrqu, 0, sizeof (wrqu));
                          wrqu.ap_addr.sa_family = ARPHRD_ETHER;
                  	printk("wireless_send_event--->SIOCGIWAP(disassociated:vMgrJoinBSSBegin Fail !!)\n");
                  	wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
                       }
                    #endif

                }
            }
            s_bCommandComplete(pDevice);
            break;

        case WLAN_AUTHENTICATE_WAIT :
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_AUTHENTICATE_WAIT\n");
            if (pMgmt->eCurrState == WMAC_STATE_AUTH) {
                
                	pDevice->byLinkWaitCount = 0;
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCurrState == WMAC_STATE_AUTH\n");
                vMgrAssocBeginSta((HANDLE)pDevice, pMgmt, &Status);
                if (Status == CMD_STATUS_SUCCESS) {
		pDevice->byLinkWaitCount = 0;
                    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState = WLAN_ASSOCIATE_WAIT\n");
                    pDevice->eCommandState = WLAN_ASSOCIATE_WAIT;
                    vCommandTimerWait((HANDLE)pDevice, ASSOCIATE_TIMEOUT);
                    spin_unlock_irq(&pDevice->lock);
                    return;
                }
            }

	else if(pMgmt->eCurrState < WMAC_STATE_AUTHPENDING) {
               printk("WLAN_AUTHENTICATE_WAIT:Authen Fail???\n");
	   }
	   else  if(pDevice->byLinkWaitCount <= 4){    
                pDevice->byLinkWaitCount ++;
	       printk("WLAN_AUTHENTICATE_WAIT:wait %d times!!\n",pDevice->byLinkWaitCount);
	       spin_unlock_irq(&pDevice->lock);
	       vCommandTimerWait((HANDLE)pDevice, AUTHENTICATE_TIMEOUT/2);
	       return;
	   }
	          pDevice->byLinkWaitCount = 0;
		 #if 0
                     #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
                    
                        {
                  	union iwreq_data  wrqu;
                  	memset(&wrqu, 0, sizeof (wrqu));
                          wrqu.ap_addr.sa_family = ARPHRD_ETHER;
                  	printk("wireless_send_event--->SIOCGIWAP(disassociated:AUTHENTICATE_WAIT_timeout)\n");
                  	wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
                       }
                    #endif
	         #endif
            s_bCommandComplete(pDevice);
            break;

        case WLAN_ASSOCIATE_WAIT :
            if (pMgmt->eCurrState == WMAC_STATE_ASSOC) {
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCurrState == WMAC_STATE_ASSOC\n");
                if (pDevice->ePSMode != WMAC_POWER_CAM) {
                    PSvEnablePowerSaving((HANDLE)pDevice, pMgmt->wListenInterval);
                }
                if (pMgmt->eAuthenMode >= WMAC_AUTH_WPA) {
                    KeybRemoveAllKey(&(pDevice->sKey), pDevice->abyBSSID, pDevice->PortOffset);
                }
                pDevice->bLinkPass = TRUE;
                pDevice->byLinkWaitCount = 0;
                pDevice->byReAssocCount = 0;
                bClearBSSID_SCAN(pDevice);
                if (pDevice->byFOETuning) {
                    BBvSetFOE(pDevice->PortOffset);
                    PSbSendNullPacket(pDevice);
                }
                if (netif_queue_stopped(pDevice->dev)){
                    netif_wake_queue(pDevice->dev);
                }
	     #ifdef TxInSleep
		 if(pDevice->IsTxDataTrigger != FALSE)   {    
                     
		    del_timer(&pDevice->sTimerTxData);
                      init_timer(&pDevice->sTimerTxData);
                      pDevice->sTimerTxData.data = (ULONG)pDevice;
                      pDevice->sTimerTxData.function = (TimerFunction)BSSvSecondTxData;
                      pDevice->sTimerTxData.expires = RUN_AT(10*HZ);      
                      pDevice->fTxDataInSleep = FALSE;
                      pDevice->nTxDataTimeCout = 0;
		 }
		 else {
		   
		 }
		pDevice->IsTxDataTrigger = TRUE;
                add_timer(&pDevice->sTimerTxData);
             #endif
            }
		   else if(pMgmt->eCurrState < WMAC_STATE_ASSOCPENDING) {
               printk("WLAN_ASSOCIATE_WAIT:Association Fail???\n");
	   }
	   else  if(pDevice->byLinkWaitCount <= 4){    
                pDevice->byLinkWaitCount ++;
	       printk("WLAN_ASSOCIATE_WAIT:wait %d times!!\n",pDevice->byLinkWaitCount);
	       spin_unlock_irq(&pDevice->lock);
	       vCommandTimerWait((HANDLE)pDevice, ASSOCIATE_TIMEOUT/2);
	       return;
	   }
	          pDevice->byLinkWaitCount = 0;
		#if 0
                     #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
                    
                        {
                  	union iwreq_data  wrqu;
                  	memset(&wrqu, 0, sizeof (wrqu));
                          wrqu.ap_addr.sa_family = ARPHRD_ETHER;
                  	printk("wireless_send_event--->SIOCGIWAP(disassociated:ASSOCIATE_WAIT_timeout)\n");
                  	wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
                       }
                    #endif
		#endif

            s_bCommandComplete(pDevice);
            break;

        case WLAN_CMD_AP_MODE_START :
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_CMD_AP_MODE_START\n");

            if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
                del_timer(&pMgmt->sTimerSecondCallback);
                pMgmt->eCurrState = WMAC_STATE_IDLE;
                pMgmt->eCurrMode = WMAC_MODE_STANDBY;
                pDevice->bLinkPass = FALSE;
                if (pDevice->bEnableHostWEP == TRUE)
                    BSSvClearNodeDBTable(pDevice, 1);
                else
                    BSSvClearNodeDBTable(pDevice, 0);
                pDevice->uAssocCount = 0;
                pMgmt->eCurrState = WMAC_STATE_IDLE;
                pDevice->bFixRate = FALSE;

                vMgrCreateOwnIBSS((HANDLE)pDevice, &Status);
                if (Status != CMD_STATUS_SUCCESS){
                    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " vMgrCreateOwnIBSS fail ! \n");
                };
                
                MACvRegBitsOff(pDevice->PortOffset, MAC_REG_RCR, RCR_UNICAST);
                pDevice->byRxMode &= ~RCR_UNICAST;
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wcmd: rx_mode = %x\n", pDevice->byRxMode );
                BSSvAddMulticastNode(pDevice);
                if (netif_queue_stopped(pDevice->dev)){
                    netif_wake_queue(pDevice->dev);
                }
                pDevice->bLinkPass = TRUE;
                add_timer(&pMgmt->sTimerSecondCallback);
            }
            s_bCommandComplete(pDevice);
            break;

        case WLAN_CMD_TX_PSPACKET_START :
            
            if (pMgmt->sNodeDBTable[0].bRxPSPoll) {
                while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[0].sTxPSQueue)) != NULL) {
                    if (skb_queue_empty(&pMgmt->sNodeDBTable[0].sTxPSQueue)) {
                        pMgmt->abyPSTxMap[0] &= ~byMask[0];
                        pDevice->bMoreData = FALSE;
                    }
                    else {
                        pDevice->bMoreData = TRUE;
                    }
                    if (!device_dma0_xmit(pDevice, skb, 0)) {
                        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Multicast ps tx fail \n");
                    }
                    pMgmt->sNodeDBTable[0].wEnQueueCnt--;
                }
            };

            
            for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
                if (pMgmt->sNodeDBTable[ii].bActive &&
                    pMgmt->sNodeDBTable[ii].bRxPSPoll) {
                    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index=%d Enqueu Cnt= %d\n",
                               ii, pMgmt->sNodeDBTable[ii].wEnQueueCnt);
                    while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) != NULL) {
                        if (skb_queue_empty(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) {
                            
                            pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[ii].wAID >> 3] &=
                                    ~byMask[pMgmt->sNodeDBTable[ii].wAID & 7];
                            pDevice->bMoreData = FALSE;
                        }
                        else {
                            pDevice->bMoreData = TRUE;
                        }
                        if (!device_dma0_xmit(pDevice, skb, ii)) {
                            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "sta ps tx fail \n");
                        }
                        pMgmt->sNodeDBTable[ii].wEnQueueCnt--;
                        
                        
                        if (pMgmt->sNodeDBTable[ii].bPSEnable)
                            break;
                    }
                    if (skb_queue_empty(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) {
                        
                        pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[ii].wAID >> 3] &=
                                    ~byMask[pMgmt->sNodeDBTable[ii].wAID & 7];
                        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index=%d PS queue clear \n", ii);
                    }
Пример #20
0
void nr_stop_idletimer(struct sock *sk)
{
	del_timer(&sk->protinfo.nr->idletimer);
}
Пример #21
0
void
L3DelTimer(struct L3Timer *t)
{
	del_timer(&t->tl);
}
Пример #22
0
void nr_stop_heartbeat(struct sock *sk)
{
	del_timer(&sk->timer);
}
Пример #23
0
void
icc_interrupt(struct IsdnCardState *cs, u_char val)
{
    u_char exval, v1;
    struct sk_buff *skb;
    unsigned int count;

    if (cs->debug & L1_DEB_ISAC)
        debugl1(cs, "ICC interrupt %x", val);
    if (val & 0x80) {	/* RME */
        exval = cs->readisac(cs, ICC_RSTA);
        if ((exval & 0x70) != 0x20) {
            if (exval & 0x40) {
                if (cs->debug & L1_DEB_WARN)
                    debugl1(cs, "ICC RDO");
#ifdef ERROR_STATISTIC
                cs->err_rx++;
#endif
            }
            if (!(exval & 0x20)) {
                if (cs->debug & L1_DEB_WARN)
                    debugl1(cs, "ICC CRC error");
#ifdef ERROR_STATISTIC
                cs->err_crc++;
#endif
            }
            cs->writeisac(cs, ICC_CMDR, 0x80);
        } else {
            count = cs->readisac(cs, ICC_RBCL) & 0x1f;
            if (count == 0)
                count = 32;
            icc_empty_fifo(cs, count);
            if ((count = cs->rcvidx) > 0) {
                cs->rcvidx = 0;
                if (!(skb = alloc_skb(count, GFP_ATOMIC)))
                    printk(KERN_WARNING "HiSax: D receive out of memory\n");
                else {
                    memcpy(skb_put(skb, count), cs->rcvbuf, count);
                    skb_queue_tail(&cs->rq, skb);
                }
            }
        }
        cs->rcvidx = 0;
        schedule_event(cs, D_RCVBUFREADY);
    }
    if (val & 0x40) {	/* RPF */
        icc_empty_fifo(cs, 32);
    }
    if (val & 0x20) {	/* RSC */
        /* never */
        if (cs->debug & L1_DEB_WARN)
            debugl1(cs, "ICC RSC interrupt");
    }
    if (val & 0x10) {	/* XPR */
        if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
            del_timer(&cs->dbusytimer);
        if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
            schedule_event(cs, D_CLEARBUSY);
        if (cs->tx_skb) {
            if (cs->tx_skb->len) {
                icc_fill_fifo(cs);
                goto afterXPR;
            } else {
                dev_kfree_skb_irq(cs->tx_skb);
                cs->tx_cnt = 0;
                cs->tx_skb = NULL;
            }
        }
        if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
            cs->tx_cnt = 0;
            icc_fill_fifo(cs);
        } else
            schedule_event(cs, D_XMTBUFREADY);
    }
afterXPR:
    if (val & 0x04) {	/* CISQ */
        exval = cs->readisac(cs, ICC_CIR0);
        if (cs->debug & L1_DEB_ISAC)
            debugl1(cs, "ICC CIR0 %02X", exval );
        if (exval & 2) {
            cs->dc.icc.ph_state = (exval >> 2) & 0xf;
            if (cs->debug & L1_DEB_ISAC)
                debugl1(cs, "ph_state change %x", cs->dc.icc.ph_state);
            schedule_event(cs, D_L1STATECHANGE);
        }
Пример #24
0
static void uart6850_close(int dev)
{
    uart6850_cmd(UART_MODE_ON);
    del_timer(&uart6850_timer);
    uart6850_opened = 0;
}
Пример #25
0
static void mesh_plink_timer(unsigned long data)
{
	struct sta_info *sta;
	__le16 llid, plid, reason;
	struct ieee80211_sub_if_data *sdata;

	/*
	 * This STA is valid because sta_info_destroy() will
	 * del_timer_sync() this timer after having made sure
	 * it cannot be readded (by deleting the plink.)
	 */
	sta = (struct sta_info *) data;

	if (sta->sdata->local->quiescing) {
		sta->plink_timer_was_running = true;
		return;
	}

	spin_lock_bh(&sta->lock);
	if (sta->ignore_plink_timer) {
		sta->ignore_plink_timer = false;
		spin_unlock_bh(&sta->lock);
		return;
	}
	mpl_dbg("Mesh plink timer for %pM fired on state %d\n",
		sta->sta.addr, sta->plink_state);
	reason = 0;
	llid = sta->llid;
	plid = sta->plid;
	sdata = sta->sdata;

	switch (sta->plink_state) {
	case PLINK_OPN_RCVD:
	case PLINK_OPN_SNT:
		/* retry timer */
		if (sta->plink_retries < dot11MeshMaxRetries(sdata)) {
			u32 rand;
			mpl_dbg("Mesh plink for %pM (retry, timeout): %d %d\n",
				sta->sta.addr, sta->plink_retries,
				sta->plink_timeout);
			get_random_bytes(&rand, sizeof(u32));
			sta->plink_timeout = sta->plink_timeout +
					     rand % sta->plink_timeout;
			++sta->plink_retries;
			mod_plink_timer(sta, sta->plink_timeout);
			spin_unlock_bh(&sta->lock);
			mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->sta.addr, llid,
					    0, 0);
			break;
		}
		reason = cpu_to_le16(MESH_MAX_RETRIES);
		/* fall through on else */
	case PLINK_CNF_RCVD:
		/* confirm timer */
		if (!reason)
			reason = cpu_to_le16(MESH_CONFIRM_TIMEOUT);
		sta->plink_state = PLINK_HOLDING;
		mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
		spin_unlock_bh(&sta->lock);
		mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, plid,
				    reason);
		break;
	case PLINK_HOLDING:
		/* holding timer */
		del_timer(&sta->plink_timer);
		mesh_plink_fsm_restart(sta);
		spin_unlock_bh(&sta->lock);
		break;
	default:
		spin_unlock_bh(&sta->lock);
		break;
	}
}
Пример #26
0
u8 ps5101_mhl_onoff_ex(bool onoff)
{
	struct ps5101_data *ps5101 = dev_get_drvdata(ps5101_mhldev);


	print_info("ps5101 onoff: %d\n", onoff);

	if (!ps5101 || !ps5101->pdata) {
		print_info("mhl_onoff_ex: getting resource is failed\n");
		return 2;
	}
	if (ps5101->pdata->power_state == onoff) {
		print_info("mhl_onoff_ex: mhl already %s\n", \
							 onoff ? "on" : "off");
		return 2;
	}

	msleep(20);

	ps5101->pdata->power_state = onoff; /*save power state*/
	if (onoff) {
		wake_lock(&ps5101->wake_lock);

#ifdef __CONFIG_RSEN_TIMER__
		check_rsen_timer.function = mhl_check_rsen_timer;
		check_rsen_timer.data = 0;
		check_rsen_timer.expires = jiffies + HZ*RSEN_CHECK_TIME;
		add_timer(&check_rsen_timer);
#endif
	/*send some data for VBUS SRC such a TA or USB or UNKNOWN */

		if (ps5101->pdata->hw_onoff)
			ps5101->pdata->hw_onoff(1);



		if (ps5101->pdata->hw_reset)
			ps5101->pdata->hw_reset();

		msleep(100);

		initps5101(ps5101);
		ps5101_enable_irq();
		return 0;
	} else {

#ifdef __CONFIG_RSEN_TIMER__
		del_timer(&check_rsen_timer);
#endif
		ps5101_disable_irq();

		if (ps5101->pdata->hw_onoff)
			ps5101->pdata->hw_onoff(0);


		if (ps5101->pdata->ps5101_muic_cb) {
			ps5101->pdata->ps5101_muic_cb(false, -1);
			ps5101->vbus_isplugd = false;
			print_info("ps5101_muic_cb: TA disconnect\n");
		}
		wake_unlock(&ps5101->wake_lock);

		return 0;

	}
	return 2;
}
Пример #27
0
static void input_stop_autorepeat(struct input_dev *dev)
{
	del_timer(&dev->timer);
}
Пример #28
0
static void floppy_off(unsigned int nr)
{
    del_timer(motor_off_timer+nr);
    motor_off_timer[nr].expires = 3*HZ;
    add_timer(motor_off_timer+nr);
}
Пример #29
0
void WaitingTimer_Uninit(void)
{
    del_timer(&WaitingTimer);
}
Пример #30
0
static int qos_create_sch(ASF_uint32_t  ulVsgId,
			ASFQOSCreateQdisc_t *qdisc)
{
	struct  asf_qdisc *prio_root, *root =
			qdisc->dev->asf_qdisc;
	int i, replace_qdisc = 0;

	if (root) {
		if ((qdisc->parent == ROOT_ID) ||
			(root->qdisc_type != ASF_QDISC_TBF)) {
			asf_err("Root Qdisc already exists on dev %s\n",
						qdisc->dev->name);
			return ASFQOS_FAILURE;
		} else
			replace_qdisc = 1;
	}
	if (qdisc_cnt  >= ASF_MAX_IFACES) {
		asf_err("NO more Qdisc supported: limit[%d] reached\n",
							ASF_MAX_IFACES);
		return ASFQOS_FAILURE;
	}
	/* Now allocate Root Qdisc  */
	prio_root = (struct asf_qdisc *)
		kzalloc(sizeof(struct  asf_qdisc), GFP_KERNEL);
	if (NULL == prio_root) {
		asf_err("OHHHH   NO Memory for Root Qdisc\n");
		return ASFQOS_FAILURE;
	}
	/* fill up the structure data */
	prio_root->enqueue = qos_enqueue;
	prio_root->dequeue = qos_dequeue;
	prio_root->qdisc_type = qdisc->qdisc_type;
	prio_root->handle = qdisc->handle;
	prio_root->parent = qdisc->parent;
	prio_root->state = SCH_READY;
	prio_root->dev = qdisc->dev;
	prio_root->pShaper = NULL;

	switch (qdisc->qdisc_type) {
	case ASF_QDISC_PRIO:
	{
		struct  asf_prio_sched_data *prio_priv;

		prio_priv = (struct  asf_prio_sched_data *)
				kzalloc(sizeof(struct  asf_prio_sched_data),
				GFP_KERNEL);
		if (NULL == prio_priv) {
			asf_err("OHHHH   NO Memory for PRIV\n");
			kfree(prio_root);
			return ASFQOS_FAILURE;
		}

		prio_priv->bands = qdisc->u.prio.bands;
		for (i = 0; i < ASF_PRIO_MAX; i++) {
			prio_priv->q[i].head = NULL;
			prio_priv->q[i].tail = NULL;
			prio_priv->q[i].queue_size = 0;
			prio_priv->q[i].max_queue_size = queue_len;
			prio_priv->q[i].shaper = NULL;


			spin_lock_init(&(prio_priv->q[i].lock));
		}
		prio_root->priv = prio_priv;

		/* Configure De-queue NAPI */
		netif_napi_add(qdisc->dev, &(prio_root->qos_napi),
					prio_tx_napi, qos_budget);
		napi_enable(&(prio_root->qos_napi));

		setup_timer(&prio_root->timer, timer_handler,
					(unsigned long)prio_root);
	}
	break;

	case ASF_QDISC_PRIO_DRR:
	case ASF_QDISC_DRR:
	{
		struct  asf_prio_drr_sched_data *prio_priv;

		prio_priv = (struct  asf_prio_drr_sched_data *)
				kzalloc(sizeof(struct  asf_prio_drr_sched_data),
				GFP_KERNEL);
		if (NULL == prio_priv) {
			asf_err("OHHHH   NO Memory for PRIV\n");
			kfree(prio_root);
			return ASFQOS_FAILURE;
		}

		prio_priv->bands = 0;
		for (i = 0; i < ASF_PRIO_MAX; i++) {
			prio_priv->q[i].head = NULL;
			prio_priv->q[i].tail = NULL;
			prio_priv->q[i].queue_size = 0;
			prio_priv->q[i].max_queue_size = queue_len;
			prio_priv->q[i].shaper = NULL;
			prio_priv->q[i].classid = 0;
			spin_lock_init(&(prio_priv->q[i].lock));
		}
		prio_root->priv = prio_priv;
		/* Configure De-queue NAPI */
		netif_napi_add(qdisc->dev, &(prio_root->qos_napi),
					prio_drr_tx_napi, qos_budget);
		napi_enable(&(prio_root->qos_napi));

		setup_timer(&prio_root->timer, timer_handler,
					(unsigned long)prio_root);
	}
	break;
	default:
		asf_err("OHHHH, INVALID Scheduler Qdisc Type\n");
		kfree(prio_root);
		return ASFQOS_FAILURE;
	}

	/* Telling net_device to use this root qdisc */
	if (replace_qdisc) {
		prio_root->pShaper = root->pShaper;
		del_timer(&(root->timer));
		/* NAPI */
		napi_disable(&(root->qos_napi));
		netif_napi_del(&(root->qos_napi));
	}
	prio_root->dev->asf_qdisc = prio_root;

	for (i = 0; i < ASF_MAX_IFACES; i++) {
		if (qdisc_in_use[i] == NULL) {
			spin_lock(&cnt_lock);
			qdisc_in_use[i] = prio_root;
			qdisc_cnt++;
			spin_unlock(&cnt_lock);
			break;
		}
	}
	asf_debug("CPU [%d]:ASF PRIO[%d][%s]: handle = 0x%X\n parent = 0x%X,"
			" bands = %d\n", smp_processor_id(), qdisc->qdisc_type,
			qdisc->dev->name, qdisc->handle,
			qdisc->parent, qdisc->u.prio.bands);

	return 0;
}