Example #1
static void
amd7930_Dchan_l2l1(struct PStack *st, int pr, void *arg)
{
	struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
	struct sk_buff *skb = arg;
	char str[64];

	switch (pr) {
		case (PH_DATA_REQ):
			if (cs->tx_skb) {
				skb_queue_tail(&cs->sq, skb);
#ifdef L2FRAME_DEBUG		/* psa */
				if (cs->debug & L1_DEB_LAPD)
					Logl2Frame(cs, skb, "PH_DATA Queued", 0);
#endif
			} else {
				if ((cs->dlogflag) && (!(skb->data[2] & 1))) {
					/* I-FRAME */
					LogFrame(cs, skb->data, skb->len);
					sprintf(str, "Q.931 frame user->network tei %d", st->l2.tei);
					dlogframe(cs, skb->data+4, skb->len-4,
						  str);
				}
				cs->tx_skb = skb;
				cs->tx_cnt = 0;
#ifdef L2FRAME_DEBUG		/* psa */
				if (cs->debug & L1_DEB_LAPD)
					Logl2Frame(cs, skb, "PH_DATA", 0);
#endif
				amd7930_dxmit(0, skb->data, skb->len,
					      &amd7930_dxmit_callback, cs);
			}
			break;
		case (PH_PULL_IND):
			if (cs->tx_skb) {
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, " l2l1 tx_skb exist this shouldn't happen");
				skb_queue_tail(&cs->sq, skb);
				break;
			}
			if ((cs->dlogflag) && (!(skb->data[2] & 1))) {	/* I-FRAME */
				LogFrame(cs, skb->data, skb->len);
				sprintf(str, "Q.931 frame user->network tei %d", st->l2.tei);
				dlogframe(cs, skb->data + 4, skb->len - 4,
					  str);
			}
			cs->tx_skb = skb;
			cs->tx_cnt = 0;
#ifdef L2FRAME_DEBUG		/* psa */
			if (cs->debug & L1_DEB_LAPD)
				Logl2Frame(cs, skb, "PH_DATA_PULLED", 0);
#endif
			amd7930_dxmit(0, cs->tx_skb->data, cs->tx_skb->len,
				      &amd7930_dxmit_callback, cs);
			break;
		case (PH_PULL_REQ):
#ifdef L2FRAME_DEBUG		/* psa */
			if (cs->debug & L1_DEB_LAPD)
				debugl1(cs, "-> PH_REQUEST_PULL");
#endif
			if (!cs->tx_skb) {
				test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
				st->l1.l1l2(st, PH_PULL_CNF, NULL);
			} else
				test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
			break;
	}
}
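
The PH_DATA_REQ branch above parks frames in cs->sq while a transmit is in flight, so the completion path is expected to free cs->tx_skb and restart from that queue. A minimal sketch of that step under a hypothetical helper name (in the driver itself this happens via amd7930_dxmit_callback):

static void
amd7930_dxmit_done_sketch(struct IsdnCardState *cs)
{
	struct sk_buff *skb;

	if (cs->tx_skb) {			/* frame fully transmitted */
		dev_kfree_skb(cs->tx_skb);
		cs->tx_skb = NULL;
	}
	if ((skb = skb_dequeue(&cs->sq))) {	/* next queued D-channel frame */
		cs->tx_skb = skb;
		cs->tx_cnt = 0;
		amd7930_dxmit(0, skb->data, skb->len,
			      &amd7930_dxmit_callback, cs);
	}
}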
static void bluecard_write_wakeup(bluecard_info_t *info)
{
	if (!info) {
		printk(KERN_WARNING "bluecard_cs: Call of write_wakeup for unknown device.\n");
		return;
	}

	if (!test_bit(XMIT_SENDING_READY, &(info->tx_state)))
		return;

	if (test_and_set_bit(XMIT_SENDING, &(info->tx_state))) {
		set_bit(XMIT_WAKEUP, &(info->tx_state));
		return;
	}

	do {
		register unsigned int iobase = info->link.io.BasePort1;
		register unsigned int offset;
		register unsigned char command;
		register unsigned long ready_bit;
		register struct sk_buff *skb;
		register int len;

		clear_bit(XMIT_WAKEUP, &(info->tx_state));

		if (!(info->link.state & DEV_PRESENT))
			return;

		if (test_bit(XMIT_BUFFER_NUMBER, &(info->tx_state))) {
			if (!test_bit(XMIT_BUF_TWO_READY, &(info->tx_state)))
				break;
			offset = 0x10;
			command = REG_COMMAND_TX_BUF_TWO;
			ready_bit = XMIT_BUF_TWO_READY;
		} else {
			if (!test_bit(XMIT_BUF_ONE_READY, &(info->tx_state)))
				break;
			offset = 0x00;
			command = REG_COMMAND_TX_BUF_ONE;
			ready_bit = XMIT_BUF_ONE_READY;
		}

		if (!(skb = skb_dequeue(&(info->txq))))
			break;

		if (skb->pkt_type & 0x80) {
			/* Disable RTS */
			info->ctrl_reg |= REG_CONTROL_RTS;
			outb(info->ctrl_reg, iobase + REG_CONTROL);
		}

		/* Activate LED */
		bluecard_enable_activity_led(info);

		/* Send frame */
		len = bluecard_write(iobase, offset, skb->data, skb->len);

		/* Tell the FPGA to send the data */
		outb_p(command, iobase + REG_COMMAND);

		/* Mark the buffer as dirty */
		clear_bit(ready_bit, &(info->tx_state));

		if (skb->pkt_type & 0x80) {

			wait_queue_head_t wait;
			unsigned char baud_reg;

			switch (skb->pkt_type) {
			case PKT_BAUD_RATE_460800:
				baud_reg = REG_CONTROL_BAUD_RATE_460800;
				break;
			case PKT_BAUD_RATE_230400:
				baud_reg = REG_CONTROL_BAUD_RATE_230400;
				break;
			case PKT_BAUD_RATE_115200:
				baud_reg = REG_CONTROL_BAUD_RATE_115200;
				break;
			case PKT_BAUD_RATE_57600:
				/* Fall through... */
			default:
				baud_reg = REG_CONTROL_BAUD_RATE_57600;
				break;
			}

			/* Wait until the command reaches the baseband */
			init_waitqueue_head(&wait);
			interruptible_sleep_on_timeout(&wait, HZ / 10);

			/* Set baud on baseband */
			info->ctrl_reg &= ~0x03;
			info->ctrl_reg |= baud_reg;
			outb(info->ctrl_reg, iobase + REG_CONTROL);

			/* Enable RTS */
			info->ctrl_reg &= ~REG_CONTROL_RTS;
			outb(info->ctrl_reg, iobase + REG_CONTROL);

		/* Wait before the next HCI packet can be sent */
			interruptible_sleep_on_timeout(&wait, HZ);

		}

		if (len == skb->len) {
			kfree_skb(skb);
		} else {
			skb_pull(skb, len);
			skb_queue_head(&(info->txq), skb);
		}

		info->hdev.stat.byte_tx += len;

		/* Change buffer */
		change_bit(XMIT_BUFFER_NUMBER, &(info->tx_state));

	} while (test_bit(XMIT_WAKEUP, &(info->tx_state)));

	clear_bit(XMIT_SENDING, &(info->tx_state));
}
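
The XMIT_SENDING/XMIT_WAKEUP bits above implement a lockless single-sender scheme: any context may queue a frame and call the wakeup, and a concurrent caller merely sets XMIT_WAKEUP so the active sender loops once more. A minimal producer sketch (hypothetical helper; the real driver does this in its HCI send path):

static void bluecard_queue_frame_sketch(bluecard_info_t *info,
					struct sk_buff *skb)
{
	skb_queue_tail(&(info->txq), skb);
	bluecard_write_wakeup(info);	/* no-op if a sender is already active */
}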
Example #3
static int hisax_cs_new(int cardnr, char *id, struct IsdnCard *card,
			struct IsdnCardState **cs_out, int *busy_flag,
			struct module *lockowner)
{
	struct IsdnCardState *cs;

	*cs_out = NULL;

	cs = kzalloc(sizeof(struct IsdnCardState), GFP_ATOMIC);
	if (!cs) {
		printk(KERN_WARNING
		       "HiSax: No memory for IsdnCardState(card %d)\n",
		       cardnr + 1);
		goto out;
	}
	card->cs = cs;
	spin_lock_init(&cs->statlock);
	spin_lock_init(&cs->lock);
	cs->chanlimit = 2;	/* maximum B-channel number */
	cs->logecho = 0;	/* No echo logging */
	cs->cardnr = cardnr;
	cs->debug = L1_DEB_WARN;
	cs->HW_Flags = 0;
	cs->busy_flag = busy_flag;
	cs->irq_flags = I4L_IRQ_FLAG;
#if TEI_PER_CARD
	if (card->protocol == ISDN_PTYPE_NI1)
		test_and_set_bit(FLG_TWO_DCHAN, &cs->HW_Flags);
#else
	test_and_set_bit(FLG_TWO_DCHAN, &cs->HW_Flags);
#endif
	cs->protocol = card->protocol;

	if (card->typ <= 0 || card->typ > ISDN_CTYPE_COUNT) {
		printk(KERN_WARNING
		       "HiSax: Card Type %d out of range\n", card->typ);
		goto outf_cs;
	}
	if (!(cs->dlog = kmalloc(MAX_DLOG_SPACE, GFP_ATOMIC))) {
		printk(KERN_WARNING
		       "HiSax: No memory for dlog(card %d)\n", cardnr + 1);
		goto outf_cs;
	}
	if (!(cs->status_buf = kmalloc(HISAX_STATUS_BUFSIZE, GFP_ATOMIC))) {
		printk(KERN_WARNING
		       "HiSax: No memory for status_buf(card %d)\n",
		       cardnr + 1);
		goto outf_dlog;
	}
	cs->stlist = NULL;
	cs->status_read = cs->status_buf;
	cs->status_write = cs->status_buf;
	cs->status_end = cs->status_buf + HISAX_STATUS_BUFSIZE - 1;
	cs->typ = card->typ;
#ifdef MODULE
	cs->iif.owner = lockowner;
#endif
	strcpy(cs->iif.id, id);
	cs->iif.channels = 2;
	cs->iif.maxbufsize = MAX_DATA_SIZE;
	cs->iif.hl_hdrlen = MAX_HEADER_LEN;
	cs->iif.features =
		ISDN_FEATURE_L2_X75I |
		ISDN_FEATURE_L2_HDLC |
		ISDN_FEATURE_L2_HDLC_56K |
		ISDN_FEATURE_L2_TRANS |
		ISDN_FEATURE_L3_TRANS |
#ifdef	CONFIG_HISAX_1TR6
		ISDN_FEATURE_P_1TR6 |
#endif
#ifdef	CONFIG_HISAX_EURO
		ISDN_FEATURE_P_EURO |
#endif
#ifdef	CONFIG_HISAX_NI1
		ISDN_FEATURE_P_NI1 |
#endif
		0;

	cs->iif.command = HiSax_command;
	cs->iif.writecmd = NULL;
	cs->iif.writebuf_skb = HiSax_writebuf_skb;
	cs->iif.readstat = HiSax_readstatus;
	register_isdn(&cs->iif);
	cs->myid = cs->iif.channels;

	*cs_out = cs;
	return 1;	/* success */

outf_dlog:
	kfree(cs->dlog);
outf_cs:
	kfree(cs);
	card->cs = NULL;
out:
	return 0;	/* error */
}
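
A caller sketch, assuming the usual HiSax convention that hardware-specific probing follows the generic allocation (the follow-up step is illustrative, not from the source):

static int probe_card_sketch(int cardnr, char *id, struct IsdnCard *card,
			     int *busy_flag)
{
	struct IsdnCardState *cs;

	if (!hisax_cs_new(cardnr, id, card, &cs, busy_flag, THIS_MODULE))
		return 0;	/* allocation or registration failed */
	/* card-type specific setup_*() probing would continue here */
	return 1;
}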
Example #4
int
arcofi_fsm(struct IsdnCardState *cs, int event, void *data) {
	if (cs->debug & L1_DEB_MONITOR) {
		debugl1(cs, "arcofi state %d event %d", cs->dc.isac.arcofi_state, event);
	}
	if (event == ARCOFI_TIMEOUT) {
		cs->dc.isac.arcofi_state = ARCOFI_NOP;
		test_and_set_bit(FLG_ARCOFI_ERROR, &cs->HW_Flags);
		wake_up(&cs->dc.isac.arcofi_wait);
		return(1);
	}
	switch (cs->dc.isac.arcofi_state) {
		case ARCOFI_NOP:
			if (event == ARCOFI_START) {
				cs->dc.isac.arcofi_list = data;
				cs->dc.isac.arcofi_state = ARCOFI_TRANSMIT;
				send_arcofi(cs);
			}
			break;
		case ARCOFI_TRANSMIT:
			if (event == ARCOFI_TX_END) {
				if (cs->dc.isac.arcofi_list->receive) {
					add_arcofi_timer(cs);
					cs->dc.isac.arcofi_state = ARCOFI_RECEIVE;
				} else {
					if (cs->dc.isac.arcofi_list->next) {
						cs->dc.isac.arcofi_list =
							cs->dc.isac.arcofi_list->next;
						send_arcofi(cs);
					} else {
						if (test_and_clear_bit(FLG_ARCOFI_TIMER, &cs->HW_Flags)) {
							del_timer(&cs->dc.isac.arcofitimer);
						}
						cs->dc.isac.arcofi_state = ARCOFI_NOP;
						wake_up(&cs->dc.isac.arcofi_wait);
					}
				}
			}
			break;
		case ARCOFI_RECEIVE:
			if (event == ARCOFI_RX_END) {
				if (cs->dc.isac.arcofi_list->next) {
					cs->dc.isac.arcofi_list =
						cs->dc.isac.arcofi_list->next;
					cs->dc.isac.arcofi_state = ARCOFI_TRANSMIT;
					send_arcofi(cs);
				} else {
					if (test_and_clear_bit(FLG_ARCOFI_TIMER, &cs->HW_Flags)) {
						del_timer(&cs->dc.isac.arcofitimer);
					}
					cs->dc.isac.arcofi_state = ARCOFI_NOP;
					wake_up(&cs->dc.isac.arcofi_wait);
				}
			}
			break;
		default:
			debugl1(cs, "Arcofi unknown state %x", cs->dc.isac.arcofi_state);
			return(2);
	}
	return(0);
}
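
Callers drive this FSM by handing it a command list and sleeping on arcofi_wait until the NOP state is reached again. A minimal caller sketch, assuming a prepared struct arcofi_msg chain and the era-typical sleep primitive:

static void arcofi_run_sketch(struct IsdnCardState *cs, struct arcofi_msg *list)
{
	test_and_clear_bit(FLG_ARCOFI_ERROR, &cs->HW_Flags);
	arcofi_fsm(cs, ARCOFI_START, list);
	interruptible_sleep_on(&cs->dc.isac.arcofi_wait);
	if (test_bit(FLG_ARCOFI_ERROR, &cs->HW_Flags))
		debugl1(cs, "arcofi request failed");
}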
/*
 * snd_i2s_alsa_ifx_pcm_trigger - stream activities are handled here
 * This function is called whenever a stream activity is invoked.
 * The trigger function is called in an atomic context.
 *
 * Input parameters
 *		@substream: substream for which the stream function is called
 *		@cmd: the stream command that's requested from the upper layer
 *
 * Output parameters
 *		@ret_val: status, 0 ==> OK
 *
 */
int snd_i2s_alsa_ifx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	int ret_val = 0;
	struct intel_alsa_ifx_stream_info *str_info;
	struct snd_pcm_runtime *pl_runtime;
	struct intel_alsa_ssp_dma_buf *pl_dma_buf;
	bool trigger_start = true;

	WARN(!substream, "ALSA_IFX: ERROR NULL substream\n");
	if (!substream)
		return -EINVAL;

	WARN(!substream->runtime, "ALSA_IFX: ERROR NULL substream->runtime\n");
	if (!substream->runtime)
		return -EINVAL;
	pl_runtime = substream->runtime;

	WARN(!pl_runtime->private_data,
			"ALSA_IFX: ERROR NULL pl_runtime->private_data\n");
	if (!pl_runtime->private_data)
		return -EINVAL;

	str_info = pl_runtime->private_data;

	pl_dma_buf = &(str_info->dma_slot);
	pr_debug("ALSA_IFX:  snd_i2s_alsa_ifx_pcm_trigger CMD = 0x%04X\n", cmd);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
	case SNDRV_PCM_TRIGGER_RESUME:
		pr_debug("ALSA_IFX:  SNDRV_PCM_TRIGGER_START for device %d\n",
				str_info->device_id);
		pr_debug("ALSA_IFX: Trigger Start period_size =%ld\n",
				pl_runtime->period_size);

		if (!test_and_set_bit(INTEL_ALSA_SSP_STREAM_STARTED,
					&str_info->stream_status)) {
			if (test_bit(INTEL_ALSA_SSP_STREAM_DROPPED,
					&str_info->stream_status)) {
				pr_debug("ALSA IFX: Do not restart the trigger, stream running already\n");
				trigger_start = false;
			} else
				trigger_start = true;
		} else {
			WARN(1, "ALSA IFX: ERROR 2 conscutive TRIGGER_START\n");
			return -EBUSY;
		}

		/* Store the substream locally */
		if (trigger_start) {

			pl_dma_buf->length = frames_to_bytes(pl_runtime,
					pl_runtime->period_size);
			pl_dma_buf->addr = pl_runtime->dma_area;
			pl_dma_buf->period_index_max = pl_runtime->periods;

			queue_work(p_alsa_ifx_snd_card->ssp_wq,
					&str_info->ssp_ws);
		}
		str_info->dbg_cum_bytes += frames_to_bytes(substream->runtime,
				substream->runtime->period_size);
		break;

	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		pr_debug("ALSA_IFX:  SNDRV_PCM_TRIGGER_STOP\n");

		if (test_and_clear_bit(INTEL_ALSA_SSP_STREAM_STARTED,
				&str_info->stream_status))
			set_bit(INTEL_ALSA_SSP_STREAM_DROPPED,
					&str_info->stream_status);
		else {
			WARN(1, "ALSA IFX: trigger START/STOP mismatch\n");
			return -EBUSY;
		}
		break;

	default:
		WARN(1, "ALSA_IFX: snd_i2s_alsa_ifx_pcm_trigger Bad Command\n");
		return -EINVAL;
	}
	return ret_val;
}
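
The trigger callback is wired into the PCM device through a snd_pcm_ops table; a sketch of that wiring (the sibling callback names are assumptions, only .trigger is taken from the code above):

static struct snd_pcm_ops intel_alsa_ifx_pcm_ops = {
	.open    = snd_i2s_alsa_ifx_pcm_open,		/* assumed */
	.close   = snd_i2s_alsa_ifx_pcm_close,		/* assumed */
	.trigger = snd_i2s_alsa_ifx_pcm_trigger,
	.pointer = snd_i2s_alsa_ifx_pcm_pointer,	/* assumed */
};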
Example #6
static int nfcwilink_open(struct nci_dev *ndev)
{
	struct nfcwilink *drv = nci_get_drvdata(ndev);
	unsigned long comp_ret;
	int rc;

	nfc_dev_dbg(&drv->pdev->dev, "open entry");

	if (test_and_set_bit(NFCWILINK_RUNNING, &drv->flags)) {
		rc = -EBUSY;
		goto exit;
	}

	nfcwilink_proto.priv_data = drv;

	init_completion(&drv->completed);
	drv->st_register_cb_status = -EINPROGRESS;

	rc = st_register(&nfcwilink_proto);
	if (rc < 0) {
		if (rc == -EINPROGRESS) {
			comp_ret = wait_for_completion_timeout(
			&drv->completed,
			msecs_to_jiffies(NFCWILINK_REGISTER_TIMEOUT));

			nfc_dev_dbg(&drv->pdev->dev,
			"wait_for_completion_timeout returned %ld",
			comp_ret);

			if (comp_ret == 0) {
				/* timeout */
				rc = -ETIMEDOUT;
				goto clear_exit;
			} else if (drv->st_register_cb_status != 0) {
				rc = drv->st_register_cb_status;
				nfc_dev_err(&drv->pdev->dev,
				"st_register_cb failed %d", rc);
				goto clear_exit;
			}
		} else {
			nfc_dev_err(&drv->pdev->dev,
				"st_register failed %d", rc);
			goto clear_exit;
		}
	}

	/* st_register MUST fill the write callback */
	BUG_ON(nfcwilink_proto.write == NULL);
	drv->st_write = nfcwilink_proto.write;

	if (nfcwilink_download_fw(drv)) {
		nfc_dev_err(&drv->pdev->dev, "nfcwilink_download_fw failed");
		/* open should succeed, even if the FW download failed */
	}

	goto exit;

clear_exit:
	clear_bit(NFCWILINK_RUNNING, &drv->flags);

exit:
	return rc;
}
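
The open callback is reached through an nci_ops table registered with the NCI core; a sketch along those lines (the close/send callback names are assumptions):

static struct nci_ops nfcwilink_ops_sketch = {
	.open  = nfcwilink_open,
	.close = nfcwilink_close,	/* assumed */
	.send  = nfcwilink_send,	/* assumed */
};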
static void
W6692B_interrupt(struct IsdnCardState *cs, u_char bchan)
{
	u_char val;
	u_char r;
	struct BCState *bcs;
	struct sk_buff *skb;
	int count;

	bcs = (cs->bcs->channel == bchan) ? cs->bcs : (cs->bcs+1);
	val = cs->BC_Read_Reg(cs, bchan, W_B_EXIR);
	debugl1(cs, "W6692B chan %d B_EXIR 0x%02X", bchan, val);

	if (!test_bit(BC_FLG_INIT, &bcs->Flag)) {
		debugl1(cs, "W6692B not INIT yet");
		return;
	}
	if (val & W_B_EXI_RME) {	/* RME */
		r = cs->BC_Read_Reg(cs, bchan, W_B_STAR);
		if (r & (W_B_STAR_RDOV | W_B_STAR_CRCE | W_B_STAR_RMB | W_B_STAR_XDOW)) {
			if ((r & W_B_STAR_RDOV) && bcs->mode)
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "W6692 B RDOV mode=%d",
						bcs->mode);
			if (r & W_B_STAR_CRCE)
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "W6692 B CRC error");
			cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RRST | W_B_CMDR_RACT);
		} else {
			count = cs->BC_Read_Reg(cs, bchan, W_B_RBCL) & (W_B_FIFO_THRESH - 1);
			if (count == 0)
				count = W_B_FIFO_THRESH;
			W6692B_empty_fifo(bcs, count);
			if ((count = bcs->hw.w6692.rcvidx) > 0) {
				if (cs->debug & L1_DEB_HSCX_FIFO)
					debugl1(cs, "W6692 Bchan Frame %d", count);
				if (!(skb = dev_alloc_skb(count)))
					printk(KERN_WARNING "W6692: Bchan receive out of memory\n");
				else {
					memcpy(skb_put(skb, count), bcs->hw.w6692.rcvbuf, count);
					skb_queue_tail(&bcs->rqueue, skb);
				}
			}
		}
		bcs->hw.w6692.rcvidx = 0;
		W6692B_sched_event(bcs, B_RCVBUFREADY);
	}
	if (val & W_B_EXI_RMR) {	/* RMR */
		W6692B_empty_fifo(bcs, W_B_FIFO_THRESH);
		if (bcs->mode == L1_MODE_TRANS) {
			/* receive audio data */
			if (!(skb = dev_alloc_skb(W_B_FIFO_THRESH)))
				printk(KERN_WARNING "HiSax: receive out of memory\n");
			else {
				memcpy(skb_put(skb, W_B_FIFO_THRESH), bcs->hw.w6692.rcvbuf, W_B_FIFO_THRESH);
				skb_queue_tail(&bcs->rqueue, skb);
			}
			bcs->hw.w6692.rcvidx = 0;
			W6692B_sched_event(bcs, B_RCVBUFREADY);
		}
	}
	if (val & W_B_EXI_XFR) {	/* XFR */
		if (bcs->tx_skb) {
			if (bcs->tx_skb->len) {
				W6692B_fill_fifo(bcs);
				return;
			} else {
				if (bcs->st->lli.l1writewakeup &&
				 (PACKET_NOACK != bcs->tx_skb->pkt_type))
					bcs->st->lli.l1writewakeup(bcs->st, bcs->hw.w6692.count);
				dev_kfree_skb_irq(bcs->tx_skb);
				bcs->hw.w6692.count = 0;
				bcs->tx_skb = NULL;
			}
		}
		if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
			bcs->hw.w6692.count = 0;
			test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
			W6692B_fill_fifo(bcs);
		} else {
			test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
			W6692B_sched_event(bcs, B_XMTBUFREADY);
		}
	}
	if (val & W_B_EXI_XDUN) {	/* XDUN */
		if (bcs->mode == 1)
			W6692B_fill_fifo(bcs);
		else {
			/* Here we lost a TX interrupt, so
			 * restart transmitting the whole frame.
			 */
			if (bcs->tx_skb) {
				skb_push(bcs->tx_skb, bcs->hw.w6692.count);
				bcs->tx_cnt += bcs->hw.w6692.count;
				bcs->hw.w6692.count = 0;
			}
			cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_XRST | W_B_CMDR_RACT);
			if (cs->debug & L1_DEB_WARN)
				debugl1(cs, "W6692 B EXIR %x Lost TX", val);
		}
	}
}
static inline void
restart_t200(struct PStack *st, int i)
{
	FsmRestartTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, i);
	test_and_set_bit(FLG_T200_RUN, &st->l2.flag);
}
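
A matching stop helper, sketched after the same HiSax L2 conventions, clears the run flag before deleting the timer:

static inline void
stop_t200(struct PStack *st, int i)
{
	if (test_and_clear_bit(FLG_T200_RUN, &st->l2.flag))
		FsmDelTimer(&st->l2.t200, i);
}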
Example #9
static void hisax_b_sched_event(struct BCState *bcs, int event)
{
	test_and_set_bit(event, &bcs->event);
	schedule_work(&bcs->tqueue);
}
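
The scheduled work later runs in process context and consumes the event bits with the test_and_clear_bit() counterpart; a minimal consumer sketch (the handler name is assumed):

static void hisax_b_work_sketch(struct work_struct *work)
{
	struct BCState *bcs = container_of(work, struct BCState, tqueue);

	if (test_and_clear_bit(B_RCVBUFREADY, &bcs->event)) {
		/* hand queued receive skbs up to the next layer */
	}
	if (test_and_clear_bit(B_XMTBUFREADY, &bcs->event)) {
		/* wake the transmit side */
	}
}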
Example #10
int __init
setup_diva(struct IsdnCard *card)
{
	int bytecnt = 8;
	u_char val;
	struct IsdnCardState *cs = card->cs;
	char tmp[64];

	strcpy(tmp, Diva_revision);
	printk(KERN_INFO "HiSax: Eicon.Diehl Diva driver Rev. %s\n", HiSax_getrev(tmp));
	if (cs->typ != ISDN_CTYPE_DIEHLDIVA)
		return(0);
	cs->hw.diva.status = 0;
	if (card->para[1]) {
		cs->hw.diva.ctrl_reg = 0;
		cs->hw.diva.cfg_reg = card->para[1];
		val = readreg(cs->hw.diva.cfg_reg + DIVA_IPAC_ADR,
			cs->hw.diva.cfg_reg + DIVA_IPAC_DATA, IPAC_ID);
		printk(KERN_INFO "Diva: IPAC version %x\n", val);
		if ((val == 1) || (val==2)) {
			cs->subtyp = DIVA_IPAC_ISA;
			cs->hw.diva.ctrl = 0;
			cs->hw.diva.isac = card->para[1] + DIVA_IPAC_DATA;
			cs->hw.diva.hscx = card->para[1] + DIVA_IPAC_DATA;
			cs->hw.diva.isac_adr = card->para[1] + DIVA_IPAC_ADR;
			cs->hw.diva.hscx_adr = card->para[1] + DIVA_IPAC_ADR;
			test_and_set_bit(HW_IPAC, &cs->HW_Flags);
		} else {
			cs->subtyp = DIVA_ISA;
			cs->hw.diva.ctrl = card->para[1] + DIVA_ISA_CTRL;
			cs->hw.diva.isac = card->para[1] + DIVA_ISA_ISAC_DATA;
			cs->hw.diva.hscx = card->para[1] + DIVA_HSCX_DATA;
			cs->hw.diva.isac_adr = card->para[1] + DIVA_ISA_ISAC_ADR;
			cs->hw.diva.hscx_adr = card->para[1] + DIVA_HSCX_ADR;
		}
		cs->irq = card->para[0];
	} else {
#ifdef __ISAPNP__
		if (isapnp_present()) {
			struct pci_bus *pb;
			struct pci_dev *pd;

			while(pdev->card_vendor) {
				if ((pb = isapnp_find_card(pdev->card_vendor,
					pdev->card_device, pnp_c))) {
					pnp_c = pb;
					pd = NULL;
					if ((pd = isapnp_find_dev(pnp_c,
						pdev->vendor, pdev->function, pd))) {
						printk(KERN_INFO "HiSax: %s detected\n",
							(char *)pdev->driver_data);
						pd->prepare(pd);
						pd->deactivate(pd);
						pd->activate(pd);
						card->para[1] =
							pd->resource[0].start;
						card->para[0] =
							pd->irq_resource[0].start;
						if (!card->para[0] || !card->para[1]) {
							printk(KERN_ERR "Diva PnP:some resources are missing %ld/%lx\n",
								card->para[0], card->para[1]);
							pd->deactivate(pd);
							return(0);
						}
						cs->hw.diva.cfg_reg  = card->para[1];
						cs->irq = card->para[0];
						if (pdev->function == ISAPNP_FUNCTION(0xA1)) {
							cs->subtyp = DIVA_IPAC_ISA;
							cs->hw.diva.ctrl = 0;
							cs->hw.diva.isac =
								card->para[1] + DIVA_IPAC_DATA;
							cs->hw.diva.hscx =
								card->para[1] + DIVA_IPAC_DATA;
							cs->hw.diva.isac_adr =
								card->para[1] + DIVA_IPAC_ADR;
							cs->hw.diva.hscx_adr =
								card->para[1] + DIVA_IPAC_ADR;
							test_and_set_bit(HW_IPAC, &cs->HW_Flags);
						} else {
							cs->subtyp = DIVA_ISA;
							cs->hw.diva.ctrl =
								card->para[1] + DIVA_ISA_CTRL;
							cs->hw.diva.isac =
								card->para[1] + DIVA_ISA_ISAC_DATA;
							cs->hw.diva.hscx =
								card->para[1] + DIVA_HSCX_DATA;
							cs->hw.diva.isac_adr =
								card->para[1] + DIVA_ISA_ISAC_ADR;
							cs->hw.diva.hscx_adr =
								card->para[1] + DIVA_HSCX_ADR;
						}
						goto ready;
					} else {
						printk(KERN_ERR "Diva PnP: PnP error card found, no device\n");
						return(0);
					}
				}
				pdev++;
				pnp_c=NULL;
			} 
			if (!pdev->card_vendor) {
				printk(KERN_INFO "Diva PnP: no ISAPnP card found\n");
			}
		}
#endif
#ifdef CONFIG_PCI
		if (!pci_present()) {
			printk(KERN_ERR "Diva: no PCI bus present\n");
			return(0);
		}

		cs->subtyp = 0;
		if ((dev_diva = pci_find_device(PCI_VENDOR_ID_EICON,
			PCI_DEVICE_ID_EICON_DIVA20, dev_diva))) {
			if (pci_enable_device(dev_diva))
				return(0);
			cs->subtyp = DIVA_PCI;
			cs->irq = dev_diva->irq;
			cs->hw.diva.cfg_reg = pci_resource_start(dev_diva, 2);
		} else if ((dev_diva_u = pci_find_device(PCI_VENDOR_ID_EICON,
			PCI_DEVICE_ID_EICON_DIVA20_U, dev_diva_u))) {
			if (pci_enable_device(dev_diva_u))
				return(0);
			cs->subtyp = DIVA_PCI;
			cs->irq = dev_diva_u->irq;
			cs->hw.diva.cfg_reg = pci_resource_start(dev_diva_u, 2);
		} else if ((dev_diva201 = pci_find_device(PCI_VENDOR_ID_EICON,
			PCI_DEVICE_ID_EICON_DIVA201, dev_diva201))) {
			if (pci_enable_device(dev_diva201))
				return(0);
			cs->subtyp = DIVA_IPAC_PCI;
			cs->irq = dev_diva201->irq;
			cs->hw.diva.pci_cfg =
				(ulong) ioremap(pci_resource_start(dev_diva201, 0), 4096);
			cs->hw.diva.cfg_reg =
				(ulong) ioremap(pci_resource_start(dev_diva201, 1), 4096);
		} else if ((dev_diva202 = pci_find_device(PCI_VENDOR_ID_EICON,
			PCI_DEVICE_ID_EICON_DIVA202, dev_diva202))) {
			if (pci_enable_device(dev_diva202))
				return(0);
			cs->subtyp = DIVA_IPACX_PCI;
			cs->irq = dev_diva202->irq;
			cs->hw.diva.pci_cfg =
				(ulong) ioremap(pci_resource_start(dev_diva202, 0), 4096);
			cs->hw.diva.cfg_reg =
				(ulong) ioremap(pci_resource_start(dev_diva202, 1), 4096);
		} else {
			printk(KERN_WARNING "Diva: No PCI card found\n");
			return(0);
		}

		if (!cs->irq) {
			printk(KERN_WARNING "Diva: No IRQ for PCI card found\n");
			return(0);
		}

		if (!cs->hw.diva.cfg_reg) {
			printk(KERN_WARNING "Diva: No IO-Adr for PCI card found\n");
			return(0);
		}
		cs->irq_flags |= SA_SHIRQ;
#else
		printk(KERN_WARNING "Diva: cfgreg 0 and NO_PCI_BIOS\n");
		printk(KERN_WARNING "Diva: unable to config DIVA PCI\n");
		return (0);
#endif /* CONFIG_PCI */
		if ((cs->subtyp == DIVA_IPAC_PCI) ||
		    (cs->subtyp == DIVA_IPACX_PCI)   ) {
			cs->hw.diva.ctrl = 0;
			cs->hw.diva.isac = 0;
			cs->hw.diva.hscx = 0;
			cs->hw.diva.isac_adr = 0;
			cs->hw.diva.hscx_adr = 0;
			test_and_set_bit(HW_IPAC, &cs->HW_Flags);
			bytecnt = 0;
		} else {
			cs->hw.diva.ctrl = cs->hw.diva.cfg_reg + DIVA_PCI_CTRL;
			cs->hw.diva.isac = cs->hw.diva.cfg_reg + DIVA_PCI_ISAC_DATA;
			cs->hw.diva.hscx = cs->hw.diva.cfg_reg + DIVA_HSCX_DATA;
			cs->hw.diva.isac_adr = cs->hw.diva.cfg_reg + DIVA_PCI_ISAC_ADR;
			cs->hw.diva.hscx_adr = cs->hw.diva.cfg_reg + DIVA_HSCX_ADR;
			bytecnt = 32;
		}
	}
ready:
	printk(KERN_INFO
		"Diva: %s card configured at %#lx IRQ %d\n",
		(cs->subtyp == DIVA_PCI) ? "PCI" :
		(cs->subtyp == DIVA_ISA) ? "ISA" : 
		(cs->subtyp == DIVA_IPAC_ISA) ? "IPAC ISA" :
		(cs->subtyp == DIVA_IPAC_PCI) ? "IPAC PCI" : "IPACX PCI",
		cs->hw.diva.cfg_reg, cs->irq);
	if ((cs->subtyp == DIVA_IPAC_PCI)  || 
	    (cs->subtyp == DIVA_IPACX_PCI) || 
	    (cs->subtyp == DIVA_PCI)         )
		printk(KERN_INFO "Diva: %s space at %#lx\n",
			(cs->subtyp == DIVA_PCI) ? "PCI" :
			(cs->subtyp == DIVA_IPAC_PCI) ? "IPAC PCI" : "IPACX PCI",
			cs->hw.diva.pci_cfg);
	if ((cs->subtyp != DIVA_IPAC_PCI) &&
	    (cs->subtyp != DIVA_IPACX_PCI)   ) {
		if (check_region(cs->hw.diva.cfg_reg, bytecnt)) {
			printk(KERN_WARNING
			       "HiSax: %s config port %lx-%lx already in use\n",
			       CardType[card->typ],
			       cs->hw.diva.cfg_reg,
			       cs->hw.diva.cfg_reg + bytecnt);
			return (0);
		} else {
			request_region(cs->hw.diva.cfg_reg, bytecnt, "diva isdn");
		}
	}
	reset_diva(cs);
	cs->BC_Read_Reg  = &ReadHSCX;
	cs->BC_Write_Reg = &WriteHSCX;
	cs->BC_Send_Data = &hscx_fill_fifo;
	cs->cardmsg = &Diva_card_msg;
	if (cs->subtyp == DIVA_IPAC_ISA) {
		cs->readisac  = &ReadISAC_IPAC;
		cs->writeisac = &WriteISAC_IPAC;
		cs->readisacfifo  = &ReadISACfifo_IPAC;
		cs->writeisacfifo = &WriteISACfifo_IPAC;
		cs->irq_func = &diva_irq_ipac_isa;
		val = readreg(cs->hw.diva.isac_adr, cs->hw.diva.isac, IPAC_ID);
		printk(KERN_INFO "Diva: IPAC version %x\n", val);
	} else if (cs->subtyp == DIVA_IPAC_PCI) {
		cs->readisac  = &MemReadISAC_IPAC;
		cs->writeisac = &MemWriteISAC_IPAC;
		cs->readisacfifo  = &MemReadISACfifo_IPAC;
		cs->writeisacfifo = &MemWriteISACfifo_IPAC;
		cs->BC_Read_Reg  = &MemReadHSCX;
		cs->BC_Write_Reg = &MemWriteHSCX;
		cs->BC_Send_Data = &Memhscx_fill_fifo;
		cs->irq_func = &diva_irq_ipac_pci;
		val = memreadreg(cs->hw.diva.cfg_reg, IPAC_ID);
		printk(KERN_INFO "Diva: IPAC version %x\n", val);
	} else if (cs->subtyp == DIVA_IPACX_PCI) {
		cs->readisac  = &MemReadISAC_IPACX;
		cs->writeisac = &MemWriteISAC_IPACX;
		cs->readisacfifo  = &MemReadISACfifo_IPACX;
		cs->writeisacfifo = &MemWriteISACfifo_IPACX;
		cs->BC_Read_Reg  = &MemReadHSCX_IPACX;
		cs->BC_Write_Reg = &MemWriteHSCX_IPACX;
		cs->BC_Send_Data = 0; // function located in ipacx module
		cs->irq_func = &diva_irq_ipacx_pci;
		printk(KERN_INFO "Diva: IPACX Design Id: %x\n", 
            MemReadISAC_IPACX(cs, IPACX_ID) &0x3F);
	} else { /* DIVA 2.0 */
		cs->hw.diva.tl.function = (void *) diva_led_handler;
		cs->hw.diva.tl.data = (long) cs;
		init_timer(&cs->hw.diva.tl);
		cs->readisac  = &ReadISAC;
		cs->writeisac = &WriteISAC;
		cs->readisacfifo  = &ReadISACfifo;
		cs->writeisacfifo = &WriteISACfifo;
		cs->irq_func = &diva_interrupt;
		ISACVersion(cs, "Diva:");
		if (HscxVersion(cs, "Diva:")) {
			printk(KERN_WARNING
		       "Diva: wrong HSCX versions check IO address\n");
			release_io_diva(cs);
			return (0);
		}
	}
	return (1);
}
Example #11
static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	int new_timeout;
	static const struct watchdog_info ident = {
		.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
							WDIOF_MAGICCLOSE,
		.firmware_version = 1,
		.identity = "W83627HF WDT",
	};

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		if (copy_to_user(argp, &ident, sizeof(ident)))
			return -EFAULT;
		break;
	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		return put_user(0, p);
	case WDIOC_SETOPTIONS:
	{
		int options, retval = -EINVAL;

		if (get_user(options, p))
			return -EFAULT;
		if (options & WDIOS_DISABLECARD) {
			wdt_disable();
			retval = 0;
		}
		if (options & WDIOS_ENABLECARD) {
			wdt_ping();
			retval = 0;
		}
		return retval;
	}
	case WDIOC_KEEPALIVE:
		wdt_ping();
		break;
	case WDIOC_SETTIMEOUT:
		if (get_user(new_timeout, p))
			return -EFAULT;
		if (wdt_set_heartbeat(new_timeout))
			return -EINVAL;
		wdt_ping();
		/* fall through */
	case WDIOC_GETTIMEOUT:
		return put_user(timeout, p);
	default:
		return -ENOTTY;
	}
	return 0;
}
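
From user space this ioctl interface is exercised through /dev/watchdog; a minimal usage sketch:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);
	int t = 60;

	if (fd < 0)
		return 1;
	ioctl(fd, WDIOC_SETTIMEOUT, &t);	/* request 60s; t returns the value in effect */
	ioctl(fd, WDIOC_KEEPALIVE, 0);		/* ping */
	write(fd, "V", 1);			/* magic close, see WDIOF_MAGICCLOSE */
	close(fd);
	return 0;
}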

static int wdt_open(struct inode *inode, struct file *file)
{
	if (test_and_set_bit(0, &wdt_is_open))
		return -EBUSY;
	/*
	 *	Activate
	 */

	wdt_ping();
	return nonseekable_open(inode, file);
}

static int wdt_close(struct inode *inode, struct file *file)
{
	if (expect_close == 42)
		wdt_disable();
	else {
		printk(KERN_CRIT PFX
			"Unexpected close, not stopping watchdog!\n");
		wdt_ping();
	}
	expect_close = 0;
	clear_bit(0, &wdt_is_open);
	return 0;
}

/*
 *	Notifier for system down
 */

static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
	void *unused)
{
	if (code == SYS_DOWN || code == SYS_HALT)
		wdt_disable();	/* Turn the WDT off */

	return NOTIFY_DONE;
}

/*
 *	Kernel Interfaces
 */

static const struct file_operations wdt_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.write		= wdt_write,
	.unlocked_ioctl	= wdt_ioctl,
	.open		= wdt_open,
	.release	= wdt_close,
};

static struct miscdevice wdt_miscdev = {
	.minor = WATCHDOG_MINOR,
	.name = "watchdog",
	.fops = &wdt_fops,
};

/*
 *	The WDT needs to learn about soft shutdowns in order to
 *	turn the timebomb registers off.
 */

static struct notifier_block wdt_notifier = {
	.notifier_call = wdt_notify_sys,
};

static int __init wdt_init(void)
{
	int ret;

	if (wdt_set_heartbeat(timeout)) {
		wdt_set_heartbeat(WATCHDOG_TIMEOUT);
		printk(KERN_INFO PFX
		     "timeout value must be 1 <= timeout <= 255, using %d\n",
		     WATCHDOG_TIMEOUT);
	}

	if (!request_region(wdt_io, 1, WATCHDOG_NAME)) {
//		printk(KERN_ERR PFX "I/O address 0x%04x already in use\n",
;
		ret = -EIO;
		goto out;
	}

	w83627hf_init();

	ret = register_reboot_notifier(&wdt_notifier);
	if (ret != 0) {
		printk(KERN_ERR PFX
			"cannot register reboot notifier (err=%d)\n", ret);
		goto unreg_regions;
	}

	ret = misc_register(&wdt_miscdev);
	if (ret != 0) {
		printk(KERN_ERR PFX
			"cannot register miscdev on minor=%d (err=%d)\n",
			WATCHDOG_MINOR, ret);
		goto unreg_reboot;
	}

	printk(KERN_INFO PFX
			"initialized. timeout=%d sec (nowayout=%d)\n",
			timeout, nowayout);

out:
	return ret;
unreg_reboot:
	unregister_reboot_notifier(&wdt_notifier);
unreg_regions:
	release_region(wdt_io, 1);
	goto out;
}

static void __exit wdt_exit(void)
{
	misc_deregister(&wdt_miscdev);
	unregister_reboot_notifier(&wdt_notifier);
	release_region(wdt_io, 1);
}
Example #12
static inline void
Memhscx_interrupt(struct IsdnCardState *cs, u_char val, u_char hscx)
{
	u_char r;
	struct BCState *bcs = cs->bcs + hscx;
	struct sk_buff *skb;
	int fifo_size = test_bit(HW_IPAC, &cs->HW_Flags)? 64: 32;
	int count;

	if (!test_bit(BC_FLG_INIT, &bcs->Flag))
		return;

	if (val & 0x80) {	/* RME */
		r = MemReadHSCX(cs, hscx, HSCX_RSTA);
		if ((r & 0xf0) != 0xa0) {
			if (!(r & 0x80))
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "HSCX invalid frame");
			if ((r & 0x40) && bcs->mode)
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "HSCX RDO mode=%d",
						bcs->mode);
			if (!(r & 0x20))
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "HSCX CRC error");
			MemWriteHSCXCMDR(cs, hscx, 0x80);
		} else {
			count = MemReadHSCX(cs, hscx, HSCX_RBCL) & (
				test_bit(HW_IPAC, &cs->HW_Flags)? 0x3f: 0x1f);
			if (count == 0)
				count = fifo_size;
			Memhscx_empty_fifo(bcs, count);
			if ((count = bcs->hw.hscx.rcvidx - 1) > 0) {
				if (cs->debug & L1_DEB_HSCX_FIFO)
					debugl1(cs, "HX Frame %d", count);
				if (!(skb = dev_alloc_skb(count)))
					printk(KERN_WARNING "HSCX: receive out of memory\n");
				else {
					memcpy(skb_put(skb, count), bcs->hw.hscx.rcvbuf, count);
					skb_queue_tail(&bcs->rqueue, skb);
				}
			}
		}
		bcs->hw.hscx.rcvidx = 0;
		hscx_sched_event(bcs, B_RCVBUFREADY);
	}
	if (val & 0x40) {	/* RPF */
		Memhscx_empty_fifo(bcs, fifo_size);
		if (bcs->mode == L1_MODE_TRANS) {
			/* receive audio data */
			if (!(skb = dev_alloc_skb(fifo_size)))
				printk(KERN_WARNING "HiSax: receive out of memory\n");
			else {
				memcpy(skb_put(skb, fifo_size), bcs->hw.hscx.rcvbuf, fifo_size);
				skb_queue_tail(&bcs->rqueue, skb);
			}
			bcs->hw.hscx.rcvidx = 0;
			hscx_sched_event(bcs, B_RCVBUFREADY);
		}
	}
	if (val & 0x10) {	/* XPR */
		if (bcs->tx_skb) {
			if (bcs->tx_skb->len) {
				Memhscx_fill_fifo(bcs);
				return;
			} else {
				if (bcs->st->lli.l1writewakeup &&
					(PACKET_NOACK != bcs->tx_skb->pkt_type))
					bcs->st->lli.l1writewakeup(bcs->st, bcs->hw.hscx.count);
				dev_kfree_skb_irq(bcs->tx_skb);
				bcs->hw.hscx.count = 0; 
				bcs->tx_skb = NULL;
			}
		}
		if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
			bcs->hw.hscx.count = 0;
			test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
			Memhscx_fill_fifo(bcs);
		} else {
			test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
			hscx_sched_event(bcs, B_XMTBUFREADY);
		}
	}
}
Example #13
static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
{
	/*
	 * In case the thread crashed and was killed we just pretend that
	 * we handled the interrupt. The hardirq handler has disabled the
	 * device interrupt, so no irq storm is lurking.
	 */
	if (action->thread->flags & PF_EXITING)
		return;

	/*
	 * Wake up the handler thread for this action. If the
	 * RUNTHREAD bit is already set, nothing to do.
	 */
	if (test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		return;

	/*
	 * It's safe to OR the mask lockless here. We have only two
	 * places which write to threads_oneshot: This code and the
	 * irq thread.
	 *
	 * This code is the hard irq context and can never run on two
	 * cpus in parallel. If it ever does we have more serious
	 * problems than this bitmask.
	 *
	 * The irq threads of this irq which clear their "running" bit
	 * in threads_oneshot are serialized via desc->lock against
	 * each other and they are serialized against this code by
	 * IRQS_INPROGRESS.
	 *
	 * Hard irq handler:
	 *
	 *	spin_lock(desc->lock);
	 *	desc->state |= IRQS_INPROGRESS;
	 *	spin_unlock(desc->lock);
	 *	set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
	 *	desc->threads_oneshot |= mask;
	 *	spin_lock(desc->lock);
	 *	desc->state &= ~IRQS_INPROGRESS;
	 *	spin_unlock(desc->lock);
	 *
	 * irq thread:
	 *
	 * again:
	 *	spin_lock(desc->lock);
	 *	if (desc->state & IRQS_INPROGRESS) {
	 *		spin_unlock(desc->lock);
	 *		while(desc->state & IRQS_INPROGRESS)
	 *			cpu_relax();
	 *		goto again;
	 *	}
	 *	if (!test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
	 *		desc->threads_oneshot &= ~mask;
	 *	spin_unlock(desc->lock);
	 *
	 * So either the thread waits for us to clear IRQS_INPROGRESS
	 * or we are waiting in the flow handler for desc->lock to be
	 * released before we reach this point. The thread also checks
	 * IRQTF_RUNTHREAD under desc->lock. If set it leaves
	 * threads_oneshot untouched and runs the thread another time.
	 */
	desc->threads_oneshot |= action->thread_mask;

	/*
	 * We increment the threads_active counter in case we wake up
	 * the irq thread. The irq thread decrements the counter when
	 * it returns from the handler or in the exit path and wakes
	 * up waiters which are stuck in synchronize_irq() when the
	 * active count becomes zero. synchronize_irq() is serialized
	 * against this code (hard irq handler) via IRQS_INPROGRESS
	 * like the finalize_oneshot() code. See comment above.
	 */
	atomic_inc(&desc->threads_active);

	wake_up_process(action->thread);
}
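
This wakeup only happens for actions installed with a threaded handler; a typical registration sketch (driver names are hypothetical):

#include <linux/interrupt.h>

static irqreturn_t my_quick_check(int irq, void *dev_id)
{
	/* returning IRQ_WAKE_THREAD is what leads into irq_wake_thread() */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t my_thread_fn(int irq, void *dev_id)
{
	/* runs in the irq thread, process context */
	return IRQ_HANDLED;
}

static int my_probe_sketch(void *dev, int irq)
{
	return request_threaded_irq(irq, my_quick_check, my_thread_fn,
				    IRQF_ONESHOT, "mydev", dev);
}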
Example #14
/* Called from HCI core to initialize the device */
static int ti_st_open(struct hci_dev *hdev)
{
	unsigned long timeleft;
	struct ti_st *hst;
	int err, i;

	BT_DBG("%s %p", hdev->name, hdev);

	if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
		return -EBUSY;

	/* provide contexts for callbacks from ST */
	hst = hdev->driver_data;

	for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
		ti_st_proto[i].priv_data = hst;
		ti_st_proto[i].max_frame_size = HCI_MAX_FRAME_SIZE;
		ti_st_proto[i].recv = st_receive;
		ti_st_proto[i].reg_complete_cb = st_reg_completion_cb;

		/* Prepare wait-for-completion handler */
		init_completion(&hst->wait_reg_completion);
		/* Reset ST registration callback status flag;
		 * this value will be updated in
		 * st_reg_completion_cb()
		 * whenever it is called from the ST driver.
		 */
		hst->reg_status = -EINPROGRESS;

		err = st_register(&ti_st_proto[i]);
		if (!err)
			goto done;

		if (err != -EINPROGRESS) {
			clear_bit(HCI_RUNNING, &hdev->flags);
			BT_ERR("st_register failed %d", err);
			return err;
		}

		/* ST is busy with either protocol
		 * registration or firmware download.
		 */
		BT_DBG("waiting for registration "
				"completion signal from ST");
		timeleft = wait_for_completion_timeout
			(&hst->wait_reg_completion,
			 msecs_to_jiffies(BT_REGISTER_TIMEOUT));
		if (!timeleft) {
			clear_bit(HCI_RUNNING, &hdev->flags);
			BT_ERR("Timeout(%d sec),didn't get reg "
					"completion signal from ST",
					BT_REGISTER_TIMEOUT / 1000);
			return -ETIMEDOUT;
		}

		/* Is ST registration callback
		 * called with ERROR status? */
		if (hst->reg_status != 0) {
			clear_bit(HCI_RUNNING, &hdev->flags);
			BT_ERR("ST registration completed with invalid "
					"status %d", hst->reg_status);
			return -EAGAIN;
		}

done:
		hst->st_write = ti_st_proto[i].write;
		if (!hst->st_write) {
			BT_ERR("undefined ST write function");
			clear_bit(HCI_RUNNING, &hdev->flags);
			for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
				/* Undo registration with ST */
				err = st_unregister(&ti_st_proto[i]);
				if (err)
					BT_ERR("st_unregister() failed with "
							"error %d", err);
				hst->st_write = NULL;
			}
			return -EIO;
		}
	}
	return 0;
}
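
The open callback above is hooked into the Bluetooth core through the hci_dev of this era; a registration sketch (the sibling callbacks are assumptions):

static int ti_st_register_sketch(struct hci_dev *hdev, struct ti_st *hst)
{
	hdev->driver_data = hst;
	hdev->open  = ti_st_open;
	hdev->close = ti_st_close;		/* assumed */
	hdev->send  = ti_st_send_frame;		/* assumed */
	return hci_register_dev(hdev);
}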
Example #15
static void flush_to_ldisc(struct work_struct *work)
{
	struct tty_struct *tty =
		container_of(work, struct tty_struct, buf.work);
	unsigned long 	flags;
	struct tty_ldisc *disc;

	disc = tty_ldisc_ref(tty);
	if (disc == NULL)	/*  !TTY_LDISC */
		return;

	spin_lock_irqsave(&tty->buf.lock, flags);

	if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
		struct tty_buffer *head;
		while ((head = tty->buf.head) != NULL) {
			int count;
			char *char_buf;
			unsigned char *flag_buf;

			count = head->commit - head->read;
			if (!count) {
				if (head->next == NULL)
					break;
				tty->buf.head = head->next;
				tty_buffer_free(tty, head);
				continue;
			}
			/* Ldisc or user is trying to flush the buffers
			   we are feeding to the ldisc, stop feeding the
			   line discipline as we want to empty the queue */
			if (test_bit(TTY_FLUSHPENDING, &tty->flags))
				break;
			if (!tty->receive_room)
				break;
			if (count > tty->receive_room)
				count = tty->receive_room;
			char_buf = head->char_buf_ptr + head->read;
			flag_buf = head->flag_buf_ptr + head->read;
			head->read += count;
			if (disc->ops->receive_buf) {
				spin_unlock_irqrestore(&tty->buf.lock, flags);
				disc->ops->receive_buf(tty, char_buf,
							flag_buf, count);
				spin_lock_irqsave(&tty->buf.lock, flags);
			}
		}
		clear_bit(TTY_FLUSHING, &tty->flags);
	}

	/* We may have a deferred request to flush the input buffer,
	   if so pull the chain under the lock and empty the queue */
	if (test_bit(TTY_FLUSHPENDING, &tty->flags)) {
		__tty_buffer_flush(tty);
		clear_bit(TTY_FLUSHPENDING, &tty->flags);
		wake_up(&tty->read_wait);
	}
	spin_unlock_irqrestore(&tty->buf.lock, flags);

	tty_ldisc_deref(disc);
}
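
flush_to_ldisc() drains data that driver interrupt paths staged through the flip-buffer API of this era; a producer-side sketch:

static void rx_irq_sketch(struct tty_struct *tty,
			  const unsigned char *data, size_t len)
{
	tty_insert_flip_string(tty, data, len);	/* stage bytes into tty->buf */
	tty_flip_buffer_push(tty);	/* schedules buf.work -> flush_to_ldisc() */
}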
Example #16
static int checkcard(int cardnr, char *id, int *busy_flag, struct module *lockowner)
{
	int ret = 0;
	struct IsdnCard *card = cards + cardnr;
	struct IsdnCardState *cs;

	cs = kzalloc(sizeof(struct IsdnCardState), GFP_ATOMIC);
	if (!cs) {
		printk(KERN_WARNING
		       "HiSax: No memory for IsdnCardState(card %d)\n",
		       cardnr + 1);
		goto out;
	}
	card->cs = cs;
	spin_lock_init(&cs->statlock);
	spin_lock_init(&cs->lock);
	cs->chanlimit = 2;	/* maximum B-channel number */
	cs->logecho = 0;	/* No echo logging */
	cs->cardnr = cardnr;
	cs->debug = L1_DEB_WARN;
	cs->HW_Flags = 0;
	cs->busy_flag = busy_flag;
	cs->irq_flags = I4L_IRQ_FLAG;
#if TEI_PER_CARD
	if (card->protocol == ISDN_PTYPE_NI1)
		test_and_set_bit(FLG_TWO_DCHAN, &cs->HW_Flags);
#else
	test_and_set_bit(FLG_TWO_DCHAN, &cs->HW_Flags);
#endif
	cs->protocol = card->protocol;

	if (card->typ <= 0 || card->typ > ISDN_CTYPE_COUNT) {
		printk(KERN_WARNING
		       "HiSax: Card Type %d out of range\n", card->typ);
		goto outf_cs;
	}
	if (!(cs->dlog = kmalloc(MAX_DLOG_SPACE, GFP_ATOMIC))) {
		printk(KERN_WARNING
		       "HiSax: No memory for dlog(card %d)\n", cardnr + 1);
		goto outf_cs;
	}
	if (!(cs->status_buf = kmalloc(HISAX_STATUS_BUFSIZE, GFP_ATOMIC))) {
		printk(KERN_WARNING
		       "HiSax: No memory for status_buf(card %d)\n",
		       cardnr + 1);
		goto outf_dlog;
	}
	cs->stlist = NULL;
	cs->status_read = cs->status_buf;
	cs->status_write = cs->status_buf;
	cs->status_end = cs->status_buf + HISAX_STATUS_BUFSIZE - 1;
	cs->typ = card->typ;
#ifdef MODULE
	cs->iif.owner = lockowner;
#endif
	strcpy(cs->iif.id, id);
	cs->iif.channels = 2;
	cs->iif.maxbufsize = MAX_DATA_SIZE;
	cs->iif.hl_hdrlen = MAX_HEADER_LEN;
	cs->iif.features =
		ISDN_FEATURE_L2_X75I |
		ISDN_FEATURE_L2_HDLC |
		ISDN_FEATURE_L2_HDLC_56K |
		ISDN_FEATURE_L2_TRANS |
		ISDN_FEATURE_L3_TRANS |
#ifdef	CONFIG_HISAX_1TR6
		ISDN_FEATURE_P_1TR6 |
#endif
#ifdef	CONFIG_HISAX_EURO
		ISDN_FEATURE_P_EURO |
#endif
#ifdef	CONFIG_HISAX_NI1
		ISDN_FEATURE_P_NI1 |
#endif
		0;

	cs->iif.command = HiSax_command;
	cs->iif.writecmd = NULL;
	cs->iif.writebuf_skb = HiSax_writebuf_skb;
	cs->iif.readstat = HiSax_readstatus;
	register_isdn(&cs->iif);
	cs->myid = cs->iif.channels;
	printk(KERN_INFO
	       "HiSax: Card %d Protocol %s Id=%s (%d)\n", cardnr + 1,
	       (card->protocol == ISDN_PTYPE_1TR6) ? "1TR6" :
	       (card->protocol == ISDN_PTYPE_EURO) ? "EDSS1" :
	       (card->protocol == ISDN_PTYPE_LEASED) ? "LEASED" :
	       (card->protocol == ISDN_PTYPE_NI1) ? "NI1" :
	       "NONE", cs->iif.id, cs->myid);
	switch (card->typ) {
#if CARD_TELES0
	case ISDN_CTYPE_16_0:
	case ISDN_CTYPE_8_0:
		ret = setup_teles0(card);
		break;
#endif
#if CARD_TELES3
	case ISDN_CTYPE_16_3:
	case ISDN_CTYPE_PNP:
	case ISDN_CTYPE_TELESPCMCIA:
	case ISDN_CTYPE_COMPAQ_ISA:
		ret = setup_teles3(card);
		break;
#endif
#if CARD_S0BOX
	case ISDN_CTYPE_S0BOX:
		ret = setup_s0box(card);
		break;
#endif
#if CARD_TELESPCI
	case ISDN_CTYPE_TELESPCI:
		ret = setup_telespci(card);
		break;
#endif
#if CARD_AVM_A1
	case ISDN_CTYPE_A1:
		ret = setup_avm_a1(card);
		break;
#endif
#if CARD_AVM_A1_PCMCIA
	case ISDN_CTYPE_A1_PCMCIA:
		ret = setup_avm_a1_pcmcia(card);
		break;
#endif
#if CARD_FRITZPCI
	case ISDN_CTYPE_FRITZPCI:
		ret = setup_avm_pcipnp(card);
		break;
#endif
#if CARD_ELSA
	case ISDN_CTYPE_ELSA:
	case ISDN_CTYPE_ELSA_PNP:
	case ISDN_CTYPE_ELSA_PCMCIA:
	case ISDN_CTYPE_ELSA_PCI:
		ret = setup_elsa(card);
		break;
#endif
#if CARD_IX1MICROR2
	case ISDN_CTYPE_IX1MICROR2:
		ret = setup_ix1micro(card);
		break;
#endif
#if CARD_DIEHLDIVA
	case ISDN_CTYPE_DIEHLDIVA:
		ret = setup_diva(card);
		break;
#endif
#if CARD_ASUSCOM
	case ISDN_CTYPE_ASUSCOM:
		ret = setup_asuscom(card);
		break;
#endif
#if CARD_TELEINT
	case ISDN_CTYPE_TELEINT:
		ret = setup_TeleInt(card);
		break;
#endif
#if CARD_SEDLBAUER
	case ISDN_CTYPE_SEDLBAUER:
	case ISDN_CTYPE_SEDLBAUER_PCMCIA:
	case ISDN_CTYPE_SEDLBAUER_FAX:
		ret = setup_sedlbauer(card);
		break;
#endif
#if CARD_SPORTSTER
	case ISDN_CTYPE_SPORTSTER:
		ret = setup_sportster(card);
		break;
#endif
#if CARD_MIC
	case ISDN_CTYPE_MIC:
		ret = setup_mic(card);
		break;
#endif
#if CARD_NETJET_S
	case ISDN_CTYPE_NETJET_S:
		ret = setup_netjet_s(card);
		break;
#endif
#if CARD_HFCS
	case ISDN_CTYPE_TELES3C:
	case ISDN_CTYPE_ACERP10:
		ret = setup_hfcs(card);
		break;
#endif
#if CARD_HFC_PCI
	case ISDN_CTYPE_HFC_PCI:
		ret = setup_hfcpci(card);
		break;
#endif
#if CARD_HFC_SX
	case ISDN_CTYPE_HFC_SX:
		ret = setup_hfcsx(card);
		break;
#endif
#if CARD_NICCY
	case ISDN_CTYPE_NICCY:
		ret = setup_niccy(card);
		break;
#endif
#if CARD_AMD7930
	case ISDN_CTYPE_AMD7930:
		ret = setup_amd7930(card);
		break;
#endif
#if CARD_ISURF
	case ISDN_CTYPE_ISURF:
		ret = setup_isurf(card);
		break;
#endif
#if CARD_HSTSAPHIR
	case ISDN_CTYPE_HSTSAPHIR:
		ret = setup_saphir(card);
		break;
#endif
#if CARD_TESTEMU
	case ISDN_CTYPE_TESTEMU:
		ret = setup_testemu(card);
		break;
#endif
#if	CARD_BKM_A4T
	case ISDN_CTYPE_BKM_A4T:
		ret = setup_bkm_a4t(card);
		break;
#endif
#if	CARD_SCT_QUADRO
	case ISDN_CTYPE_SCT_QUADRO:
		ret = setup_sct_quadro(card);
		break;
#endif
#if CARD_GAZEL
	case ISDN_CTYPE_GAZEL:
		ret = setup_gazel(card);
		break;
#endif
#if CARD_W6692
	case ISDN_CTYPE_W6692:
		ret = setup_w6692(card);
		break;
#endif
#if CARD_NETJET_U
	case ISDN_CTYPE_NETJET_U:
		ret = setup_netjet_u(card);
		break;
#endif
#if CARD_FN_ENTERNOW_PCI
	case ISDN_CTYPE_ENTERNOW:
		ret = setup_enternow_pci(card);
		break;
#endif
	case ISDN_CTYPE_DYNAMIC:
		ret = 2;
		break;
	default:
		printk(KERN_WARNING
		       "HiSax: Support for %s Card not selected\n",
		       CardType[card->typ]);
		ll_unload(cs);
		goto outf_cs;
	}
	if (!ret) {
		ll_unload(cs);
		goto outf_cs;
	}
	if (!(cs->rcvbuf = kmalloc(MAX_DFRAME_LEN_L1, GFP_ATOMIC))) {
		printk(KERN_WARNING "HiSax: No memory for isac rcvbuf\n");
		ll_unload(cs);
		goto outf_cs;
	}
	cs->rcvidx = 0;
	cs->tx_skb = NULL;
	cs->tx_cnt = 0;
	cs->event = 0;
	cs->tqueue.data = cs;

	skb_queue_head_init(&cs->rq);
	skb_queue_head_init(&cs->sq);

	init_bcstate(cs, 0);
	init_bcstate(cs, 1);

	/* init_card only handles interrupts, which are not
	 * used here for the loadable driver */
	switch (card->typ) {
		case ISDN_CTYPE_DYNAMIC:
			ret = 0;
			break;
		default:
			ret = init_card(cs);
			break;
	}
	if (ret) {
		closecard(cardnr);
		ret = 0;
		goto outf_cs;
	}
	init_tei(cs, cs->protocol);
	ret = CallcNewChan(cs);
	if (ret) {
		closecard(cardnr);
		ret = 0;
		goto outf_cs;
	}
	/* ISAR needs firmware download first */
	if (!test_bit(HW_ISAR, &cs->HW_Flags))
		ll_run(cs, 0);

	ret = 1;
	goto out;

 outf_dlog:
	kfree(cs->dlog);
 outf_cs:
	kfree(cs);
	card->cs = NULL;
 out:
	return ret;
}
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)

static int
rbio_submit(struct ploop_io * io, struct nfs_read_data * nreq,
	    const struct rpc_call_ops * cb)
{
	struct nfs_open_context *ctx = nfs_file_open_context(io->files.file);
	struct inode *inode = io->files.inode;
	struct rpc_task *task;

	struct rpc_message msg = {
		.rpc_cred = ctx->cred,
	};

	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(inode),
		.rpc_message = &msg,
		.callback_ops = cb,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
		.workqueue = nfsio_workqueue,
#endif
		.flags = RPC_TASK_ASYNC,
	};

	nreq->res.count = nreq->args.count;
	nreq->header->cred = msg.rpc_cred;
	nreq->args.context = ctx;

	task_setup_data.task = &nreq->task;
	task_setup_data.callback_data = nreq;
	msg.rpc_argp = &nreq->args;
	msg.rpc_resp = &nreq->res;
	NFS_PROTO(inode)->read_setup(nreq, &msg);

	task = rpc_run_task(&task_setup_data);
	if (unlikely(IS_ERR(task)))
		return PTR_ERR(task);

	rpc_put_task(task);
	return 0;
}

#else

static int
rbio_submit(struct ploop_io * io, struct nfs_read_data * nreq,
	    const struct rpc_call_ops * cb)
{
	struct nfs_open_context *ctx = nfs_file_open_context(io->files.file);
	struct inode *inode = io->files.inode;

	nreq->res.count = nreq->args.count;
	nreq->cred = ctx->cred;
	nreq->args.context = ctx;

	rpc_init_task(&nreq->task, NFS_CLIENT(inode), RPC_TASK_ASYNC, cb, nreq);
	NFS_PROTO(inode)->read_setup(nreq);

	nreq->task.tk_cookie = (unsigned long) inode;

	lock_kernel();
	rpc_execute(&nreq->task);
	unlock_kernel();
	return 0;
}
#endif

static void
nfsio_submit_read(struct ploop_io *io, struct ploop_request * preq,
		  struct bio_list *sbl, iblock_t iblk, unsigned int size)
{
	struct inode *inode = io->files.inode;
	size_t rsize = NFS_SERVER(inode)->rsize;
	struct nfs_read_data *nreq = NULL;
	loff_t pos;
	unsigned int prev_end;
	struct bio * b;

	ploop_prepare_io_request(preq);

	pos = sbl->head->bi_sector;
	pos = ((loff_t)iblk << preq->plo->cluster_log) | (pos & ((1<<preq->plo->cluster_log) - 1));
	pos <<= 9;

	prev_end = PAGE_SIZE;

	for (b = sbl->head; b != NULL; b = b->bi_next) {
		int bv_idx;

		for (bv_idx = 0; bv_idx < b->bi_vcnt; bv_idx++) {
			struct bio_vec * bv = &b->bi_io_vec[bv_idx];

			if (nreq && nreq->args.count + bv->bv_len <= rsize) {
				if (nreq->pages.pagevec[nreq->pages.npages-1] == bv->bv_page &&
				    prev_end == bv->bv_offset) {
					nreq->args.count += bv->bv_len;
					pos += bv->bv_len;
					prev_end += bv->bv_len;
					continue;
				}
				if (nreq->pages.npages < MAX_NBIO_PAGES &&
				    bv->bv_offset == 0 && prev_end == PAGE_SIZE) {
					nreq->args.count += bv->bv_len;
					nreq->pages.pagevec[nreq->pages.npages] = bv->bv_page;
					nreq->pages.npages++;
					pos += bv->bv_len;
					prev_end = bv->bv_offset + bv->bv_len;
					continue;
				}
			}

			if (nreq) {
				int err;

				atomic_inc(&preq->io_count);

				err = rbio_submit(io, nreq, &nfsio_read_ops);
				if (err) {
					PLOOP_REQ_SET_ERROR(preq, err);
					ploop_complete_io_request(preq);
					goto out;
				}
			}

			nreq = rbio_init(pos, bv->bv_page, bv->bv_offset,
					 bv->bv_len, preq, inode);

			if (nreq == NULL) {
				PLOOP_REQ_SET_ERROR(preq, -ENOMEM);
				goto out;
			}

			pos += bv->bv_len;
			prev_end = bv->bv_offset + bv->bv_len;
		}
	}

	if (nreq) {
		int err;

		atomic_inc(&preq->io_count);

		err = rbio_submit(io, nreq, &nfsio_read_ops);
		if (err) {
			PLOOP_REQ_SET_ERROR(preq, err);
			ploop_complete_io_request(preq);
			goto out;
		}
	}

out:
	ploop_complete_io_request(preq);
}

static void nfsio_write_result(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_writeargs	*argp = &data->args;
	struct nfs_writeres	*resp = &data->res;
	int status;

	status = NFS_PROTO(data->header->inode)->write_done(task, data);
	if (status != 0)
		return;

	if (task->tk_status >= 0 && resp->count < argp->count)
		task->tk_status = -EIO;
}

static void nfsio_write_release(void *calldata)
{
	struct nfs_write_data *nreq = calldata;
	struct ploop_request *preq = (struct ploop_request *) nreq->header->req;
	int status = nreq->task.tk_status;

	if (unlikely(status < 0))
		PLOOP_REQ_SET_ERROR(preq, status);

	if (!preq->error &&
	    nreq->res.verf->committed != NFS_FILE_SYNC) {
		if (!test_and_set_bit(PLOOP_REQ_UNSTABLE, &preq->state))
			memcpy(&preq->verf, &nreq->res.verf->verifier, 8);
	}
	nfsio_complete_io_request(preq);

	nfsio_wbio_release(calldata);
}

static const struct rpc_call_ops nfsio_write_ops = {
	.rpc_call_done = nfsio_write_result,
	.rpc_release = nfsio_write_release,
};

static struct nfs_write_data *
wbio_init(loff_t pos, struct page * page, unsigned int off, unsigned int len,
	  void * priv, struct inode * inode)
{
	struct nfs_write_data * nreq;

	nreq = nfsio_wbio_alloc(MAX_NBIO_PAGES);
	if (unlikely(nreq == NULL))
		return NULL;

	nreq->args.offset = pos;
	nreq->args.pgbase = off;
	nreq->args.count = len;
	nreq->pages.pagevec[0] = page;
	nreq->pages.npages = 1;
	nreq->header->req = priv;
	nreq->header->inode = inode;
	nreq->args.fh = NFS_FH(inode);
	nreq->args.pages = nreq->pages.pagevec;
	nreq->args.stable = NFS_UNSTABLE;
	nreq->res.fattr = &nreq->fattr;
	nreq->res.verf = &nreq->verf;
	return nreq;
}

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)

static int wbio_submit(struct ploop_io * io, struct nfs_write_data *nreq,
		       const struct rpc_call_ops * cb)
{
	struct nfs_open_context *ctx = nfs_file_open_context(io->files.file);
	struct inode *inode = io->files.inode;

	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_cred = ctx->cred,
	};

	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(inode),
		.rpc_message = &msg,
		.callback_ops = cb,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
		.workqueue = nfsio_workqueue,
#endif
		.flags = RPC_TASK_ASYNC,
	};

	if (verify_bounce(nreq))
		return -ENOMEM;

	nreq->res.count = nreq->args.count;
	nreq->args.context = ctx;
	nreq->header->cred = msg.rpc_cred;

	task_setup_data.task = &nreq->task;
	task_setup_data.callback_data = nreq;
	msg.rpc_argp = &nreq->args;
	msg.rpc_resp = &nreq->res;
	NFS_PROTO(inode)->write_setup(nreq, &msg);

	task = rpc_run_task(&task_setup_data);
	if (unlikely(IS_ERR(task)))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

#else

static int wbio_submit(struct ploop_io * io, struct nfs_write_data *nreq,
		       const struct rpc_call_ops * cb)
{
	struct nfs_open_context *ctx = nfs_file_open_context(io->files.file);
	struct inode *inode = io->files.inode;

	if (verify_bounce(nreq))
		return -ENOMEM;

	nreq->res.count = nreq->args.count;
	nreq->args.context = ctx;
	nreq->cred = ctx->cred;

	rpc_init_task(&nreq->task, NFS_CLIENT(inode), RPC_TASK_ASYNC, cb, nreq);
	NFS_PROTO(inode)->write_setup(nreq, NFS_UNSTABLE);

	nreq->task.tk_priority = RPC_PRIORITY_NORMAL;
	nreq->task.tk_cookie = (unsigned long) inode;

	lock_kernel();
	rpc_execute(&nreq->task);
	unlock_kernel();
	return 0;
}

#endif


static void
nfsio_submit_write(struct ploop_io *io, struct ploop_request * preq,
		   struct bio_list *sbl, iblock_t iblk, unsigned int size)
{
	struct inode *inode = io->files.inode;
	size_t wsize = NFS_SERVER(inode)->wsize;
	struct nfs_write_data *nreq = NULL;
	loff_t pos;
	struct bio * b;
	unsigned int prev_end;

	nfsio_prepare_io_request(preq);

	pos = sbl->head->bi_sector;
	pos = ((loff_t)iblk << preq->plo->cluster_log) | (pos & ((1<<preq->plo->cluster_log) - 1));
	ploop_prepare_tracker(preq, pos);
	pos <<= 9;

	prev_end = PAGE_SIZE;

	for (b = sbl->head; b != NULL; b = b->bi_next) {
		int bv_idx;

		for (bv_idx = 0; bv_idx < b->bi_vcnt; bv_idx++) {
			struct bio_vec * bv = &b->bi_io_vec[bv_idx];

			if (nreq && nreq->args.count + bv->bv_len <= wsize) {
				if (nreq->pages.pagevec[nreq->pages.npages-1] == bv->bv_page &&
				    prev_end == bv->bv_offset) {
					nreq->args.count += bv->bv_len;
					pos += bv->bv_len;
					prev_end += bv->bv_len;
					continue;
				}
				if (nreq->pages.npages < MAX_NBIO_PAGES &&
				    bv->bv_offset == 0 && prev_end == PAGE_SIZE) {
					nreq->args.count += bv->bv_len;
					nreq->pages.pagevec[nreq->pages.npages] = bv->bv_page;
					nreq->pages.npages++;
					pos += bv->bv_len;
					prev_end = bv->bv_offset + bv->bv_len;
					continue;
				}
			}

			if (nreq) {
				int err;
				atomic_inc(&preq->io_count);
				err = wbio_submit(io, nreq, &nfsio_write_ops);
				if (err) {
					PLOOP_REQ_SET_ERROR(preq, err);
					nfsio_complete_io_request(preq);
					goto out;
				}
			}

			nreq = wbio_init(pos, bv->bv_page, bv->bv_offset,
					 bv->bv_len, preq, inode);

			if (nreq == NULL) {
				PLOOP_REQ_SET_ERROR(preq, -ENOMEM);
				goto out;
			}

			prev_end = bv->bv_offset + bv->bv_len;
			pos += bv->bv_len;
		}
	}

	if (nreq) {
		int err;
		atomic_inc(&preq->io_count);
		err = wbio_submit(io, nreq, &nfsio_write_ops);
		if (err) {
			PLOOP_REQ_SET_ERROR(preq, err);
			nfsio_complete_io_request(preq);
		}
	}

out:
	nfsio_complete_io_request(preq);
}

static void
nfsio_submit(struct ploop_io *io, struct ploop_request * preq,
	     unsigned long rw,
	     struct bio_list *sbl, iblock_t iblk, unsigned int size)
{
	if (iblk == PLOOP_ZERO_INDEX)
		iblk = 0;

	if (rw & (1<<BIO_RW))
		nfsio_submit_write(io, preq, sbl, iblk, size);
	else
		nfsio_submit_read(io, preq, sbl, iblk, size);
}

struct bio_list_walk
{
	struct bio * cur;
	int idx;
	int bv_off;
};

static void
nfsio_submit_write_pad(struct ploop_io *io, struct ploop_request * preq,
		       struct bio_list *sbl, iblock_t iblk, unsigned int size)
{
	struct inode *inode = io->files.inode;
	size_t wsize = NFS_SERVER(inode)->wsize;
	struct nfs_write_data *nreq = NULL;
	struct bio_list_walk bw;
	unsigned prev_end;

	loff_t pos, end_pos, start, end;

	/* pos..end_pos is the range which we are going to write */
	pos = (loff_t)iblk << (preq->plo->cluster_log + 9);
	end_pos = pos + (1 << (preq->plo->cluster_log + 9));

	/* start..end is data that we have. The rest must be zero padded. */
	start = pos + ((sbl->head->bi_sector & ((1<<preq->plo->cluster_log) - 1)) << 9);
	end = start + (size << 9);

	nfsio_prepare_io_request(preq);
	ploop_prepare_tracker(preq, start >> 9);

	prev_end = PAGE_SIZE;

#if 1
	/* GCC, shut up! */
	bw.cur = sbl->head;
	bw.idx = 0;
	bw.bv_off = 0;
	BUG_ON(bw.cur->bi_io_vec[0].bv_len & 511);
#endif

	while (pos < end_pos) {
		struct page * page;
		unsigned int poff, plen;

		if (pos < start) {
			page = ZERO_PAGE(0);
			poff = 0;
			plen = start - pos;
			if (plen > PAGE_SIZE)
				plen = PAGE_SIZE;
		} else if (pos >= end) {
			page = ZERO_PAGE(0);
			poff = 0;
			plen = end_pos - pos;
			if (plen > PAGE_SIZE)
				plen = PAGE_SIZE;
		} else {
			/* pos >= start && pos < end */
			struct bio_vec * bv;

			if (pos == start) {
				bw.cur = sbl->head;
				bw.idx = 0;
				bw.bv_off = 0;
				BUG_ON(bw.cur->bi_io_vec[0].bv_len & 511);
			}
			bv = bw.cur->bi_io_vec + bw.idx;

			if (bw.bv_off >= bv->bv_len) {
				bw.idx++;
				bv++;
				bw.bv_off = 0;
				if (bw.idx >= bw.cur->bi_vcnt) {
					bw.cur = bw.cur->bi_next;
					bw.idx = 0;
					bw.bv_off = 0;
					bv = bw.cur->bi_io_vec;
				}
				BUG_ON(bv->bv_len & 511);
			}

			page = bv->bv_page;
			poff = bv->bv_offset + bw.bv_off;
			plen = bv->bv_len - bw.bv_off;
		}

		if (nreq && nreq->args.count + plen <= wsize) {
			if (nreq->pages.pagevec[nreq->pages.npages-1] == page &&
			    prev_end == poff) {
				nreq->args.count += plen;
				pos += plen;
				bw.bv_off += plen;
				prev_end += plen;
				continue;
			}
			if (nreq->pages.npages < MAX_NBIO_PAGES &&
			    poff == 0 && prev_end == PAGE_SIZE) {
				nreq->args.count += plen;
				nreq->pages.pagevec[nreq->pages.npages] = page;
				nreq->pages.npages++;
				pos += plen;
				bw.bv_off += plen;
				prev_end = poff + plen;
				continue;
			}
		}

		if (nreq) {
			int err;
			atomic_inc(&preq->io_count);
			err = wbio_submit(io, nreq, &nfsio_write_ops);
			if (err) {
				PLOOP_REQ_SET_ERROR(preq, err);
				nfsio_complete_io_request(preq);
				goto out;
			}
		}

		nreq = wbio_init(pos, page, poff, plen, preq, inode);

		if (nreq == NULL) {
			PLOOP_REQ_SET_ERROR(preq, -ENOMEM);
			goto out;
		}

		prev_end = poff + plen;
		pos += plen;
		bw.bv_off += plen;
	}

	if (nreq) {
		int err;
		atomic_inc(&preq->io_count);
		err = wbio_submit(io, nreq, &nfsio_write_ops);
		if (err) {
			PLOOP_REQ_SET_ERROR(preq, err);
			nfsio_complete_io_request(preq);
		}
	}

out:
	nfsio_complete_io_request(preq);
}


static void
nfsio_submit_alloc(struct ploop_io *io, struct ploop_request * preq,
		 struct bio_list * sbl, unsigned int size)
{
	iblock_t iblk = io->alloc_head++;

	if (!(io->files.file->f_mode & FMODE_WRITE)) {
		PLOOP_FAIL_REQUEST(preq, -EBADF);
		return;
	}
	preq->iblock = iblk;
	preq->eng_state = PLOOP_E_DATA_WBI;

	nfsio_submit_write_pad(io, preq, sbl, iblk, size);
}

static void nfsio_destroy(struct ploop_io * io)
{
	if (io->fsync_thread) {
		kthread_stop(io->fsync_thread);
		io->fsync_thread = NULL;
	}

	if (io->files.file) {
		struct file * file = io->files.file;
		mutex_lock(&io->plo->sysfs_mutex);
		io->files.file = NULL;
		if (io->files.mapping)
			(void)invalidate_inode_pages2(io->files.mapping);
		mutex_unlock(&io->plo->sysfs_mutex);
		fput(file);
	}
}

static int nfsio_sync(struct ploop_io * io)
{
	return 0;
}

static int nfsio_stop(struct ploop_io * io)
{
	return 0;
}


static int
nfsio_init(struct ploop_io * io)
{
	INIT_LIST_HEAD(&io->fsync_queue);
	init_waitqueue_head(&io->fsync_waitq);
	return 0;
}
Example #18
static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
			      char *fp, int count)
{
	const unsigned char *p;
	char *f, flags = TTY_NORMAL;
	int	i;
	char	buf[64];
	unsigned long cpuflags;

	if (!tty->read_buf)
		return;

	if (tty->real_raw) {
		spin_lock_irqsave(&tty->read_lock, cpuflags);
		i = min(N_TTY_BUF_SIZE - tty->read_cnt,
			N_TTY_BUF_SIZE - tty->read_head);
		i = min(count, i);
		memcpy(tty->read_buf + tty->read_head, cp, i);
		tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1);
		tty->read_cnt += i;
		cp += i;
		count -= i;

		i = min(N_TTY_BUF_SIZE - tty->read_cnt,
			N_TTY_BUF_SIZE - tty->read_head);
		i = min(count, i);
		memcpy(tty->read_buf + tty->read_head, cp, i);
		tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1);
		tty->read_cnt += i;
		spin_unlock_irqrestore(&tty->read_lock, cpuflags);
	} else {
		for (i=count, p = cp, f = fp; i; i--, p++) {
			if (f)
				flags = *f++;
			switch (flags) {
			case TTY_NORMAL:
				n_tty_receive_char(tty, *p);
				break;
			case TTY_BREAK:
				n_tty_receive_break(tty);
				break;
			case TTY_PARITY:
			case TTY_FRAME:
				n_tty_receive_parity_error(tty, *p);
				break;
			case TTY_OVERRUN:
				n_tty_receive_overrun(tty);
				break;
			default:
				printk("%s: unknown flag %d\n",
				       tty_name(tty, buf), flags);
				break;
			}
		}
		if (tty->driver->flush_chars)
			tty->driver->flush_chars(tty);
	}

	if (!tty->icanon && (tty->read_cnt >= tty->minimum_to_wake)) {
		kill_fasync(&tty->fasync, SIGIO, POLL_IN);
		if (waitqueue_active(&tty->read_wait))
			wake_up_interruptible(&tty->read_wait);
	}

	/*
	 * Check the remaining room for the input canonicalization
	 * mode.  We don't want to throttle the driver if we're in
	 * canonical mode and don't have a newline yet!
	 */
	if (n_tty_receive_room(tty) < TTY_THRESHOLD_THROTTLE) {
		/* check TTY_THROTTLED first so it indicates our state */
		if (!test_and_set_bit(TTY_THROTTLED, &tty->flags) &&
		    tty->driver->throttle)
			tty->driver->throttle(tty);
	}
}
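
/*
 * The real_raw path above fills a power-of-two ring buffer with at most
 * two memcpy calls: one up to the wrap point, one from the start. A
 * self-contained sketch of that technique (BUF_SIZE and struct ring are
 * stand-ins for N_TTY_BUF_SIZE and the tty fields):
 */
#include <string.h>

#define BUF_SIZE 4096				/* power of two */

struct ring { unsigned char buf[BUF_SIZE]; int head, cnt; };

static int ring_put(struct ring *r, const unsigned char *cp, int count)
{
	int copied = 0, i;

	/* first segment: limited by free space and distance to the end */
	i = BUF_SIZE - r->cnt;
	if (i > BUF_SIZE - r->head)
		i = BUF_SIZE - r->head;
	if (i > count)
		i = count;
	memcpy(r->buf + r->head, cp, i);
	r->head = (r->head + i) & (BUF_SIZE - 1);
	r->cnt += i;
	cp += i;
	count -= i;
	copied += i;

	/* second segment: whatever wrapped around to the start */
	i = BUF_SIZE - r->cnt;
	if (i > BUF_SIZE - r->head)
		i = BUF_SIZE - r->head;
	if (i > count)
		i = count;
	memcpy(r->buf + r->head, cp, i);
	r->head = (r->head + i) & (BUF_SIZE - 1);
	r->cnt += i;
	copied += i;

	return copied;
}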
Example #19
int rt2x00mac_add_interface(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON);
	struct queue_entry *entry = NULL;
	unsigned int i;

	/*
	 * Don't allow interfaces to be added
	 * when the device has disappeared.
	 */
	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) ||
	    !test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
		return -ENODEV;

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
		/*
		 * We don't support mixed combinations of
		 * sta and ap interfaces.
		 */
		if (rt2x00dev->intf_sta_count)
			return -ENOBUFS;

		/*
		 * Check if we exceeded the maximum amount
		 * of supported interfaces.
		 */
		if (rt2x00dev->intf_ap_count >= rt2x00dev->ops->max_ap_intf)
			return -ENOBUFS;

		break;
	case NL80211_IFTYPE_STATION:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_WDS:
		/*
		 * We don't support mixed combinations of
		 * sta and ap interfaces.
		 */
		if (rt2x00dev->intf_ap_count)
			return -ENOBUFS;

		/*
		 * Check if we exceeded the maximum amount
		 * of supported interfaces.
		 */
		if (rt2x00dev->intf_sta_count >= rt2x00dev->ops->max_sta_intf)
			return -ENOBUFS;

		break;
	default:
		return -EINVAL;
	}

	/*
	 * Loop through all beacon queues to find a free
	 * entry. Since there are as many beacon entries
	 * as the maximum number of interfaces, this search shouldn't
	 * fail.
	 */
	for (i = 0; i < queue->limit; i++) {
		entry = &queue->entries[i];
		if (!test_and_set_bit(ENTRY_BCN_ASSIGNED, &entry->flags))
			break;
	}

	if (unlikely(i == queue->limit))
		return -ENOBUFS;

	/*
	 * We are now absolutely sure the interface can be created,
	 * increase interface count and start initialization.
	 */

	if (vif->type == NL80211_IFTYPE_AP)
		rt2x00dev->intf_ap_count++;
	else
		rt2x00dev->intf_sta_count++;

	spin_lock_init(&intf->lock);
	spin_lock_init(&intf->seqlock);
	mutex_init(&intf->beacon_skb_mutex);
	intf->beacon = entry;

	/*
	 * The MAC address must be configured after the device
	 * has been initialized. Otherwise the device can reset
	 * the MAC registers.
	 * The BSSID address must only be configured in AP mode,
	 * however we should not send an empty BSSID address for
	 * STA interfaces at this time, since this can cause
	 * invalid behavior in the device.
	 */
	memcpy(&intf->mac, vif->addr, ETH_ALEN);
	rt2x00lib_config_intf(rt2x00dev, intf, vif->type,
			      intf->mac, NULL);

	/*
	 * Some filters depend on the current working mode. We can force
	 * an update during the next configure_filter() run by mac80211 by
	 * resetting the current packet_filter state.
	 */
	rt2x00dev->packet_filter = 0;

	return 0;
}
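
/*
 * The beacon-entry search above relies on test_and_set_bit returning the
 * old bit value: the first caller to flip ENTRY_BCN_ASSIGNED owns the
 * entry. A userspace sketch of the same claim-a-free-slot idiom with C11
 * atomics (NSLOTS and the array are hypothetical):
 */
#include <stdatomic.h>
#include <stdbool.h>

#define NSLOTS 8

static atomic_bool assigned[NSLOTS];	/* false == free */

/* Return the claimed slot index, or -1 if every slot is taken. */
static int claim_slot(void)
{
	for (int i = 0; i < NSLOTS; i++) {
		/* atomic_exchange returns the previous value; false means
		 * the slot was free and this caller just claimed it */
		if (!atomic_exchange(&assigned[i], true))
			return i;
	}
	return -1;
}

static void release_slot(int i)
{
	atomic_store(&assigned[i], false);
}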
Example #20
static bool send_pcb(struct net_device *dev, pcb_struct * pcb)
{
	int i;
	unsigned long timeout;
	elp_device *adapter = netdev_priv(dev);
	unsigned long flags;

	check_3c505_dma(dev);

	if (adapter->dmaing && adapter->current_dma.direction == 0)
		return false;

	/* Avoid contention */
	if (test_and_set_bit(1, &adapter->send_pcb_semaphore)) {
		if (elp_debug >= 3) {
			pr_debug("%s: send_pcb entered while threaded\n", dev->name);
		}
		return false;
	}
	/*
	 * load each byte into the command register and
	 * wait for the HCRE bit to indicate the adapter
	 * had read the byte
	 */
	set_hsf(dev, 0);

	if (send_pcb_slow(dev->base_addr, pcb->command))
		goto abort;

	spin_lock_irqsave(&adapter->lock, flags);

	if (send_pcb_fast(dev->base_addr, pcb->length))
		goto sti_abort;

	for (i = 0; i < pcb->length; i++) {
		if (send_pcb_fast(dev->base_addr, pcb->data.raw[i]))
			goto sti_abort;
	}

	outb_control(adapter->hcr_val | 3, dev);	/* signal end of PCB */
	outb_command(2 + pcb->length, dev->base_addr);

	/* now wait for the acknowledgement */
	spin_unlock_irqrestore(&adapter->lock, flags);

	for (timeout = jiffies + 5*HZ/100; time_before(jiffies, timeout);) {
		switch (GET_ASF(dev->base_addr)) {
		case ASF_PCB_ACK:
			adapter->send_pcb_semaphore = 0;
			return true;

		case ASF_PCB_NAK:
#ifdef ELP_DEBUG
			pr_debug("%s: send_pcb got NAK\n", dev->name);
#endif
			goto abort;
		}
	}

	if (elp_debug >= 1)
		pr_debug("%s: timeout waiting for PCB acknowledge (status %02x)\n",
			dev->name, inb_status(dev->base_addr));
	goto abort;

      sti_abort:
	spin_unlock_irqrestore(&adapter->lock, flags);
      abort:
	adapter->send_pcb_semaphore = 0;
	return false;
}
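
/*
 * The acknowledge loop above bounds a busy-wait with jiffies and
 * time_before(). A userspace sketch of the same bounded poll, assuming a
 * caller-supplied ready() predicate and a 50 ms budget (5*HZ/100):
 */
#include <stdbool.h>
#include <time.h>

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Poll ready() until it succeeds or the deadline expires. */
static bool wait_for_ack(bool (*ready)(void))
{
	long long deadline = now_ns() + 50 * 1000000LL;

	while (now_ns() < deadline) {
		if (ready())
			return true;
	}
	return false;		/* timeout: mirrors the abort path above */
}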
static void
W6692_l1hw(struct PStack *st, int pr, void *arg)
{
	struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
	struct sk_buff *skb = arg;
	int val;

	switch (pr) {
		case (PH_DATA | REQUEST):
			if (cs->debug & DEB_DLOG_HEX)
				LogFrame(cs, skb->data, skb->len);
			if (cs->debug & DEB_DLOG_VERBOSE)
				dlogframe(cs, skb, 0);
			if (cs->tx_skb) {
				skb_queue_tail(&cs->sq, skb);
#ifdef L2FRAME_DEBUG		/* psa */
				if (cs->debug & L1_DEB_LAPD)
					Logl2Frame(cs, skb, "PH_DATA Queued", 0);
#endif
			} else {
				cs->tx_skb = skb;
				cs->tx_cnt = 0;
#ifdef L2FRAME_DEBUG		/* psa */
				if (cs->debug & L1_DEB_LAPD)
					Logl2Frame(cs, skb, "PH_DATA", 0);
#endif
				W6692_fill_fifo(cs);
			}
			break;
		case (PH_PULL | INDICATION):
			if (cs->tx_skb) {
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, " l2l1 tx_skb exist this shouldn't happen");
				skb_queue_tail(&cs->sq, skb);
				break;
			}
			if (cs->debug & DEB_DLOG_HEX)
				LogFrame(cs, skb->data, skb->len);
			if (cs->debug & DEB_DLOG_VERBOSE)
				dlogframe(cs, skb, 0);
			cs->tx_skb = skb;
			cs->tx_cnt = 0;
#ifdef L2FRAME_DEBUG		/* psa */
			if (cs->debug & L1_DEB_LAPD)
				Logl2Frame(cs, skb, "PH_DATA_PULLED", 0);
#endif
			W6692_fill_fifo(cs);
			break;
		case (PH_PULL | REQUEST):
#ifdef L2FRAME_DEBUG		/* psa */
			if (cs->debug & L1_DEB_LAPD)
				debugl1(cs, "-> PH_REQUEST_PULL");
#endif
			if (!cs->tx_skb) {
				test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
				st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
			} else
				test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
			break;
		case (HW_RESET | REQUEST):
			if (cs->dc.w6692.ph_state == W_L1IND_DRD)
				ph_command(cs, W_L1CMD_ECK);
			else {
				ph_command(cs, W_L1CMD_RST);
				cs->dc.w6692.ph_state = W_L1CMD_RST;
				W6692_new_ph(cs);
			}
			break;
		case (HW_ENABLE | REQUEST):
			ph_command(cs, W_L1CMD_ECK);
			break;
		case (HW_INFO3 | REQUEST):
			ph_command(cs, W_L1CMD_AR8);
			break;
		case (HW_TESTLOOP | REQUEST):
			val = 0;
			if (1 & (long) arg)
				val |= 0x0c;
			if (2 & (long) arg)
				val |= 0x3;
			/* !!! not implemented yet */
			break;
		case (HW_DEACTIVATE | RESPONSE):
			skb_queue_purge(&cs->rq);
			skb_queue_purge(&cs->sq);
			if (cs->tx_skb) {
				dev_kfree_skb_any(cs->tx_skb);
				cs->tx_skb = NULL;
			}
			if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
				del_timer(&cs->dbusytimer);
			if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
				W6692_sched_event(cs, D_CLEARBUSY);
			break;
		default:
			if (cs->debug & L1_DEB_WARN)
				debugl1(cs, "W6692_l1hw unknown %04x", pr);
			break;
	}
}
Example #22
static bool receive_pcb(struct net_device *dev, pcb_struct * pcb)
{
	int i, j;
	int total_length;
	int stat;
	unsigned long timeout;
	unsigned long flags;

	elp_device *adapter = netdev_priv(dev);

	set_hsf(dev, 0);

	/* get the command code */
	timeout = jiffies + 2*HZ/100;
	while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && time_before(jiffies, timeout));
	if (time_after_eq(jiffies, timeout)) {
		TIMEOUT_MSG(__LINE__);
		return false;
	}
	pcb->command = inb_command(dev->base_addr);

	/* read the data length */
	timeout = jiffies + 3*HZ/100;
	while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && time_before(jiffies, timeout));
	if (time_after_eq(jiffies, timeout)) {
		TIMEOUT_MSG(__LINE__);
		pr_info("%s: status %02x\n", dev->name, stat);
		return false;
	}
	pcb->length = inb_command(dev->base_addr);

	if (pcb->length > MAX_PCB_DATA) {
		INVALID_PCB_MSG(pcb->length);
		adapter_reset(dev);
		return false;
	}
	/* read the data */
	spin_lock_irqsave(&adapter->lock, flags);
	for (i = 0; i < MAX_PCB_DATA; i++) {
		for (j = 0; j < 20000; j++) {
			stat = get_status(dev->base_addr);
			if (stat & ACRF)
				break;
		}
		pcb->data.raw[i] = inb_command(dev->base_addr);
		if ((stat & ASF_PCB_MASK) == ASF_PCB_END || j >= 20000)
			break;
	}
	spin_unlock_irqrestore(&adapter->lock, flags);
	if (i >= MAX_PCB_DATA) {
		INVALID_PCB_MSG(i);
		return false;
	}
	if (j >= 20000) {
		TIMEOUT_MSG(__LINE__);
		return false;
	}
	/* the last "data" byte was really the length! */
	total_length = pcb->data.raw[i];

	/* safety check total length vs data length */
	if (total_length != (pcb->length + 2)) {
		if (elp_debug >= 2)
			pr_warning("%s: mangled PCB received\n", dev->name);
		set_hsf(dev, HSF_PCB_NAK);
		return false;
	}

	if (pcb->command == CMD_RECEIVE_PACKET_COMPLETE) {
		if (test_and_set_bit(0, (void *) &adapter->busy)) {
			if (backlog_next(adapter->rx_backlog.in) == adapter->rx_backlog.out) {
				set_hsf(dev, HSF_PCB_NAK);
				pr_warning("%s: PCB rejected, transfer in progress and backlog full\n", dev->name);
				pcb->command = 0;
				return true;
			} else {
				pcb->command = 0xff;
			}
		}
	}
	set_hsf(dev, HSF_PCB_ACK);
	return true;
}
Example #23
static int __devinit
setup_gazelpci(struct IsdnCardState *cs)
{
	u_int pci_ioaddr0 = 0, pci_ioaddr1 = 0;
	u_char pci_irq = 0, found;
	u_int nbseek, seekcard;

	printk(KERN_WARNING "Gazel: PCI card automatic recognition\n");

	found = 0;
	seekcard = PCI_DEVICE_ID_PLX_R685;
	for (nbseek = 0; nbseek < 4; nbseek++) {
		if ((dev_tel = pci_find_device(PCI_VENDOR_ID_PLX,
					seekcard, dev_tel))) {
			if (pci_enable_device(dev_tel))
				return 1;
			pci_irq = dev_tel->irq;
			pci_ioaddr0 = pci_resource_start(dev_tel, 1);
			pci_ioaddr1 = pci_resource_start(dev_tel, 2);
			found = 1;
		}
		if (found)
			break;
		else {
			switch (seekcard) {
				case PCI_DEVICE_ID_PLX_R685:
					seekcard = PCI_DEVICE_ID_PLX_R753;
					break;
				case PCI_DEVICE_ID_PLX_R753:
					seekcard = PCI_DEVICE_ID_PLX_DJINN_ITOO;
					break;
				case PCI_DEVICE_ID_PLX_DJINN_ITOO:
					seekcard = PCI_DEVICE_ID_PLX_OLITEC;
					break;
			}
		}
	}
	if (!found) {
		printk(KERN_WARNING "Gazel: No PCI card found\n");
		return (1);
	}
	if (!pci_irq) {
		printk(KERN_WARNING "Gazel: No IRQ for PCI card found\n");
		return 1;
	}
	cs->hw.gazel.pciaddr[0] = pci_ioaddr0;
	cs->hw.gazel.pciaddr[1] = pci_ioaddr1;
	setup_isac(cs);
	pci_ioaddr1 &= 0xfffe;
	cs->hw.gazel.cfg_reg = pci_ioaddr0 & 0xfffe;
	cs->hw.gazel.ipac = pci_ioaddr1;
	cs->hw.gazel.isac = pci_ioaddr1 + 0x80;
	cs->hw.gazel.hscx[0] = pci_ioaddr1;
	cs->hw.gazel.hscx[1] = pci_ioaddr1 + 0x40;
	cs->hw.gazel.isacfifo = cs->hw.gazel.isac;
	cs->hw.gazel.hscxfifo[0] = cs->hw.gazel.hscx[0];
	cs->hw.gazel.hscxfifo[1] = cs->hw.gazel.hscx[1];
	cs->irq = pci_irq;
	cs->irq_flags |= IRQF_SHARED;

	switch (seekcard) {
		case PCI_DEVICE_ID_PLX_R685:
			printk(KERN_INFO "Gazel: Card PCI R685 found\n");
			cs->subtyp = R685;
			cs->dc.isac.adf2 = 0x87;
			printk(KERN_INFO
			    "Gazel: config irq:%d isac:0x%X  cfg:0x%X\n",
			cs->irq, cs->hw.gazel.isac, cs->hw.gazel.cfg_reg);
			printk(KERN_INFO
			       "Gazel: hscx A:0x%X  hscx B:0x%X\n",
			     cs->hw.gazel.hscx[0], cs->hw.gazel.hscx[1]);
			break;
		case PCI_DEVICE_ID_PLX_R753:
		case PCI_DEVICE_ID_PLX_DJINN_ITOO:
		case PCI_DEVICE_ID_PLX_OLITEC:
			printk(KERN_INFO "Gazel: Card PCI R753 found\n");
			cs->subtyp = R753;
			test_and_set_bit(HW_IPAC, &cs->HW_Flags);
			printk(KERN_INFO
			    "Gazel: config irq:%d ipac:0x%X  cfg:0x%X\n",
			cs->irq, cs->hw.gazel.ipac, cs->hw.gazel.cfg_reg);
			break;
	}

	return (0);
}
Example #24
static bool send_packet(struct net_device *dev, struct sk_buff *skb)
{
	elp_device *adapter = netdev_priv(dev);
	unsigned long target;
	unsigned long flags;

	/*
	 * make sure the length is even and no shorter than 60 bytes
	 */
	unsigned int nlen = (((skb->len < 60) ? 60 : skb->len) + 1) & (~1);

	if (test_and_set_bit(0, (void *) &adapter->busy)) {
		if (elp_debug >= 2)
			pr_debug("%s: transmit blocked\n", dev->name);
		return false;
	}

	dev->stats.tx_bytes += nlen;

	/*
	 * send the adapter a transmit packet command. Ignore segment and offset
	 * and make sure the length is even
	 */
	adapter->tx_pcb.command = CMD_TRANSMIT_PACKET;
	adapter->tx_pcb.length = sizeof(struct Xmit_pkt);
	adapter->tx_pcb.data.xmit_pkt.buf_ofs
	    = adapter->tx_pcb.data.xmit_pkt.buf_seg = 0;	/* Unused */
	adapter->tx_pcb.data.xmit_pkt.pkt_len = nlen;

	if (!send_pcb(dev, &adapter->tx_pcb)) {
		adapter->busy = 0;
		return false;
	}
	/* if this happens, we die */
	if (test_and_set_bit(0, (void *) &adapter->dmaing))
		pr_debug("%s: tx: DMA %d in progress\n", dev->name, adapter->current_dma.direction);

	adapter->current_dma.direction = 1;
	adapter->current_dma.start_time = jiffies;

	if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) {
		skb_copy_from_linear_data(skb, adapter->dma_buffer, nlen);
		memset(adapter->dma_buffer+skb->len, 0, nlen-skb->len);
		target = isa_virt_to_bus(adapter->dma_buffer);
	}
	else {
		target = isa_virt_to_bus(skb->data);
	}
	adapter->current_dma.skb = skb;

	flags=claim_dma_lock();
	disable_dma(dev->dma);
	clear_dma_ff(dev->dma);
	set_dma_mode(dev->dma, 0x48);	/* dma memory -> io */
	set_dma_addr(dev->dma, target);
	set_dma_count(dev->dma, nlen);
	outb_control(adapter->hcr_val | DMAE | TCEN, dev);
	enable_dma(dev->dma);
	release_dma_lock(flags);

	if (elp_debug >= 3)
		pr_debug("%s: DMA transfer started\n", dev->name);

	return true;
}
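
/*
 * The DMA setup above bounces the packet through adapter->dma_buffer
 * whenever it lies beyond MAX_DMA_ADDRESS or needs padding to an even,
 * >= 60-byte length. A hedged userspace sketch of that decision
 * (DMA_LIMIT, MIN_FRAME and the buffer size are hypothetical):
 */
#include <stdint.h>
#include <string.h>

#define DMA_LIMIT	0x1000000UL	/* stand-in for MAX_DMA_ADDRESS */
#define MIN_FRAME	60

static unsigned char bounce[2048];

/* Return the address to program into the DMA engine. */
static const unsigned char *dma_source(const unsigned char *data, size_t len,
				       size_t *dma_len)
{
	size_t nlen = ((len < MIN_FRAME ? MIN_FRAME : len) + 1) & ~(size_t)1;

	*dma_len = nlen;
	if ((uintptr_t)(data + nlen) < DMA_LIMIT && nlen == len)
		return data;		/* reachable and needs no padding */

	memcpy(bounce, data, len);
	memset(bounce + len, 0, nlen - len);	/* zero-pad to nlen */
	return bounce;
}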
Example #25
static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
{
	/*
	 * Wake up the handler thread for this action. In case the
	 * thread crashed and was killed we just pretend that we
	 * handled the interrupt. The hardirq handler has disabled the
	 * device interrupt, so no irq storm is lurking. If the
	 * RUNTHREAD bit is already set, nothing to do.
	 */
	if (test_bit(IRQTF_DIED, &action->thread_flags) ||
	    test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		return;

	/*
	 * It's safe to OR the mask lockless here. We have only two
	 * places which write to threads_oneshot: This code and the
	 * irq thread.
	 *
	 * This code is the hard irq context and can never run on two
	 * cpus in parallel. If it ever does we have more serious
	 * problems than this bitmask.
	 *
	 * The irq threads of this irq which clear their "running" bit
	 * in threads_oneshot are serialized via desc->lock against
	 * each other and they are serialized against this code by
	 * IRQS_INPROGRESS.
	 *
	 * Hard irq handler:
	 *
	 *	spin_lock(desc->lock);
	 *	desc->state |= IRQS_INPROGRESS;
	 *	spin_unlock(desc->lock);
	 *	set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
	 *	desc->threads_oneshot |= mask;
	 *	spin_lock(desc->lock);
	 *	desc->state &= ~IRQS_INPROGRESS;
	 *	spin_unlock(desc->lock);
	 *
	 * irq thread:
	 *
	 * again:
	 *	spin_lock(desc->lock);
	 *	if (desc->state & IRQS_INPROGRESS) {
	 *		spin_unlock(desc->lock);
	 *		while(desc->state & IRQS_INPROGRESS)
	 *			cpu_relax();
	 *		goto again;
	 *	}
	 *	if (!test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
	 *		desc->threads_oneshot &= ~mask;
	 *	spin_unlock(desc->lock);
	 *
	 * So either the thread waits for us to clear IRQS_INPROGRESS
	 * or we are waiting in the flow handler for desc->lock to be
	 * released before we reach this point. The thread also checks
	 * IRQTF_RUNTHREAD under desc->lock. If set it leaves
	 * threads_oneshot untouched and runs the thread another time.
	 */
	desc->threads_oneshot |= action->thread_mask;
	wake_up_process(action->thread);
}
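
/*
 * The gate at the top of irq_wake_thread is the essential trick: only
 * the caller that transitions IRQTF_RUNTHREAD from 0 to 1 performs the
 * wakeup, so concurrent entries cannot wake the thread twice. A C11
 * sketch of that test-and-set gate:
 */
#include <stdatomic.h>
#include <stdbool.h>

#define RUNTHREAD	(1u << 0)

static atomic_uint thread_flags;

static bool should_wake(void)
{
	/* fetch_or returns the old mask; if RUNTHREAD was already set,
	 * another path has taken responsibility for the wakeup */
	unsigned int old = atomic_fetch_or(&thread_flags, RUNTHREAD);

	return !(old & RUNTHREAD);
}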
Example #26
/*---------------------------------------------------------------------------*/
int priv_ev_loop_run(void *loop_hndl)
{
	struct xio_ev_loop	*loop = loop_hndl;
	struct xio_ev_data	*tev;
	struct llist_node	*node;
	int cpu;

	clear_bit(XIO_EV_LOOP_STOP, &loop->states);

	switch (loop->flags) {
	case XIO_LOOP_GIVEN_THREAD:
		if (loop->ctx->worker != (uint64_t) get_current()) {
			ERROR_LOG("worker kthread(%p) is not current(%p).\n",
				  (void *) loop->ctx->worker, get_current());
			goto cleanup0;
		}
		/* no need to disable preemption */
		cpu = raw_smp_processor_id();
		if (loop->ctx->cpuid != cpu) {
			TRACE_LOG("worker on core(%d) scheduled to(%d).\n",
				  cpu, loop->ctx->cpuid);
			set_cpus_allowed_ptr(get_current(),
					     cpumask_of(loop->ctx->cpuid));
		}
		break;
	case XIO_LOOP_TASKLET:
		/* were events added to list while in STOP state ? */
		if (!llist_empty(&loop->ev_llist))
			priv_kick_tasklet(loop_hndl);
		return 0;
	case XIO_LOOP_WORKQUEUE:
		/* were events added to list while in STOP state ? */
		while ((node = llist_del_all(&loop->ev_llist)) != NULL) {
			node = llist_reverse_order(node);
			while (node) {
				tev = llist_entry(node, struct xio_ev_data,
						  ev_llist);
				node = llist_next(node);
				tev->work.func = priv_ev_loop_run_work;
				queue_work_on(loop->ctx->cpuid, loop->workqueue,
					      &tev->work);
			}
		}
		return 0;
	default:
		/* undo */
		set_bit(XIO_EV_LOOP_STOP, &loop->states);
		return -1;
	}

retry_wait:
	wait_event_interruptible(loop->wait,
				 test_bit(XIO_EV_LOOP_WAKE, &loop->states));

retry_dont_wait:

	while ((node = llist_del_all(&loop->ev_llist)) != NULL) {
		node = llist_reverse_order(node);
		while (node) {
			tev = llist_entry(node, struct xio_ev_data, ev_llist);
			node = llist_next(node);
			tev->handler(tev->data);
		}
	}

	/* "race point" */
	clear_bit(XIO_EV_LOOP_WAKE, &loop->states);

	if (unlikely(test_bit(XIO_EV_LOOP_STOP, &loop->states)))
		return 0;

	/* if a new entry was added while we were at the "race point",
	 * then wait_event might block forever as the condition is false */
	if (llist_empty(&loop->ev_llist))
		goto retry_wait;

	/* race detected */
	if (!test_and_set_bit(XIO_EV_LOOP_WAKE, &loop->states))
		goto retry_dont_wait;

	/* a wakeup was already issued; go back to waiting */
	goto retry_wait;

cleanup0:
	set_bit(XIO_EV_LOOP_STOP, &loop->states);
	return -1;
}
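
/*
 * The retry_wait/retry_dont_wait dance above closes the lost-wakeup
 * window: clear WAKE, re-check the queue, and if work raced in, try to
 * re-arm WAKE yourself before daring to sleep. A self-contained C11
 * sketch, with an atomic counter standing in for the lock-free ev_llist:
 */
#include <stdatomic.h>
#include <stdbool.h>

#define WAKE	1u

static atomic_uint states;
static atomic_int pending;	/* producers increment, consumer drains */

/* One consumer pass; returns true only when it is safe to sleep. */
static bool may_sleep(void)
{
	while (atomic_exchange(&pending, 0))
		;				/* drain and handle events */

	/* "race point": producers can enqueue between here and the check */
	atomic_fetch_and(&states, ~WAKE);

	if (!atomic_load(&pending))
		return true;			/* queue empty: safe to wait */

	/* race detected: if WAKE was still clear we own the retry and
	 * must drain again; otherwise a wakeup is already in flight */
	return atomic_fetch_or(&states, WAKE) & WAKE;
}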
static int igbvf_set_ringparam(struct net_device *netdev,
                               struct ethtool_ringparam *ring)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct igbvf_ring *temp_ring;
	int err = 0;
	u32 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_rx_count = max(ring->rx_pending, (u32)IGBVF_MIN_RXD);
	new_rx_count = min(new_rx_count, (u32)IGBVF_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);

	new_tx_count = max(ring->tx_pending, (u32)IGBVF_MIN_TXD);
	new_tx_count = min(new_tx_count, (u32)IGBVF_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring->count) &&
	    (new_rx_count == adapter->rx_ring->count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
		msleep(1);

	if (!netif_running(adapter->netdev)) {
		adapter->tx_ring->count = new_tx_count;
		adapter->rx_ring->count = new_rx_count;
		goto clear_reset;
	}

	temp_ring = vmalloc(sizeof(struct igbvf_ring));
	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	igbvf_down(adapter);

	if (new_tx_count != adapter->tx_ring->count) {
		memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring));

		temp_ring->count = new_tx_count;
		err = igbvf_setup_tx_resources(adapter, temp_ring);
		if (err)
			goto err_setup;

		igbvf_free_tx_resources(adapter->tx_ring);

		memcpy(adapter->tx_ring, temp_ring, sizeof(struct igbvf_ring));
	}

	if (new_rx_count != adapter->rx_ring->count) {
		memcpy(temp_ring, adapter->rx_ring, sizeof(struct igbvf_ring));

		temp_ring->count = new_rx_count;
		err = igbvf_setup_rx_resources(adapter, temp_ring);
		if (err)
			goto err_setup;

		igbvf_free_rx_resources(adapter->rx_ring);

		memcpy(adapter->rx_ring, temp_ring, sizeof(struct igbvf_ring));
	}
err_setup:
	igbvf_up(adapter);
	vfree(temp_ring);
clear_reset:
	clear_bit(__IGBVF_RESETTING, &adapter->state);
	return err;
}
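
/*
 * A sketch of the resize-through-temporary pattern used above: the new
 * ring is built in a scratch descriptor and the live one is replaced
 * only after allocation succeeds, so failure leaves the old ring
 * untouched (plain C analogue; struct ring is a stand-in):
 */
#include <stdlib.h>

struct ring { void *desc; unsigned int count; };

static int ring_resize(struct ring *live, unsigned int new_count, size_t elem)
{
	struct ring temp = *live;	/* like memcpy(temp_ring, ..., sizeof) */

	temp.count = new_count;
	temp.desc = calloc(new_count, elem);
	if (!temp.desc)
		return -1;		/* old ring is still fully valid */

	free(live->desc);		/* igbvf_free_*_resources() analogue */
	*live = temp;			/* commit the new ring */
	return 0;
}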
static long acq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int options, retval = -EINVAL;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	static const struct watchdog_info ident = {
		.options = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
		.firmware_version = 1,
		.identity = WATCHDOG_NAME,
	};

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;

	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		return put_user(0, p);

	case WDIOC_SETOPTIONS:
	{
		if (get_user(options, p))
			return -EFAULT;
		if (options & WDIOS_DISABLECARD) {
			acq_stop();
			retval = 0;
		}
		if (options & WDIOS_ENABLECARD) {
			acq_keepalive();
			retval = 0;
		}
		return retval;
	}
	case WDIOC_KEEPALIVE:
		acq_keepalive();
		return 0;

	case WDIOC_GETTIMEOUT:
		return put_user(WATCHDOG_HEARTBEAT, p);

	default:
		return -ENOTTY;
	}
}

static int acq_open(struct inode *inode, struct file *file)
{
	if (test_and_set_bit(0, &acq_is_open))
		return -EBUSY;

	if (nowayout)
		__module_get(THIS_MODULE);

	/* Activate */
	acq_keepalive();
	return nonseekable_open(inode, file);
}

static int acq_close(struct inode *inode, struct file *file)
{
	if (expect_close == 42) {
		acq_stop();
	} else {
		pr_crit("Unexpected close, not stopping watchdog!\n");
		acq_keepalive();
	}
	clear_bit(0, &acq_is_open);
	expect_close = 0;
	return 0;
}
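
/*
 * acq_open/acq_close serialize access with a single test_and_set_bit /
 * clear_bit pair on acq_is_open. The same single-open guard in C11
 * atomics (userspace analogue):
 */
#include <errno.h>
#include <stdatomic.h>

static atomic_flag dev_open = ATOMIC_FLAG_INIT;

static int dev_acquire(void)
{
	if (atomic_flag_test_and_set(&dev_open))
		return -EBUSY;		/* somebody already holds the device */
	return 0;
}

static void dev_release(void)
{
	atomic_flag_clear(&dev_open);
}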


static const struct file_operations acq_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.write		= acq_write,
	.unlocked_ioctl	= acq_ioctl,
	.open		= acq_open,
	.release	= acq_close,
};

static struct miscdevice acq_miscdev = {
	.minor	= WATCHDOG_MINOR,
	.name	= "watchdog",
	.fops	= &acq_fops,
};


static int __devinit acq_probe(struct platform_device *dev)
{
	int ret;

	if (wdt_stop != wdt_start) {
		if (!request_region(wdt_stop, 1, WATCHDOG_NAME)) {
			pr_err("I/O address 0x%04x already in use\n", wdt_stop);
			ret = -EIO;
			goto out;
		}
	}

	if (!request_region(wdt_start, 1, WATCHDOG_NAME)) {
		pr_err("I/O address 0x%04x already in use\n", wdt_start);
		ret = -EIO;
		goto unreg_stop;
	}
	ret = misc_register(&acq_miscdev);
	if (ret != 0) {
		pr_err("cannot register miscdev on minor=%d (err=%d)\n",
		       WATCHDOG_MINOR, ret);
		goto unreg_regions;
	}
	pr_info("initialized. (nowayout=%d)\n", nowayout);

	return 0;
unreg_regions:
	release_region(wdt_start, 1);
unreg_stop:
	if (wdt_stop != wdt_start)
		release_region(wdt_stop, 1);
out:
	return ret;
}

static int __devexit acq_remove(struct platform_device *dev)
{
	misc_deregister(&acq_miscdev);
	release_region(wdt_start, 1);
	if (wdt_stop != wdt_start)
		release_region(wdt_stop, 1);

	return 0;
}
Example #29
static void hisax_sched_event(struct IsdnCardState *cs, int event)
{
	test_and_set_bit(event, &cs->event);
	schedule_work(&cs->tqueue);
}
Example #30
/*
 * see if we have space for a number of pages and/or a number of files in the
 * cache
 */
int cachefiles_has_space(struct cachefiles_cache *cache,
			 unsigned fnr, unsigned bnr)
{
	struct kstatfs stats;
	int ret;

	//_enter("{%llu,%llu,%llu,%llu,%llu,%llu},%u,%u",
	//       (unsigned long long) cache->frun,
	//       (unsigned long long) cache->fcull,
	//       (unsigned long long) cache->fstop,
	//       (unsigned long long) cache->brun,
	//       (unsigned long long) cache->bcull,
	//       (unsigned long long) cache->bstop,
	//       fnr, bnr);

	/* find out how many pages of blockdev are available */
	memset(&stats, 0, sizeof(stats));

	ret = vfs_statfs(cache->mnt->mnt_root, &stats);
	if (ret < 0) {
		if (ret == -EIO)
			cachefiles_io_error(cache, "statfs failed");
		_leave(" = %d", ret);
		return ret;
	}

	stats.f_bavail >>= cache->bshift;

	//_debug("avail %llu,%llu",
	//       (unsigned long long) stats.f_ffree,
	//       (unsigned long long) stats.f_bavail);

	/* see if there is sufficient space */
	if (stats.f_ffree > fnr)
		stats.f_ffree -= fnr;
	else
		stats.f_ffree = 0;

	if (stats.f_bavail > bnr)
		stats.f_bavail -= bnr;
	else
		stats.f_bavail = 0;

	ret = -ENOBUFS;
	if (stats.f_ffree < cache->fstop ||
	    stats.f_bavail < cache->bstop)
		goto begin_cull;

	ret = 0;
	if (stats.f_ffree < cache->fcull ||
	    stats.f_bavail < cache->bcull)
		goto begin_cull;

	if (test_bit(CACHEFILES_CULLING, &cache->flags) &&
	    stats.f_ffree >= cache->frun &&
	    stats.f_bavail >= cache->brun &&
	    test_and_clear_bit(CACHEFILES_CULLING, &cache->flags)
	    ) {
		_debug("cease culling");
		cachefiles_state_changed(cache);
	}

	//_leave(" = 0");
	return 0;

begin_cull:
	if (!test_and_set_bit(CACHEFILES_CULLING, &cache->flags)) {
		_debug("### CULL CACHE ###");
		cachefiles_state_changed(cache);
	}

	_leave(" = %d", ret);
	return ret;
}
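
/*
 * cachefiles_has_space implements three-threshold hysteresis: culling
 * begins below cull, requests fail below stop, and culling only ceases
 * once free space climbs back above run. A compact sketch of the same
 * policy over a single resource (units are hypothetical):
 */
#include <stdbool.h>

struct thresholds { unsigned long run, cull, stop; };

static bool culling;

/* Returns false when the caller must fail the request outright. */
static bool has_space(unsigned long avail, const struct thresholds *t)
{
	if (avail < t->cull) {
		culling = true;			/* begin (or keep) culling */
		return avail >= t->stop;
	}
	if (culling && avail >= t->run)
		culling = false;		/* cease culling */
	return true;
}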