Example #1
static void rtcfg_conn_check_cfg_timeout(struct rtcfg_connection *conn)
{
    struct rtcfg_device *rtcfg_dev;


    if (!conn->cfg_timeout)
        return;

    if (rtdm_clock_read() >= conn->last_frame + conn->cfg_timeout) {
        rtcfg_dev = &device[conn->ifindex];

        rtcfg_dev->stations_found--;
        if (conn->state == RTCFG_CONN_STAGE_2)
            rtcfg_dev->spec.srv.clients_configured--;

        rtcfg_next_conn_state(conn, RTCFG_CONN_SEARCHING);
        conn->cfg_offs = 0;
        conn->flags    = 0;

#ifdef CONFIG_RTNET_RTIPV4
        if (conn->addr_type == RTCFG_ADDR_IP) {
            struct rtnet_device *rtdev;

            /* MAC address yet unknown -> use broadcast address */
            rtdev = rtdev_get_by_index(conn->ifindex);
            if (rtdev == NULL)
                return;
            memcpy(conn->mac_addr, rtdev->broadcast, MAX_ADDR_LEN);
            rtdev_dereference(rtdev);
        }
#endif /* CONFIG_RTNET_RTIPV4 */
    }
}
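The timeout test above is a recurring RTDM idiom: store an absolute timestamp
of the last event and compare rtdm_clock_read() against timestamp + timeout,
with a timeout of zero disabling the check. A minimal sketch of the idiom,
using a hypothetical struct (my_conn is not an RTnet type):

/* Sketch of the timeout idiom; my_conn and its fields are hypothetical. */
struct my_conn {
    nanosecs_abs_t last_frame;  /* absolute time of the last frame */
    nanosecs_rel_t timeout;     /* timeout in ns; 0 disables the check */
};

static int my_conn_timed_out(struct my_conn *conn)
{
    if (!conn->timeout)
        return 0;
    /* rtdm_clock_read() returns the current time in nanoseconds */
    return rtdm_clock_read() >= conn->last_frame + conn->timeout;
}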
Example #2
void rtcan_rcv(struct rtcan_device *dev, struct rtcan_skb *skb)
{
    nanosecs_abs_t timestamp = rtdm_clock_read();
    /* Entry in reception list, begin with head */
    struct rtcan_recv *recv_listener = dev->recv_list;
    struct rtcan_rb_frame *frame = &skb->rb_frame;

    /* Copy timestamp to skb */
    memcpy((void *)&skb->rb_frame + skb->rb_frame_size,
	   &timestamp, RTCAN_TIMESTAMP_SIZE);

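    /* Error frames are matched against each listener's error mask; data
       frames go through the regular CAN ID filters. */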
    if ((frame->can_id & CAN_ERR_FLAG)) {
	dev->err_count++;
	while (recv_listener != NULL) {
	    if ((frame->can_id & recv_listener->sock->err_mask)) {
		recv_listener->match_count++;
		rtcan_rcv_deliver(recv_listener, skb);
	    }
	    recv_listener = recv_listener->next;
	}
    } else {
	dev->rx_count++;
	while (recv_listener != NULL) {
	    if (rtcan_accept_msg(frame->can_id, &recv_listener->can_filter)) {
		recv_listener->match_count++;
		rtcan_rcv_deliver(recv_listener, skb);
	    }
	    recv_listener = recv_listener->next;
	}
    }
}
Example #3
int tims_clock_ioctl(rtdm_user_info_t *user_info, unsigned int request,
                     void *arg)
{
    rtdm_lockctx_t lock_ctx;
    nanosecs_rel_t result = 0;

    switch(clock_sync_mode) {
        case SYNC_RTNET:
            sync_dev_ctx->ops->ioctl_rt(sync_dev_ctx, NULL,
                                        RTMAC_RTIOC_TIMEOFFSET, &result);
            break;

        case SYNC_CAN_SLAVE:
        case SYNC_SER_SLAVE:
            rtdm_lock_get_irqsave(&sync_lock, lock_ctx);
            result = clock_offset;
            rtdm_lock_put_irqrestore(&sync_lock, lock_ctx);
            break;
    }

    result += sync_delay;

    if (request == TIMS_RTIOC_GETTIME)
        result += rtdm_clock_read();

    return rtdm_safe_copy_to_user(user_info, arg, &result, sizeof(result));
}
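From the caller's perspective, TIMS_RTIOC_GETTIME therefore yields the local
clock plus the negotiated offset and transmission delay. A sketch of a call
site (fd is assumed to be an already opened TIMS device descriptor):

/* Sketch only: fd is a hypothetical open TIMS device descriptor. */
nanosecs_abs_t now;
int err = rt_dev_ioctl(fd, TIMS_RTIOC_GETTIME, &now);
if (err < 0)
    printk("TIMS_RTIOC_GETTIME failed: %d\n", err);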
Example #4
/***
 *  rt_loopback_xmit - begin packet transmission
 *  @skb: packet to be sent
 *  @rtdev: network device to which the packet is sent
 *
 */
static int rt_loopback_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
{
    unsigned short          hash;
    struct rtpacket_type    *pt_entry;
    rtdm_lockctx_t          context;


    /* write transmission stamp - in case any protocol ever gets the idea to
       ask the loopback device for this service... */
    if (skb->xmit_stamp)
        *skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);

    /* make sure that critical fields are re-initialised */
    skb->chain_end = skb;

    /* parse the Ethernet header as usual */
    skb->protocol = rt_eth_type_trans(skb, rtdev);
    skb->nh.raw   = skb->data;

    rtdev_reference(rtdev);

    rtcap_report_incoming(skb);

    hash = ntohs(skb->protocol) & RTPACKET_HASH_KEY_MASK;

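    /* Bump the handler's refcount so the lock can be dropped across the
       potentially long-running protocol callback below. */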
    rtdm_lock_get_irqsave(&rt_packets_lock, context);

    list_for_each_entry(pt_entry, &rt_packets[hash], list_entry)
        if (pt_entry->type == skb->protocol) {
            pt_entry->refcount++;
            rtdm_lock_put_irqrestore(&rt_packets_lock, context);

            pt_entry->handler(skb, pt_entry);

            rtdm_lock_get_irqsave(&rt_packets_lock, context);
            pt_entry->refcount--;
            rtdm_lock_put_irqrestore(&rt_packets_lock, context);

            goto out;
        }

    rtdm_lock_put_irqrestore(&rt_packets_lock, context);

    /* don't warn if running in promiscuous mode (RTcap...?) */
    if ((rtdev->flags & IFF_PROMISC) == 0)
        rtdm_printk("RTnet: unknown layer-3 protocol\n");

    kfree_rtskb(skb);

  out:
    rtdev_dereference(rtdev);
    return 0;
}
Example #5
/***
 *  rt_loopback_xmit - begin packet transmission
 *  @rtskb: packet to be sent
 *  @rtdev: network device to which the packet is sent
 *
 */
static int rt_loopback_xmit(struct rtskb *rtskb, struct rtnet_device *rtdev)
{
    /* write transmission stamp - in case any protocol ever gets the idea to
       ask the loopback device for this service... */
    if (rtskb->xmit_stamp)
	*rtskb->xmit_stamp =
	    cpu_to_be64(rtdm_clock_read() + *rtskb->xmit_stamp);

    /* make sure that critical fields are re-initialised */
    rtskb->chain_end = rtskb;

    /* parse the Ethernet header as usual */
    rtskb->protocol = rt_eth_type_trans(rtskb, rtdev);

    rt_stack_deliver(rtskb);

    return 0;
}
Example #6
static void rtcfg_conn_check_heartbeat(struct rtcfg_connection *conn)
{
    u64                 timeout;
    struct rtcfg_device *rtcfg_dev;


    timeout = device[conn->ifindex].spec.srv.heartbeat_timeout;
    if (!timeout)
        return;

    if (rtdm_clock_read() >= conn->last_frame + timeout) {
        rtcfg_dev = &device[conn->ifindex];

        rtcfg_dev->stations_found--;
        rtcfg_dev->stations_ready--;
        rtcfg_dev->spec.srv.clients_configured--;

        rtcfg_send_dead_station(conn);

        rtcfg_next_conn_state(conn, RTCFG_CONN_DEAD);
        conn->cfg_offs = 0;
        conn->flags    = 0;

#ifdef CONFIG_RTNET_RTIPV4
        if ((conn->addr_type & RTCFG_ADDR_MASK) == RTCFG_ADDR_IP) {
            struct rtnet_device *rtdev = rtdev_get_by_index(conn->ifindex);

            rt_ip_route_del_host(conn->addr.ip_addr, rtdev);

            if (rtdev == NULL)
                return;

            if (!(conn->addr_type & FLAG_ASSIGN_ADDR_BY_MAC))
                /* MAC address yet unknown -> use broadcast address */
                memcpy(conn->mac_addr, rtdev->broadcast, MAX_ADDR_LEN);

            rtdev_dereference(rtdev);
        }
#endif /* CONFIG_RTNET_RTIPV4 */
    }
}
Example #7
static int
fec_enet_interrupt(rtdm_irq_t *irq_handle)
{
	struct rtnet_device *ndev =
		rtdm_irq_get_arg(irq_handle, struct rtnet_device); /* RTnet */
	struct fec_enet_private *fep = rtnetdev_priv(ndev);
	uint int_events;
	irqreturn_t ret = RTDM_IRQ_NONE;
	/* RTnet */
	nanosecs_abs_t time_stamp = rtdm_clock_read();
	int packets = 0;

	do {
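		/* Read all pending events and acknowledge them in one go */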
		int_events = readl(fep->hwp + FEC_IEVENT);
		writel(int_events, fep->hwp + FEC_IEVENT);

		if (int_events & FEC_ENET_RXF) {
			ret = RTDM_IRQ_HANDLED;
			fec_enet_rx(ndev, &packets, &time_stamp);
		}

		/* Transmit OK, or non-fatal error. Update the buffer
		 * descriptors. FEC handles all errors, we just discover
		 * them as part of the transmit process.
		 */
		if (int_events & FEC_ENET_TXF) {
			ret = RTDM_IRQ_HANDLED;
			fec_enet_tx(ndev);
		}

		if (int_events & FEC_ENET_MII) {
			ret = RTDM_IRQ_HANDLED;
			rtdm_nrtsig_pend(&fep->mdio_done_sig);
		}
	} while (int_events);

	if (packets > 0)
		rt_mark_stack_mgr(ndev);

	return ret;
}
Example #8
int rt2x00_interrupt(rtdm_irq_t *irq_handle)
{
    nanosecs_t time_stamp = rtdm_clock_read();

    struct rtnet_device   * rtnet_dev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
    struct _rt2x00_device * device    = rtwlan_priv(rtnet_dev);
    struct _rt2x00_pci	  * rt2x00pci = rt2x00_priv(device);
    struct rtwlan_device  * rtwlan    = rtnetdev_priv(rtnet_dev);
    unsigned int old_packet_cnt       = rtwlan->stats.rx_packets;
    u32			reg           = 0x00000000;
    rtdm_lockctx_t context;

    rtdm_lock_get_irqsave(&rtwlan->lock, context);

    rt2x00_register_read(rt2x00pci, CSR7, &reg);
    rt2x00_register_write(rt2x00pci, CSR7, reg);

    if(!reg) {
        rtdm_lock_put_irqrestore(&rtwlan->lock, context);
        return RTDM_IRQ_NONE;
    }

    if(rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))		/* Beacon timer expired interrupt. */
        DEBUG("Beacon timer expired.\n");
    if(rt2x00_get_field32(reg, CSR7_RXDONE))		/* Rx ring done interrupt. */
        rt2x00_interrupt_rxdone(&rt2x00pci->rx, &time_stamp);
    if(rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING))	/* Atim ring transmit done interrupt. */
        DEBUG("AtimTxDone.\n");
    if(rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING))	/* Priority ring transmit done interrupt. */
        DEBUG("PrioTxDone.\n");
    if(rt2x00_get_field32(reg, CSR7_TXDONE_TXRING))	/* Tx ring transmit done interrupt. */
        rt2x00_interrupt_txdone(&rt2x00pci->tx);

    if (old_packet_cnt != rtwlan->stats.rx_packets)
        rt_mark_stack_mgr(rtnet_dev);

    rtdm_lock_put_irqrestore(&rtwlan->lock, context);

    return RTDM_IRQ_HANDLED;
}
Example #9
static int rtdmtest_task(void *arg)
{
	int ret;
	nanosecs_abs_t wakeup;
	struct rttst_rtdmtest_config *config =
		(struct rttst_rtdmtest_config *)arg;

	printk("%s: started with delay=%lld\n", __FUNCTION__, task_period);
	wakeup = rtdm_clock_read();

	while (1) {
#if 0
		if ((ret = rtdm_task_sleep(task_period)))
			break;
#else
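		/* Sleeping until an absolute wakeup time avoids the cumulative
		 * drift that the relative rtdm_task_sleep() above would show. */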
		wakeup += task_period;
		if ((ret = rtdm_task_sleep_until(wakeup)))
			break;
#endif
		//printk("%s: time=%lld\n", __FUNCTION__, rtdm_clock_read());
	}
	printk("%s terminating, ret=%d\n", __FUNCTION__, ret);
	return ret;
}
Example #10
static int rtdm_my_isr(rtdm_irq_t *irq_context)
{
	MY_DEV *up = rtdm_irq_get_arg(irq_context, MY_DEV);
	unsigned int iir, lsr;
	unsigned int type;
	int ret = RTDM_IRQ_NONE;
	int err;
	int max_count = 256;
	rtdm_lockctx_t context1;

	/* Latency between the trigger stamp (systime) and ISR entry */
	up->systime1 = rtdm_clock_read();
	up->timeout = up->systime1 - up->systime;

	printk("Interrupt Latency=%lld\n", (long long)up->timeout);

	up->systime1 = 0;
	up->systime = 0;

	printk("I am in rtdm_my_isr......!!!\n");
	printk("Local struct up=%p\n", up);

	err = rtdm_irq_disable(&up->irq_handle);
	if (err < 0)
		rtdm_printk("error in rtdm_irq_disable\n");
	rtdm_lock_get_irqsave(&up->lock, context1);

	do {
		iir = serial_in(up, UART_IIR);
		if (iir & UART_IIR_NO_INT)
			break;

		ret = RTDM_IRQ_HANDLED;
		lsr = serial_in(up, UART_LSR);
		type = iir & 0x3e;

		switch (type) {
		case UART_IIR_THRI:
			printk("type of int:UART_IIR_THRI\n");
			transmit_chars(up, lsr);
			rtdm_event_signal(&up->w_event_tx);
			break;

		case UART_IIR_RX_TIMEOUT:
			/* FALLTHROUGH */
		case UART_IIR_RDI:
			printk("type of int:UART_IIR_RDI\n");
			serial_omap_rdi(up, lsr);
			rtdm_event_signal(&up->w_event_rx);
			break;

		case UART_IIR_RLSI:
			printk("type of int:UART_IIR_RLSI\n");
			/* serial_omap_rlsi(up, lsr); */
			break;

		case UART_IIR_CTS_RTS_DSR:
			break;

		case UART_IIR_XOFF:
			/* FALLTHROUGH */
		default:
			break;
		}
	} while (!(iir & UART_IIR_NO_INT) && max_count--);

	rtdm_lock_put_irqrestore(&up->lock, context1);
	err = rtdm_irq_enable(&up->irq_handle);
	if (err < 0)
		rtdm_printk("error in rtdm_irq_enable\n");

	printk("rtdm_irq ended\n");

	up->systime = rtdm_clock_read();

	return ret;
}
Example #11
static ssize_t uart_wr_rt(struct rtdm_dev_context *context,
			  rtdm_user_info_t *user_info,
			  const void *buf, size_t nbyte)
{
	int ret = 0;
	int err;
	size_t count;
	char *tmp, *p;

	MY_DEV *up = (MY_DEV *)context->device->device_data;

	up->buf_len_tx = nbyte;

	printk("uart_wr_rt start\n");

	tmp = rtdm_malloc(nbyte);
	if (tmp == NULL)
		return -ENOMEM;

	if (rtdm_safe_copy_from_user(user_info, tmp, buf, up->buf_len_tx)) {
		rtdm_printk("ERROR : can't copy data to driver\n");
		rtdm_free(tmp);
		return -EFAULT;
	}

	/* Feed the buffer to the UART byte by byte; iterate over a separate
	 * cursor so the original pointer can be freed afterwards. */
	count = nbyte;
	p = tmp;
	while (count--) {
		write_buffer(up, *p);
		p++;

		/* Enable the transmitter holding register interrupt */
		if (!(up->ier & UART_IER_THRI)) {
			up->ier |= UART_IER_THRI;
			up->systime = rtdm_clock_read();
			serial_out(up, UART_IER, up->ier);
		}
	}

	printk("Tx interrupt enable\n");
	printk("rtdm_event_wait before\n");

	err = rtdm_event_wait(&up->w_event_tx);
	if (err < 0) {
		dev_err(up->dev, "controller timed out\n");
		rtdm_printk("rtdm_event_timedwait: timeout\n");
		rtdm_free(tmp);
		return -ETIMEDOUT;
	}

	up->systime1 = rtdm_clock_read();
	up->timeout = (up->systime1) - (up->systime);

	printk("scheduling latency=%lld\n", (long long)up->timeout);

	if (err == 0)
		ret = nbyte;

	printk("rtdm_event_wait after\n");
	printk("uart_wr_rt end\n");
	rtdm_free(tmp);
	return ret;
}
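For context, this handler is reached through the RTDM write service; a caller
might use it roughly as follows (a sketch; the device name "MY_UART" is made
up for illustration):

/* Sketch of a caller; the device name is hypothetical. */
int fd = rt_dev_open("MY_UART", O_RDWR);
if (fd >= 0) {
    const char msg[] = "hello";
    if (rt_dev_write(fd, msg, sizeof(msg) - 1) < 0)
        printk("write to MY_UART failed\n");
    rt_dev_close(fd);
}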
Example #12
static int
tulip_start_xmit(struct /*RTnet*/rtskb *skb, /*RTnet*/struct rtnet_device *rtdev)
{
	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
	int entry;
	u32 flag;
	dma_addr_t mapping;
	/*RTnet*/
	rtdm_lockctx_t context;


	rtdm_lock_get_irqsave(&tp->lock, context);

	/* TODO: move to rtdev_xmit, use queue */
	if (rtnetif_queue_stopped(rtdev)) {
		dev_kfree_rtskb(skb);
		tp->stats.tx_dropped++;

		rtdm_lock_put_irqrestore(&tp->lock, context);
		return 0;
	}
	/*RTnet*/

	/* Calculate the next Tx descriptor entry. */
	entry = tp->cur_tx % TX_RING_SIZE;

	tp->tx_buffers[entry].skb = skb;
	mapping = pci_map_single(tp->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
	tp->tx_buffers[entry].mapping = mapping;
	tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);

	if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
		flag = 0x60000000; /* No interrupt */
	} else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
		flag = 0xe0000000; /* Tx-done intr. */
	} else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
		flag = 0x60000000; /* No Tx-done intr. */
	} else {		/* Leave room for set_rx_mode() to fill entries. */
		flag = 0xe0000000; /* Tx-done intr. */
		rtnetif_stop_queue(rtdev);
	}
	if (entry == TX_RING_SIZE-1)
		flag = 0xe0000000 | DESC_RING_WRAP;

	tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
	/* if we were using Transmit Automatic Polling, we would need a
	 * wmb() here. */
	tp->tx_ring[entry].status = cpu_to_le32(DescOwned);

	/*RTnet*/
	/* get and patch time stamp just before the transmission */
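	/* The stamp location inside the frame is pre-initialised with an
	 * offset; the current clock value is added and the result stored
	 * back in network byte order. */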
	if (skb->xmit_stamp)
		*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
	/*RTnet*/

	wmb();

	tp->cur_tx++;

	/* Trigger an immediate transmit demand. */
	outl(0, rtdev->base_addr + CSR1);

	/*RTnet*/
	rtdm_lock_put_irqrestore(&tp->lock, context);
	/*RTnet*/

	return 0;
}
Example #13
/* The interrupt handler.
 * This is called from the CPM handler, not the MPC core interrupt.
 */
static int scc_enet_interrupt(rtdm_irq_t *irq_handle)
{
	struct rtnet_device *rtdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
	int packets = 0;
	struct	scc_enet_private *cep;
	volatile cbd_t	*bdp;
	ushort	int_events;
	int	must_restart;
	nanosecs_abs_t time_stamp = rtdm_clock_read();


	cep = (struct scc_enet_private *)rtdev->priv;

	/* Get the interrupt events that caused us to be here.
	*/
	int_events = cep->sccp->scc_scce;
	cep->sccp->scc_scce = int_events;
	must_restart = 0;

	/* Handle receive event in its own function.
	*/
	if (int_events & SCCE_ENET_RXF) {
		scc_enet_rx(rtdev, &packets, &time_stamp);
	}

	/* Check for a transmit error.  The manual is a little unclear
	 * about this, so the debug code until I get it figured out.  It
	 * appears that if TXE is set, then TXB is not set.  However,
	 * if carrier sense is lost during frame transmission, the TXE
	 * bit is set, "and continues the buffer transmission normally."
	 * I don't know if "normally" implies TXB is set when the buffer
	 * descriptor is closed.....trial and error :-).
	 */

	/* Transmit OK, or non-fatal error.  Update the buffer descriptors.
	*/
	if (int_events & (SCCE_ENET_TXE | SCCE_ENET_TXB)) {
	    rtdm_lock_get(&cep->lock);
	    bdp = cep->dirty_tx;
	    while ((bdp->cbd_sc&BD_ENET_TX_READY)==0) {
		RT_DEBUG(__FUNCTION__": Tx ok\n");
		if ((bdp==cep->cur_tx) && (cep->tx_full == 0))
		    break;

		if (bdp->cbd_sc & BD_ENET_TX_HB)	/* No heartbeat */
			cep->stats.tx_heartbeat_errors++;
		if (bdp->cbd_sc & BD_ENET_TX_LC)	/* Late collision */
			cep->stats.tx_window_errors++;
		if (bdp->cbd_sc & BD_ENET_TX_RL)	/* Retrans limit */
			cep->stats.tx_aborted_errors++;
		if (bdp->cbd_sc & BD_ENET_TX_UN)	/* Underrun */
			cep->stats.tx_fifo_errors++;
		if (bdp->cbd_sc & BD_ENET_TX_CSL)	/* Carrier lost */
			cep->stats.tx_carrier_errors++;


		/* No heartbeat or Lost carrier are not really bad errors.
		 * The others require a restart transmit command.
		 */
		if (bdp->cbd_sc &
		    (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
			must_restart = 1;
			cep->stats.tx_errors++;
		}

		cep->stats.tx_packets++;

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (bdp->cbd_sc & BD_ENET_TX_DEF)
			cep->stats.collisions++;

		/* Free the sk buffer associated with this last transmit.
		*/
		dev_kfree_rtskb(cep->tx_skbuff[cep->skb_dirty]);
		cep->skb_dirty = (cep->skb_dirty + 1) & TX_RING_MOD_MASK;

		/* Update pointer to next buffer descriptor to be transmitted.
		*/
		if (bdp->cbd_sc & BD_ENET_TX_WRAP)
			bdp = cep->tx_bd_base;
		else
			bdp++;

		/* I don't know if we can be held off from processing these
		 * interrupts for more than one frame time.  I really hope
		 * not.  In such a case, we would now want to check the
		 * currently available BD (cur_tx) and determine if any
		 * buffers between the dirty_tx and cur_tx have also been
		 * sent.  We would want to process anything in between that
		 * does not have BD_ENET_TX_READY set.
		 */

		/* Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (cep->tx_full) {
			cep->tx_full = 0;
			if (rtnetif_queue_stopped(rtdev))
				rtnetif_wake_queue(rtdev);
		}

		cep->dirty_tx = (cbd_t *)bdp;
	    }

	    if (must_restart) {
		volatile cpm8xx_t *cp;

		/* Some transmit errors cause the transmitter to shut
		 * down.  We now issue a restart transmit.  Since the
		 * errors close the BD and update the pointers, the restart
		 * _should_ pick up without having to reset any of our
		 * pointers either.
		 */
		cp = cpmp;
		cp->cp_cpcr =
		    mk_cr_cmd(CPM_CR_ENET, CPM_CR_RESTART_TX) | CPM_CR_FLG;
		while (cp->cp_cpcr & CPM_CR_FLG);
	    }
	    rtdm_lock_put(&cep->lock);
	}

	/* Check for receive busy, i.e. packets coming but no place to
	 * put them.  This "can't happen" because the receive interrupt
	 * is tossing previous frames.
	 */
	if (int_events & SCCE_ENET_BSY) {
		cep->stats.rx_dropped++;
		rtdm_printk("CPM ENET: BSY can't happen.\n");
	}

	if (packets > 0)
		rt_mark_stack_mgr(rtdev);
	return RTDM_IRQ_HANDLED;
}
Example #14
static int
fec_enet_start_xmit(struct rtskb *skb, struct rtnet_device *ndev)
{
	struct fec_enet_private *fep = rtnetdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	struct bufdesc *bdp;
	void *bufaddr;
	unsigned short	status;
	rtdm_lockctx_t context;

	if (!fep->link) {
		/* Link is down or autonegotiation is in progress. */
		printk("%s: tx link down!.\n", ndev->name);
		rtnetif_stop_queue(ndev);
		return 1;	/* RTnet: will call kfree_rtskb() */
	}

	rtdm_lock_get_irqsave(&fep->hw_lock, context);

	/* RTnet */
	if (skb->xmit_stamp)
		*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() +
					       *skb->xmit_stamp);

	/* Fill in a Tx ring entry */
	bdp = fep->cur_tx;

	status = bdp->cbd_sc;

	if (status & BD_ENET_TX_READY) {
		/* Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since ndev->tbusy should be set.
		 */
		printk("%s: tx queue full!.\n", ndev->name);
		rtdm_lock_put_irqrestore(&fep->hw_lock, context);
		return 1;	/* RTnet: will call kfree_rtskb() */
	}

	/* Clear all of the status flags */
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	bufaddr = skb->data;
	bdp->cbd_datlen = skb->len;

	/*
	 * On some FEC implementations data must be aligned on
	 * 4-byte boundaries. Use bounce buffers to copy data
	 * and get it aligned. Ugh.
	 */
	if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
		unsigned int index;
		index = bdp - fep->tx_bd_base;
		memcpy(fep->tx_bounce[index], skb->data, skb->len);
		bufaddr = fep->tx_bounce[index];
	}

	/*
	 * Some design made an incorrect assumption on endian mode of
	 * the system that it's running on. As the result, driver has to
	 * swap every frame going to and coming from the controller.
	 */
	if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
		swap_buffer(bufaddr, skb->len);

	/* Save skb pointer */
	fep->tx_skbuff[fep->skb_cur] = skb;

	fep->stats.tx_bytes += skb->len;
	fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;

	/* Push the data cache so the CPM does not get stale memory
	 * data.
	 */
	bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
			FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);

	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
	bdp->cbd_sc = status;

	/* Trigger transmission start */
	writel(0, fep->hwp + FEC_X_DES_ACTIVE);

	/* If this was the last BD in the ring, start at the beginning again. */
	if (status & BD_ENET_TX_WRAP)
		bdp = fep->tx_bd_base;
	else
		bdp++;

	if (bdp == fep->dirty_tx) {
		fep->tx_full = 1;
		rtnetif_stop_queue(ndev);
	}

	fep->cur_tx = bdp;

	rtdm_lock_put_irqrestore(&fep->hw_lock, context);

	return NETDEV_TX_OK;
}
Example #15
static void sync_task_func(void *arg)
{
    int             ret;
    rtdm_lockctx_t  lock_ctx;
    nanosecs_abs_t  timestamp;
    nanosecs_abs_t  timestamp_master;
    rtser_event_t   ser_rx_event;

    can_frame_t can_frame = {
        .can_id = clock_sync_can_id,
        .can_dlc = sizeof(timestamp),
    };
    struct iovec iov = {
        .iov_base = &can_frame,
        .iov_len = sizeof(can_frame_t),
    };
    struct msghdr msg = {
        .msg_name = NULL,
        .msg_namelen = 0,
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = NULL,
        .msg_controllen = 0,
    };

    if (clock_sync_mode == SYNC_CAN_SLAVE) {
        msg.msg_control = &timestamp;
        msg.msg_controllen = sizeof(timestamp);
    }

    while (1) {
        switch (clock_sync_mode) {
            case SYNC_SER_MASTER:
                timestamp = cpu_to_be64(rtdm_clock_read());

                ret = sync_dev_ctx->ops->write_rt(sync_dev_ctx, NULL,
                    &timestamp, sizeof(timestamp));
                if (ret != sizeof(timestamp)) {
                    tims_error("[CLOCK SYNC]: can't write serial time stamp, "
                               "code = %d\n", ret);
                    goto exit_task;
                }

                rtdm_task_wait_period();
                break;

            case SYNC_SER_SLAVE:
                ret = sync_dev_ctx->ops->ioctl_rt(sync_dev_ctx, NULL,
                    RTSER_RTIOC_WAIT_EVENT, &ser_rx_event);
                if (ret < 0) {
                    tims_error("[CLOCK SYNC]: can't read serial time stamp, "
                               "code = %d\n", ret);
                    goto exit_task;
                }

                ret = sync_dev_ctx->ops->read_rt(sync_dev_ctx, NULL,
                    &timestamp_master, sizeof(timestamp_master));
                if (ret != sizeof(timestamp_master)) {
                    tims_error("[CLOCK SYNC]: can't read serial time stamp, "
                               "code = %d\n", ret);
                    goto exit_task;
                }

                timestamp_master = be64_to_cpu(timestamp_master);

                rtdm_lock_get_irqsave(&sync_lock, lock_ctx);
                clock_offset =
                    timestamp_master - ser_rx_event.rxpend_timestamp;
                rtdm_lock_put_irqrestore(&sync_lock, lock_ctx);
                break;

            case SYNC_CAN_MASTER:
                // workaround for kernel working on user data
                iov.iov_len = sizeof(can_frame_t);
                iov.iov_base = &can_frame;
                // workaround end
                *(nanosecs_abs_t *)can_frame.data =
                    cpu_to_be64(rtdm_clock_read());

                ret = sync_dev_ctx->ops->sendmsg_rt(sync_dev_ctx, NULL,
                                                    &msg, 0);
                if (ret < 0) {
                    tims_error("[CLOCK SYNC]: can't send CAN time stamp, "
                               "code = %d\n", ret);
                    goto exit_task;
                }

                rtdm_task_wait_period();
                break;

            case SYNC_CAN_SLAVE:
                // workaround for kernel working on user data
                iov.iov_len = sizeof(can_frame_t);
                iov.iov_base = &can_frame;
                // workaround end
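                /* For the CAN slave the reception timestamp is delivered
                   via msg_control, as set up before the loop */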

                ret = sync_dev_ctx->ops->recvmsg_rt(sync_dev_ctx, NULL,
                                                    &msg, 0);
                if (ret < 0) {
                    tims_error("[CLOCK SYNC]: can't receive CAN time stamp, "
                               "code = %d\n", ret);
                    goto exit_task;
                }

                timestamp_master =
                    be64_to_cpu(*(nanosecs_abs_t *)can_frame.data);

                rtdm_lock_get_irqsave(&sync_lock, lock_ctx);
                clock_offset = timestamp_master - timestamp;
                rtdm_lock_put_irqrestore(&sync_lock, lock_ctx);
                break;
        }
    }

 exit_task:
    rtdm_context_unlock(sync_dev_ctx);
}


static __initdata char *mode_str[] = {
    "Local Clock", "RTnet", "CAN Master", "CAN Slave",
    "Serial Master", "Serial Slave"
};

static __initdata struct rtser_config sync_serial_config = {
    .config_mask        = RTSER_SET_BAUD | RTSER_SET_FIFO_DEPTH |
                          RTSER_SET_TIMESTAMP_HISTORY | RTSER_SET_EVENT_MASK,
    .baud_rate          = 115200,
    .fifo_depth         = RTSER_FIFO_DEPTH_8,
    .timestamp_history  = RTSER_RX_TIMESTAMP_HISTORY,
    .event_mask         = RTSER_EVENT_RXPEND,
};

int __init tims_clock_init(void)
{
    struct can_filter   filter;
    int                 nr_filters = 1;
    struct ifreq        can_ifr;
    struct sockaddr_can can_addr;
    int                 ret;

    if (clock_sync_mode < SYNC_NONE || clock_sync_mode > SYNC_SER_SLAVE) {
        tims_error("invalid clock_sync_mode %d", clock_sync_mode);
        return -EINVAL;
    }

    printk("TIMS: clock sync mode is %s\n", mode_str[clock_sync_mode]);
    printk("TIMS: clock sync dev is %s\n", clock_sync_dev);

    rtdm_lock_init(&sync_lock);

    switch(clock_sync_mode) {
        case SYNC_NONE:
            return 0;

        case SYNC_RTNET:
            sync_dev_fd = rt_dev_open(clock_sync_dev, O_RDONLY);
            if (sync_dev_fd < 0)
                goto sync_dev_error;
            set_bit(TIMS_INIT_BIT_SYNC_DEV, &init_flags);
            break;

        case SYNC_CAN_MASTER:
        case SYNC_CAN_SLAVE:
            sync_dev_fd = rt_dev_socket(PF_CAN, SOCK_RAW, 0);
            if (sync_dev_fd < 0) {
                tims_error("[CLOCK SYNC]: error opening CAN socket: %d\n",
                           sync_dev_fd);
                return sync_dev_fd;
            }
            set_bit(TIMS_INIT_BIT_SYNC_DEV, &init_flags);

            strcpy(can_ifr.ifr_name, clock_sync_dev);
            ret = rt_dev_ioctl(sync_dev_fd, SIOCGIFINDEX, &can_ifr);
            if (ret) {
                tims_info("[CLOCK SYNC]: error resolving CAN interface: %d\n",
                          ret);
                return ret;
            }

            if (clock_sync_mode == SYNC_CAN_MASTER)
                nr_filters = 0;
            else {
                filter.can_id   = clock_sync_can_id;
                filter.can_mask = 0xFFFFFFFF;
            }

            ret = rt_dev_setsockopt(sync_dev_fd, SOL_CAN_RAW, CAN_RAW_FILTER,
                                    &filter, nr_filters*sizeof(can_filter_t));
            if (ret < 0)
                goto config_error;

            /* Bind socket to default CAN ID */
            can_addr.can_family  = AF_CAN;
            can_addr.can_ifindex = can_ifr.ifr_ifindex;

            ret = rt_dev_bind(sync_dev_fd, (struct sockaddr *)&can_addr,
                              sizeof(can_addr));
            if (ret < 0)
                goto config_error;

            /* Enable timestamps for incoming packets */
            ret = rt_dev_ioctl(sync_dev_fd, RTCAN_RTIOC_TAKE_TIMESTAMP,
                               RTCAN_TAKE_TIMESTAMPS);
            if (ret < 0)
                goto config_error;

            /* Calculate transmission delay */
            ret = rt_dev_ioctl(sync_dev_fd, SIOCGCANBAUDRATE, &can_ifr);
            if (ret < 0)
                goto config_error;

            /* (47+64 bit) * 1.000.000.000 (ns/sec) / baudrate (bit/s) */
            sync_delay = 1000 * (111000000 / can_ifr.ifr_ifru.ifru_ivalue);
            break;

        case SYNC_SER_MASTER:
        case SYNC_SER_SLAVE:
            sync_dev_fd = rt_dev_open(clock_sync_dev, O_RDWR);
            if (sync_dev_fd < 0)
                goto sync_dev_error;
            set_bit(TIMS_INIT_BIT_SYNC_DEV, &init_flags);

            ret = rt_dev_ioctl(sync_dev_fd, RTSER_RTIOC_SET_CONFIG,
                               &sync_serial_config);
            if (ret < 0)
                goto config_error;

            /* (80 bit) * 1.000.000.000 (ns/sec) / baudrate (bit/s) */
            sync_delay = 1000 * (80000000 / sync_serial_config.baud_rate);
            break;
    }

    sync_dev_ctx = rtdm_context_get(sync_dev_fd);

    if (clock_sync_mode != SYNC_RTNET) {
        ret = rtdm_task_init(&sync_task, "TIMSClockSync", sync_task_func,
                             NULL, CLOCK_SYNC_PRIORITY, CLOCK_SYNC_PERIOD);
        if (ret < 0)
            return ret;
        set_bit(TIMS_INIT_BIT_SYNC_TASK, &init_flags);
    }

    return 0;

 sync_dev_error:
    tims_error("[CLOCK SYNC]: cannot open %s\n", clock_sync_dev);
    return sync_dev_fd;

 config_error:
    tims_info("[CLOCK SYNC]: error configuring sync device: %d\n", ret);
    return ret;
}


void tims_clock_cleanup(void)
{
    if (test_and_clear_bit(TIMS_INIT_BIT_SYNC_DEV, &init_flags))
        rt_dev_close(sync_dev_fd);

    if (test_and_clear_bit(TIMS_INIT_BIT_SYNC_TASK, &init_flags))
        rtdm_task_join_nrt(&sync_task, 100);
}
Example #16
static int
scc_enet_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
{
	struct scc_enet_private *cep = (struct scc_enet_private *)rtdev->priv;
	volatile cbd_t	*bdp;
	rtdm_lockctx_t context;


	RT_DEBUG(__FUNCTION__": ...\n");

	/* Fill in a Tx ring entry */
	bdp = cep->cur_tx;

#ifndef final_version
	if (bdp->cbd_sc & BD_ENET_TX_READY) {
		/* Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since cep->tx_busy should be set.
		 */
		rtdm_printk("%s: tx queue full!.\n", rtdev->name);
		return 1;
	}
#endif

	/* Clear all of the status flags.
	 */
	bdp->cbd_sc &= ~BD_ENET_TX_STATS;

	/* If the frame is short, tell CPM to pad it.
	*/
	if (skb->len <= ETH_ZLEN)
		bdp->cbd_sc |= BD_ENET_TX_PAD;
	else
		bdp->cbd_sc &= ~BD_ENET_TX_PAD;

	/* Set buffer length and buffer pointer.
	*/
	bdp->cbd_datlen = skb->len;
	bdp->cbd_bufaddr = __pa(skb->data);

	/* Save skb pointer.
	*/
	cep->tx_skbuff[cep->skb_cur] = skb;

	cep->stats.tx_bytes += skb->len;
	cep->skb_cur = (cep->skb_cur+1) & TX_RING_MOD_MASK;
	
	/* Prevent interrupts from changing the Tx ring from underneath us. */
	// *** RTnet ***
#if 0
	rtdm_irq_disable(&cep->irq_handle);
	rtdm_lock_get(&cep->lock);
#else
	rtdm_lock_get_irqsave(&cep->lock, context);
#endif

	/* Get and patch time stamp just before the transmission */
	if (skb->xmit_stamp)
		*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);

	/* Push the data cache so the CPM does not get stale memory
	 * data.
	 */
	flush_dcache_range((unsigned long)(skb->data),
			   (unsigned long)(skb->data + skb->len));


	/* Send it on its way.  Tell CPM it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR | BD_ENET_TX_LAST | BD_ENET_TX_TC);
#if 0
	dev->trans_start = jiffies;
#endif

	/* If this was the last BD in the ring, start at the beginning again.
	*/
	if (bdp->cbd_sc & BD_ENET_TX_WRAP)
		bdp = cep->tx_bd_base;
	else
		bdp++;

	if (bdp->cbd_sc & BD_ENET_TX_READY) {
	        rtnetif_stop_queue(rtdev);
		cep->tx_full = 1;
	}

	cep->cur_tx = (cbd_t *)bdp;

	// *** RTnet ***
#if 0
	rtdm_lock_put(&cep->lock);
	rtdm_irq_enable(&cep->irq_handle);
#else
	rtdm_lock_put_irqrestore(&cep->lock, context);
#endif

	return 0;
}
Example #17
void task2(void *cookie)
{
	long i, max;
	nanosecs_abs_t t, dt;

	rt_printk("TESTING TIMING OUT TIMEDDOWN ...");
	for (max = i = 0; i < LOOPS; i++) {
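		/* Measure how far beyond the requested DELAY the timeout fires */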
		t = rtdm_clock_read();
		if (rtdm_sem_timeddown(&sem2, DELAY, NULL) == -ETIMEDOUT) {
			dt = rtdm_clock_read() - t - DELAY;
			if (dt > max) {
				max = dt;
			}
		} else {
			break;
		}
	}
	if (i == LOOPS) {
		rt_printk(" OK [%lu (ns)].\n", max);
	} else {
		rt_printk(" NOT OK [MAXLAT %lu (ns)].\n", max);
	}

	rt_printk("TESTING FAILING TRY DOWN ...");
	for (i = 0; i < LOOPS; i++) {
		if (rtdm_sem_timeddown(&sem2, RTDM_TIMEOUT_NONE, NULL) != -EWOULDBLOCK) {
			break;
		}
	}
	if (i == LOOPS) {
		rt_printk(" OK.\n");
	} else {
		rt_printk(" NOT OK.\n", max);
	}

	rt_printk("TESTING SUCCEEDING TRY DOWN ...");
	rtdm_sem_up(&sem2);
	for (i = 0; i < LOOPS; i++) {
		if (!rtdm_sem_timeddown(&sem2, RTDM_TIMEOUT_NONE, NULL)) {
			rtdm_sem_up(&sem2);
		} else {
			break;
		}
	}
	if (i == LOOPS) {
		rt_printk(" OK.\n");
	} else {
		rt_printk(" NOT OK.\n", max);
	}

	rt_printk("TESTING DOWN/UP ...");
	rtdm_sem_down(&sem2);
	for (i = 0; i < LOOPS; i++) {
		rtdm_sem_up(&sem1);
		if (rtdm_sem_down(&sem2)) {
			break;
		}
	}
	if (i == LOOPS) {
		rt_printk(" OK.\n");
	} else {
		rt_printk(" NOT OK.\n", max);
	}

	rt_printk("TESTING NOT TIMING OUT TIMEDDOWN ...");
	for (i = 0; i < LOOPS; i++) {
		rtdm_sem_up(&sem1);
		if (rtdm_sem_timeddown(&sem2, DELAY, NULL)) {
			break;
		}
	}
	if (i == LOOPS) {
		rt_printk(" OK.\n");
	} else {
		rt_printk(" NOT OK.\n", max);
	}
}
Example #18
static int tulip_rx(/*RTnet*/struct rtnet_device *rtdev, nanosecs_t *time_stamp)
{
	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		/*RTnet*/rtdm_printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
			   tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);

		if (tulip_debug > 5)
			/*RTnet*/rtdm_printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
				   rtdev->name, entry, status);
		if (--rx_work_limit < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						/*RTnet*/rtdm_printk(KERN_WARNING "%s: Oversized Ethernet frame "
							   "spanned multiple buffers, status %8.8x!\n",
							   rtdev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else if (status & RxDescFatalErr) {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					/*RTnet*/rtdm_printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						   rtdev->name, status);
				tp->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct /*RTnet*/rtskb *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				/*RTnet*/rtdm_printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
					   rtdev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif

#if 0 /*RTnet*/
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak
				&& (skb = /*RTnet*/dev_alloc_rtskb(pkt_len + 2)) != NULL) {
				skb->rtdev = rtdev;
				/*RTnet*/rtskb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single(tp->pdev,
						    tp->rx_buffers[entry].mapping,
						    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				//eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
				//		 pkt_len, 0);
				memcpy(rtskb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->tail,
				       pkt_len);
#else
				memcpy(/*RTnet*/rtskb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->tail,
				       pkt_len);
#endif
			} else { 	/* Pass up the skb already on the Rx ring. */
#endif /*RTnet*/
			{
				char *temp = /*RTnet*/rtskb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					/*RTnet*/rtdm_printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
					       "do not match in tulip_rx: %08x vs. %08x ? / %p.\n",
					       rtdev->name,
					       le32_to_cpu(tp->rx_ring[entry].buffer1),
					       tp->rx_buffers[entry].mapping,
					       temp);/*RTnet*/
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = /*RTnet*/rt_eth_type_trans(skb, rtdev);
			skb->time_stamp = *time_stamp;
			/*RTnet*/rtnetif_rx(skb);

			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
int tulip_interrupt(rtdm_irq_t *irq_handle)
{
	nanosecs_t time_stamp = rtdm_clock_read();/*RTnet*/
	struct rtnet_device *rtdev =
	    rtdm_irq_get_arg(irq_handle, struct rtnet_device);/*RTnet*/
	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
	long ioaddr = rtdev->base_addr;
	unsigned int csr5;
	int entry;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
	unsigned int work_count = tulip_max_interrupt_work;

	/* Let's see whether the interrupt really is for us */
	csr5 = inl(ioaddr + CSR5);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0) {
		rtdm_printk("%s: unexpected IRQ!\n",rtdev->name);
		return 0;
	}

	tp->nir++;

	do {
		/* Acknowledge all of the current interrupt sources ASAP. */
		outl(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (tulip_debug > 4)
			/*RTnet*/rtdm_printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
				   rtdev->name, csr5, inl(rtdev->base_addr + CSR5));

		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(rtdev, &time_stamp);
			tulip_refill_rx(rtdev);
		}

		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			rtdm_lock_get(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
				 dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;			/* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
							 tp->tx_buffers[entry].mapping,
							 sizeof(tp->setup_frame),
							 PCI_DMA_TODEVICE);
					continue;
				}

				if (status & 0x8000) {
					/* There was a major error, log it. */
#ifndef final_version
					if (tulip_debug > 1)
						/*RTnet*/rtdm_printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
							   rtdev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & 0x4104) tp->stats.tx_aborted_errors++;
					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
					if (status & 0x0200) tp->stats.tx_window_errors++;
					if (status & 0x0002) tp->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						tp->stats.tx_heartbeat_errors++;
				} else {
					tp->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}

				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				/*RTnet*/dev_kfree_rtskb(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
				rtnetif_tx(rtdev);
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				/*RTnet*/rtdm_printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
					   rtdev->name, dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				/*RTnet*/rtnetif_wake_queue(rtdev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					/*RTnet*/rtdm_printk(KERN_WARNING "%s: The transmitter stopped."
						   "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
						   rtdev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
				tulip_restart_rxtx(tp);
			}
			rtdm_lock_put(&tp->lock);
		}

		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
#if 0 /*RTnet*/
			if (csr5 & TxJabber) tp->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;  /* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				outl(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					outl(tp->mc_filter[0], ioaddr + 0xAC);
					outl(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {		/* Missed a Rx frame. */
                                tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
				tp->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
			 * call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(rtdev, csr5);
			}
			if (csr5 & SytemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error.  The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 *   -- rmk
				 */
				/*RTnet*/rtdm_printk(KERN_ERR "%s: (%lu) System Error occured (%d)\n",
					rtdev->name, tp->nir, error);
			}
#endif /*RTnet*/
			/*RTnet*/rtdm_printk(KERN_ERR "%s: Error detected, "
			    "device may not work any more (csr5=%08x)!\n", rtdev->name, csr5);
			/* Clear all error sources, included undocumented ones! */
			outl(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
Example #19
void task2(void *cookie)
{
	long i, max, varl;
	nanosecs_abs_t t, dt;

	rt_printk("TESTING TIMING OUT TIMEDLOCK ...");
	for (max = i = 0; i < LOOPS; i++) {
		t = rtdm_clock_read();
		if (rtdm_mutex_timedlock(&mutex, DELAY, NULL) == -ETIMEDOUT) {
			dt = rtdm_clock_read() - t - DELAY;
			if (dt > max) {
				max = dt;
			}
		} else {
			break;
		}
	}
	if (i == LOOPS) {
		rt_printk(" OK [%lu (ns)].\n", max);
	} else {
		rt_printk(" NOT OK [MAXLAT %lu (ns)].\n", max);
	}

	rt_printk("TESTING FAILING TRY LOCK ...");
	for (i = 0; i < LOOPS; i++) {
		if (rtdm_mutex_timedlock(&mutex, RTDM_TIMEOUT_NONE, NULL) != -EWOULDBLOCK) {
			break;
		}
	}
	if (i == LOOPS) {
		rt_printk(" OK.\n");
	} else {
		rt_printk(" NOT OK.\n", max);
	}

	rt_printk("TESTING SUCCEEDING TRY LOCK ...");
	rtdm_sem_up(&sem);
	for (i = 0; i < LOOPS; i++) {
		if (!rtdm_mutex_timedlock(&mutex, RTDM_TIMEOUT_NONE, NULL)) {
			rtdm_mutex_unlock(&mutex);
		} else {
			break;
		}
	}
	if (i == LOOPS) {
		rt_printk(" OK.\n");
	} else {
		rt_printk(" NOT OK.\n", max);
	}

	rt_printk("TESTING LOCK/UNLOCK ...");
	rtdm_sem_up(&sem);
	rtdm_sem_down(&sem);
	for (i = 0; i < LOOPS; i++) {
		if (rtdm_mutex_lock(&mutex)) {
			break;
		}
		varl = ++var;
		if (rtdm_mutex_lock(&mutex)) {
			break;
		}
		rtdm_mutex_unlock(&mutex);
		rtdm_mutex_unlock(&mutex);
		while(varl == var) rt_sleep(nano2count(TIMEOUT));
		if ((var - varl) != 1) {
			rt_printk("WRONG INCREMENT OF VARIABLE IN TASK2\n");
			break;
		}
	}
	if (i == LOOPS) {
		rt_printk(" OK.\n");
	} else {
		rt_printk(" NOT OK.\n", max);
	}

	rt_printk("TESTING NOT TIMING OUT TIMEDLOCK ...");
	for (i = 0; i < LOOPS; i++) {
		if (rtdm_mutex_timedlock(&mutex, DELAY, NULL)) {
			break;
		}
		if (rtdm_mutex_lock(&mutex)) {
			break;
		}
		varl = ++var;
		rtdm_mutex_unlock(&mutex);
		rtdm_mutex_unlock(&mutex);
		while(varl == var) rt_sleep(nano2count(TIMEOUT));
		if ((var - varl) != 1) {
			rt_printk("WRONG INCREMENT OF VARIABLE IN TASK2\n");
			break;
		}
	}
	if (i == LOOPS) {
		rt_printk(" OK.\n");
	} else {
		rt_printk(" NOT OK.\n", max);
	}
}