static int uart_close_nrt(struct rtdm_dev_context *context, rtdm_user_info_t *user_info)
{
	int err;
	MY_DEV *up = (MY_DEV *)context->device->device_data;

	dev_dbg(up->dev, "serial_omap_shutdown+%d\n", up->line);

	up->ier = 0;
	serial_out(up, UART_IER, 0);

	/* disable break condition and FIFOs */
	serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC);
	serial_omap_clear_fifos(up);

	/* read the data port to reset things, then free the irq */
	if (serial_in(up, UART_LSR) & UART_LSR_DR)
		(void)serial_in(up, UART_RX);

	err = rtdm_irq_disable(&up->irq_handle);	/* disable irq */
	if (err < 0) {
		rtdm_printk("error in rtdm_irq_disable\n");
		return err;
	}
	rtdm_printk("rtdm_irq_disable\n");

	rtdm_irq_free(&up->irq_handle);
	free_irq(up->irq, up);
	return 0;
}
static int uart_omap_remove(struct platform_device *pdev)
{
	rtdm_printk("uart_omap_remove\n");
	return 0;
}
void rtcan_raw_print_filter(struct rtcan_device *dev) { int i; struct rtcan_recv *r = dev->receivers; rtdm_printk("%s: recv_list=%p empty_list=%p free_entries=%d\n", dev->name, dev->recv_list, dev->empty_list, dev->free_entries); for (i = 0; i < RTCAN_MAX_RECEIVERS; i++, r++) { rtdm_printk("%2d %p sock=%p next=%p id=%x mask=%x\n", i, r, r->sock, r->next, r->can_filter.can_id, r->can_filter.can_mask); } }
static ssize_t uart_rd_rt(struct rtdm_dev_context *context, rtdm_user_info_t *user_info, void *buf, size_t nbyte)
{
	int err;
	int ret = 0;
	int count;
	MY_DEV *up = (MY_DEV *)context->device->device_data;
	u8 *tmp;

	printk("..............uart_rd_rt start\n");

	tmp = (u8 *)rtdm_malloc(nbyte);
	if (!tmp)
		return -ENOMEM;
	up->buf_rx = tmp;
	up->buf_len_rx = nbyte;
	count = nbyte;

	/* enable the receive-data interrupt and wait for the ISR to signal us */
	if (!(up->ier & UART_IER_RDI)) {
		up->ier |= UART_IER_RDI;
		serial_out(up, UART_IER, up->ier);
	}

	err = rtdm_event_wait(&up->w_event_rx);
	if (err < 0) {
		dev_err(up->dev, "controller timed out\n");
		rtdm_printk("rtdm_event_wait failed: %d\n", err);
		rtdm_free(tmp);
		return err;
	}
	ret = nbyte;

	/* drain the circular buffer that the ISR filled */
	while (count--) {
		*tmp = read_buffer(up);
		printk("Receive rd=%x\n", *tmp);
		tmp = tmp + 1;
	}
	tmp = tmp - nbyte;

	if (rtdm_safe_copy_to_user(user_info, buf, (void *)tmp, nbyte))
		rtdm_printk("ERROR : can't copy data from driver\n");

	printk("............uart_rd_rt end\n");
	rtdm_free(tmp);
	return ret;
}
void pwm_up(rtdm_timer_t *timer)
{
	int retval;
	size_t channel;

	for (channel = 0; channel < RC_NUM; ++channel) {
		/* set pwm output high */
		gpio_set_value(channels[channel].pwm, 1);

		/* ideal time to pull the signal down again */
		down_time[channel] = rtdm_clock_read_monotonic() + up_interval[channel];

		if (reconfigured[channel]) {
			reconfigured[channel] = 0;
			rtdm_timer_stop(&down_timer[channel]);
			/* request the timer to fire DELTA ns earlier than needed */
			retval = rtdm_timer_start(&down_timer[channel],
						  up_interval[channel] - DELTA,
						  PERIOD, RTDM_TIMERMODE_RELATIVE);
			if (retval)
				rtdm_printk("TB6612FNG: error reconfiguring down-timer #%zu: %i\n",
					    channel, retval);
		}
	}
}
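/*
 * The matching pwm_down handler (referenced in initpwm() below) is not part of
 * this listing. The following is only a minimal sketch of what it might look
 * like, assuming the handler recovers its channel from the per-channel timer
 * array and spins for the last DELTA ns so the falling edge lands on the
 * precomputed down_time.
 */
void pwm_down(rtdm_timer_t *timer)
{
	/* hypothetical: derive the channel index from the per-channel timer array */
	size_t channel = timer - down_timer;

	/* busy-wait the remaining DELTA ns to hit the ideal switch-off instant */
	while (rtdm_clock_read_monotonic() < down_time[channel])
		;
	gpio_set_value(channels[channel].pwm, 0);
}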
static void transmit_chars(struct uart_omap_port *up, unsigned int lsr)
{
	u16 w;

	/* runs in the RTDM ISR path, so use rtdm_printk for diagnostics */
	rtdm_printk("transmit_chars.....start\n");

	/* bail out if the transmit holding register is not empty yet */
	if (!(lsr & UART_LSR_THRE)) {
		rtdm_printk("holding register not empty\n");
		return;
	}

	/* (void) serial_in(up, UART_LSR); */
	while (up->buf_len_tx) {
		/* w = *up->buf_tx++; */
		w = read_buffer(up);
		rtdm_printk("up->buf_len_tx--=%d\n", up->buf_len_tx);
		up->buf_len_tx--;
		rtdm_printk("buffer value in transmit_chars=%x\n", w);
		serial_out(up, UART_TX, w);
	}

	if (up->buf_len_tx == 0)
		serial_omap_stop_tx(up);

	rtdm_printk("transmit_chars......end\n");
}
void rtcan_tx_push(struct rtcan_device *dev, struct rtcan_socket *sock, can_frame_t *frame) { struct rtcan_rb_frame *rb_frame = &dev->tx_skb.rb_frame; RTCAN_ASSERT(dev->tx_socket == 0, rtdm_printk("(%d) TX skb still in use", dev->ifindex););
int __init rtwlan_init(void) { if (rtnet_register_ioctls(&rtnet_wlan_ioctls)) rtdm_printk(KERN_ERR "Failed to register rtnet_wlan_ioctl!\n"); return 0; }
/*** * common reply function */ static void rt_icmp_send_reply(struct icmp_bxm *icmp_param, struct rtskb *skb) { struct dest_route rt; int err; icmp_param->head.icmph.checksum = 0; icmp_param->csum = 0; /* route back to the source address via the incoming device */ if (rt_ip_route_output(&rt, skb->nh.iph->saddr, skb->rtdev->local_ip) != 0) return; rt_socket_reference(icmp_socket); err = rt_ip_build_xmit(icmp_socket, rt_icmp_glue_reply_bits, icmp_param, sizeof(struct icmphdr) + icmp_param->data_len, &rt, MSG_DONTWAIT); if (err) rt_socket_dereference(icmp_socket); rtdev_dereference(rt.rtdev); RTNET_ASSERT(err == 0, rtdm_printk("RTnet: %s() error in xmit\n", __FUNCTION__););
void t21142_start_nway(/*RTnet*/struct rtnet_device *rtdev) { struct tulip_private *tp = (struct tulip_private *)rtdev->priv; long ioaddr = rtdev->base_addr; int csr14 = ((tp->sym_advertise & 0x0780) << 9) | ((tp->sym_advertise & 0x0020) << 1) | 0xffbf; rtdev->if_port = 0; tp->nway = tp->mediasense = 1; tp->nwayset = tp->lpar = 0; if (tulip_debug > 1) /*RTnet*/rtdm_printk(KERN_DEBUG "%s: Restarting 21143 autonegotiation, csr14=%8.8x.\n", rtdev->name, csr14); outl(0x0001, ioaddr + CSR13); udelay(100); outl(csr14, ioaddr + CSR14); tp->csr6 = 0x82420000 | (tp->sym_advertise & 0x0040 ? FullDuplex : 0); outl(tp->csr6, ioaddr + CSR6); if (tp->mtable && tp->mtable->csr15dir) { outl(tp->mtable->csr15dir, ioaddr + CSR15); outl(tp->mtable->csr15val, ioaddr + CSR15); } else outw(0x0008, ioaddr + CSR15); outl(0x1301, ioaddr + CSR12); /* Trigger NWAY. */ }
static void __exit omap_uart_exit_driver(void) { rtdm_printk("omap_uart_exit_driver exit\n"); platform_driver_unregister(&omap_uart_driver); rtdm_dev_unregister(&uart_device, 1000); }
void cleanup_mpu9150_irq(void)
{
	rtdm_printk("MPU9150-IRQ: Releasing IRQ resources...\n");

	if (irq_requested) {
		rtdm_irq_disable(&mpu9150_irq);
		rtdm_irq_free(&mpu9150_irq);
	}

	if (irq_gpio_requested)
		gpio_free(mpu9150_irq_desc.gpio_desc.gpio);

	/* close the pipe */
	rt_pipe_delete(&pipe_desc);

	rtdm_printk("MPU9150-IRQ: IRQ resources released\n");
}
void __exit rpi_gpio_exit(void)
{
	rtdm_printk("RPI_GPIO RTDM, unloading\n");

	/* unmap the GPIO register window */
	iounmap(virt_addr);

	rtdm_dev_unregister(&device, 1000);
}
static irqreturn_t serial_omap_irq(int irq, void *dev_id)
{
	/* irqreturn_t ret; */
	/* MY_DEV *dev = dev_id; */
	rtdm_printk("..............my_isr_2..............\n");
	return IRQ_HANDLED;
}
/*** * rt_loopback_xmit - begin packet transmission * @skb: packet to be sent * @dev: network device to which packet is sent * */ static int rt_loopback_xmit(struct rtskb *skb, struct rtnet_device *rtdev) { unsigned short hash; struct rtpacket_type *pt_entry; rtdm_lockctx_t context; /* write transmission stamp - in case any protocol ever gets the idea to ask the lookback device for this service... */ if (skb->xmit_stamp) *skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp); /* make sure that critical fields are re-intialised */ skb->chain_end = skb; /* parse the Ethernet header as usual */ skb->protocol = rt_eth_type_trans(skb, rtdev); skb->nh.raw = skb->data; rtdev_reference(rtdev); rtcap_report_incoming(skb); hash = ntohs(skb->protocol) & RTPACKET_HASH_KEY_MASK; rtdm_lock_get_irqsave(&rt_packets_lock, context); list_for_each_entry(pt_entry, &rt_packets[hash], list_entry) if (pt_entry->type == skb->protocol) { pt_entry->refcount++; rtdm_lock_put_irqrestore(&rt_packets_lock, context); pt_entry->handler(skb, pt_entry); rtdm_lock_get_irqsave(&rt_packets_lock, context); pt_entry->refcount--; rtdm_lock_put_irqrestore(&rt_packets_lock, context); goto out; } rtdm_lock_put_irqrestore(&rt_packets_lock, context); /* don't warn if running in promiscuous mode (RTcap...?) */ if ((rtdev->flags & IFF_PROMISC) == 0) rtdm_printk("RTnet: unknown layer-3 protocol\n"); kfree_rtskb(skb); out: rtdev_dereference(rtdev); return 0; }
/** * Write in the device * * This function is called when the device is written in non-realtime context. * */ static ssize_t simple_rtdm_write_nrt(struct rtdm_dev_context *context, rtdm_user_info_t * user_info, const void *buf, size_t nbyte) { buffer_t * buffer = (buffer_t *) context->dev_private; buffer->size = (nbyte > SIZE_MAX) ? SIZE_MAX : nbyte; if (rtdm_safe_copy_from_user(user_info, buffer->data, buf, buffer->size)) rtdm_printk("ERROR : can't copy data to driver\n"); return nbyte; }
/** * Read from the device * * This function is called when the device is read in non-realtime context. * */ static ssize_t simple_rtdm_read_nrt(struct rtdm_dev_context *context, rtdm_user_info_t * user_info, void *buf, size_t nbyte) { buffer_t * buffer = (buffer_t *) context->dev_private; int size = (buffer->size > nbyte) ? nbyte : buffer->size; buffer->size = 0; if (rtdm_safe_copy_to_user(user_info, buf, buffer->data, size)) rtdm_printk("ERROR : can't copy data from driver\n"); return size; }
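/*
 * A minimal user-space sketch showing how the two non-realtime handlers above
 * are typically exercised. The device name "mydrv0" is an assumption; use
 * whatever name was passed to rtdm_dev_register() for this driver.
 */
#include <stdio.h>
#include <rtdm/rtdm.h>

int main(void)
{
	char buf[64];
	int fd, n;

	fd = rt_dev_open("mydrv0", 0);          /* assumed device name */
	if (fd < 0)
		return 1;

	rt_dev_write(fd, "hello", 5);           /* handled by simple_rtdm_write_nrt() */
	n = rt_dev_read(fd, buf, sizeof(buf));  /* handled by simple_rtdm_read_nrt() */
	if (n > 0)
		printf("read %d bytes: %.*s\n", n, n, buf);

	rt_dev_close(fd);
	return 0;
}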
/* ************************************************************************ * This function runs in rtai context. * * It is called from inside rtnet whenever a packet has been received that * has to be processed by rtnetproxy. * ************************************************************************ */ static int rtnetproxy_recv(struct rtskb *rtskb) { /* Acquire rtskb (JK) */ if (rtskb_acquire(rtskb, &rtskb_pool) != 0) { rtdm_printk("rtnetproxy_recv: No free rtskb in pool\n"); kfree_rtskb(rtskb); } /* Place the rtskb in the ringbuffer: */ else if (write_to_ringbuffer(&ring_rtskb_rtnet_kernel, rtskb)) { /* Switch over to kernel context: */ rtdm_nrtsig_pend(&rtnetproxy_signal); } else { /* No space in ringbuffer => Free rtskb here... */ rtdm_printk("rtnetproxy_recv: No space in queue\n"); kfree_rtskb(rtskb); } return 0; }
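/*
 * write_to_ringbuffer() itself is not shown in this listing. Purely for
 * illustration, here is a hypothetical single-producer/single-consumer ring of
 * rtskb pointers in the same spirit; the type, field names and capacity are
 * assumptions and do not reflect the actual rtnetproxy data structure.
 */
#define PROXY_RING_SIZE 32	/* assumed capacity */

struct proxy_ring {
	struct rtskb *slot[PROXY_RING_SIZE];
	volatile unsigned int head;	/* written only by the producer (RT side) */
	volatile unsigned int tail;	/* written only by the consumer (Linux side) */
};

/* hypothetical stand-in for write_to_ringbuffer(): returns 1 if queued, 0 if full */
static int proxy_ring_put(struct proxy_ring *ring, struct rtskb *skb)
{
	unsigned int next = (ring->head + 1) % PROXY_RING_SIZE;

	if (next == ring->tail)
		return 0;		/* full: caller must free the rtskb */
	ring->slot[ring->head] = skb;
	ring->head = next;
	return 1;
}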
int initpwm(void)
{
	int i;
	int retval;

	for (i = 0; i < RC_NUM; i++) {
		up_interval[i] = RANGE_MAP100(ranges[i][0], ranges[i][1], 0);
		reconfigured[i] = 0;
	}

	retval = InitGPIO(channels, sizeof(channels) / sizeof(channels[0]));
	if (retval) {
		rtdm_printk("TB6612FNG: GPIO initialization failed\n");
		return retval;
	}
	rtdm_printk("TB6612FNG: GPIO initialized\n");

	rtdm_printk("TB6612FNG: Starting PWM generation timers.\n");
	retval = rtdm_timer_init(&up_timer, pwm_up, "up timer");
	if (retval) {
		rtdm_printk("TB6612FNG: error initializing up-timer: %i\n", retval);
		return retval;
	}

	for (i = 0; i < RC_NUM; i++) {
		retval = rtdm_timer_init(&down_timer[i], pwm_down, "down timer");
		if (retval) {
			rtdm_printk("TB6612FNG: error initializing down-timer #%i: %i\n", i, retval);
			return retval;
		}
	}

	retval = rtdm_timer_start(&up_timer,
				  PERIOD,	/* first expiry after one PERIOD */
				  PERIOD,	/* then periodic with period PERIOD */
				  RTDM_TIMERMODE_RELATIVE);
	if (retval) {
		rtdm_printk("TB6612FNG: error starting up-timer: %i\n", retval);
		return retval;
	}

	rtdm_printk("TB6612FNG: timers created\n");
	return 0;
}
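/*
 * A minimal sketch of the matching teardown path, which is not part of the
 * original listing. The name cleanuppwm() and the gpio_free() loop are
 * assumptions; rtdm_timer_destroy() is the RTDM call that pairs with
 * rtdm_timer_init().
 */
void cleanuppwm(void)
{
	int i;

	rtdm_timer_destroy(&up_timer);
	for (i = 0; i < RC_NUM; i++)
		rtdm_timer_destroy(&down_timer[i]);

	/* assumed: release the GPIO lines claimed by InitGPIO() */
	for (i = 0; i < RC_NUM; i++)
		gpio_free(channels[i].pwm);
}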
static int tulip_rx(/*RTnet*/struct rtnet_device *rtdev, nanosecs_t *time_stamp) { struct tulip_private *tp = (struct tulip_private *)rtdev->priv; int entry = tp->cur_rx % RX_RING_SIZE; int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx; int received = 0; if (tulip_debug > 4) /*RTnet*/rtdm_printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry, tp->rx_ring[entry].status); /* If we own the next entry, it is a new packet. Send it up. */ while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) { s32 status = le32_to_cpu(tp->rx_ring[entry].status); if (tulip_debug > 5) /*RTnet*/rtdm_printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n", rtdev->name, entry, status); if (--rx_work_limit < 0) break; if ((status & 0x38008300) != 0x0300) { if ((status & 0x38000300) != 0x0300) { /* Ingore earlier buffers. */ if ((status & 0xffff) != 0x7fff) { if (tulip_debug > 1) /*RTnet*/rtdm_printk(KERN_WARNING "%s: Oversized Ethernet frame " "spanned multiple buffers, status %8.8x!\n", rtdev->name, status); tp->stats.rx_length_errors++; } } else if (status & RxDescFatalErr) { /* There was a fatal error. */ if (tulip_debug > 2) /*RTnet*/rtdm_printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n", rtdev->name, status); tp->stats.rx_errors++; /* end of a packet.*/ if (status & 0x0890) tp->stats.rx_length_errors++; if (status & 0x0004) tp->stats.rx_frame_errors++; if (status & 0x0002) tp->stats.rx_crc_errors++; if (status & 0x0001) tp->stats.rx_fifo_errors++; } } else { /* Omit the four octet CRC from the length. */ short pkt_len = ((status >> 16) & 0x7ff) - 4; struct /*RTnet*/rtskb *skb; #ifndef final_version if (pkt_len > 1518) { /*RTnet*/rtdm_printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n", rtdev->name, pkt_len, pkt_len); pkt_len = 1518; tp->stats.rx_length_errors++; } #endif #if 0 /*RTnet*/ /* Check if the packet is long enough to accept without copying to a minimally-sized skbuff. */ if (pkt_len < tulip_rx_copybreak && (skb = /*RTnet*/dev_alloc_rtskb(pkt_len + 2)) != NULL) { skb->rtdev = rtdev; /*RTnet*/rtskb_reserve(skb, 2); /* 16 byte align the IP header */ pci_dma_sync_single(tp->pdev, tp->rx_buffers[entry].mapping, pkt_len, PCI_DMA_FROMDEVICE); #if ! defined(__alpha__) //eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail, // pkt_len, 0); memcpy(rtskb_put(skb, pkt_len), tp->rx_buffers[entry].skb->tail, pkt_len); #else memcpy(/*RTnet*/rtskb_put(skb, pkt_len), tp->rx_buffers[entry].skb->tail, pkt_len); #endif } else { /* Pass up the skb already on the Rx ring. */ #endif /*RTnet*/ { char *temp = /*RTnet*/rtskb_put(skb = tp->rx_buffers[entry].skb, pkt_len); #ifndef final_version if (tp->rx_buffers[entry].mapping != le32_to_cpu(tp->rx_ring[entry].buffer1)) { /*RTnet*/rtdm_printk(KERN_ERR "%s: Internal fault: The skbuff addresses " "do not match in tulip_rx: %08x vs. %08x ? / %p.\n", rtdev->name, le32_to_cpu(tp->rx_ring[entry].buffer1), tp->rx_buffers[entry].mapping, temp);/*RTnet*/ } #endif pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); tp->rx_buffers[entry].skb = NULL; tp->rx_buffers[entry].mapping = 0; } skb->protocol = /*RTnet*/rt_eth_type_trans(skb, rtdev); skb->time_stamp = *time_stamp; /*RTnet*/rtnetif_rx(skb); tp->stats.rx_packets++; tp->stats.rx_bytes += pkt_len; } received++; entry = (++tp->cur_rx) % RX_RING_SIZE; } return received; } /* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. 
*/ int tulip_interrupt(rtdm_irq_t *irq_handle) { nanosecs_t time_stamp = rtdm_clock_read();/*RTnet*/ struct rtnet_device *rtdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);/*RTnet*/ struct tulip_private *tp = (struct tulip_private *)rtdev->priv; long ioaddr = rtdev->base_addr; unsigned int csr5; int entry; int missed; int rx = 0; int tx = 0; int oi = 0; int maxrx = RX_RING_SIZE; int maxtx = TX_RING_SIZE; int maxoi = TX_RING_SIZE; unsigned int work_count = tulip_max_interrupt_work; /* Let's see whether the interrupt really is for us */ csr5 = inl(ioaddr + CSR5); if ((csr5 & (NormalIntr|AbnormalIntr)) == 0) { rtdm_printk("%s: unexpected IRQ!\n",rtdev->name); return 0; } tp->nir++; do { /* Acknowledge all of the current interrupt sources ASAP. */ outl(csr5 & 0x0001ffff, ioaddr + CSR5); if (tulip_debug > 4) /*RTnet*/rtdm_printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n", rtdev->name, csr5, inl(rtdev->base_addr + CSR5)); if (csr5 & (RxIntr | RxNoBuf)) { rx += tulip_rx(rtdev, &time_stamp); tulip_refill_rx(rtdev); } if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) { unsigned int dirty_tx; rtdm_lock_get(&tp->lock); for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0; dirty_tx++) { int entry = dirty_tx % TX_RING_SIZE; int status = le32_to_cpu(tp->tx_ring[entry].status); if (status < 0) break; /* It still has not been Txed */ /* Check for Rx filter setup frames. */ if (tp->tx_buffers[entry].skb == NULL) { /* test because dummy frames not mapped */ if (tp->tx_buffers[entry].mapping) pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping, sizeof(tp->setup_frame), PCI_DMA_TODEVICE); continue; } if (status & 0x8000) { /* There was an major error, log it. */ #ifndef final_version if (tulip_debug > 1) /*RTnet*/rtdm_printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n", rtdev->name, status); #endif tp->stats.tx_errors++; if (status & 0x4104) tp->stats.tx_aborted_errors++; if (status & 0x0C00) tp->stats.tx_carrier_errors++; if (status & 0x0200) tp->stats.tx_window_errors++; if (status & 0x0002) tp->stats.tx_fifo_errors++; if ((status & 0x0080) && tp->full_duplex == 0) tp->stats.tx_heartbeat_errors++; } else { tp->stats.tx_bytes += tp->tx_buffers[entry].skb->len; tp->stats.collisions += (status >> 3) & 15; tp->stats.tx_packets++; } pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping, tp->tx_buffers[entry].skb->len, PCI_DMA_TODEVICE); /* Free the original skb. */ /*RTnet*/dev_kfree_rtskb(tp->tx_buffers[entry].skb); tp->tx_buffers[entry].skb = NULL; tp->tx_buffers[entry].mapping = 0; tx++; rtnetif_tx(rtdev); } #ifndef final_version if (tp->cur_tx - dirty_tx > TX_RING_SIZE) { /*RTnet*/rtdm_printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n", rtdev->name, dirty_tx, tp->cur_tx); dirty_tx += TX_RING_SIZE; } #endif if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2) /*RTnet*/rtnetif_wake_queue(rtdev); tp->dirty_tx = dirty_tx; if (csr5 & TxDied) { if (tulip_debug > 2) /*RTnet*/rtdm_printk(KERN_WARNING "%s: The transmitter stopped." " CSR5 is %x, CSR6 %x, new CSR6 %x.\n", rtdev->name, csr5, inl(ioaddr + CSR6), tp->csr6); tulip_restart_rxtx(tp); } rtdm_lock_put(&tp->lock); } /* Log errors. */ if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */ if (csr5 == 0xffffffff) break; #if 0 /*RTnet*/ if (csr5 & TxJabber) tp->stats.tx_errors++; if (csr5 & TxFIFOUnderflow) { if ((tp->csr6 & 0xC000) != 0xC000) tp->csr6 += 0x4000; /* Bump up the Tx threshold */ else tp->csr6 |= 0x00200000; /* Store-n-forward. */ /* Restart the transmit process. 
*/ tulip_restart_rxtx(tp); outl(0, ioaddr + CSR1); } if (csr5 & (RxDied | RxNoBuf)) { if (tp->flags & COMET_MAC_ADDR) { outl(tp->mc_filter[0], ioaddr + 0xAC); outl(tp->mc_filter[1], ioaddr + 0xB0); } } if (csr5 & RxDied) { /* Missed a Rx frame. */ tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff; tp->stats.rx_errors++; tulip_start_rxtx(tp); } /* * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this * call is ever done under the spinlock */ if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) { if (tp->link_change) (tp->link_change)(rtdev, csr5); } if (csr5 & SytemError) { int error = (csr5 >> 23) & 7; /* oops, we hit a PCI error. The code produced corresponds * to the reason: * 0 - parity error * 1 - master abort * 2 - target abort * Note that on parity error, we should do a software reset * of the chip to get it back into a sane state (according * to the 21142/3 docs that is). * -- rmk */ /*RTnet*/rtdm_printk(KERN_ERR "%s: (%lu) System Error occured (%d)\n", rtdev->name, tp->nir, error); } #endif /*RTnet*/ /*RTnet*/rtdm_printk(KERN_ERR "%s: Error detected, " "device may not work any more (csr5=%08x)!\n", rtdev->name, csr5); /* Clear all error sources, included undocumented ones! */ outl(0x0800f7ba, ioaddr + CSR5); oi++; }
/*** * rt_ip_route_add_host: add or update host route */ int rt_ip_route_add_host(u32 addr, unsigned char *dev_addr, struct rtnet_device *rtdev) { rtdm_lockctx_t context; struct host_route *new_route; struct host_route *rt; unsigned int key; int ret = 0; rtdm_lock_get_irqsave(&rtdev->rtdev_lock, context); if ((!test_bit(PRIV_FLAG_UP, &rtdev->priv_flags) || test_and_set_bit(PRIV_FLAG_ADDING_ROUTE, &rtdev->priv_flags))) { rtdm_lock_put_irqrestore(&rtdev->rtdev_lock, context); return -EBUSY; } rtdm_lock_put_irqrestore(&rtdev->rtdev_lock, context); if ((new_route = rt_alloc_host_route()) != NULL) { new_route->dest_host.ip = addr; new_route->dest_host.rtdev = rtdev; memcpy(new_route->dest_host.dev_addr, dev_addr, rtdev->addr_len); } key = ntohl(addr) & HOST_HASH_KEY_MASK; rtdm_lock_get_irqsave(&host_table_lock, context); rt = host_hash_tbl[key]; while (rt != NULL) { if ((rt->dest_host.ip == addr) && (rt->dest_host.rtdev->local_ip == rtdev->local_ip)) { rt->dest_host.rtdev = rtdev; memcpy(rt->dest_host.dev_addr, dev_addr, rtdev->addr_len); if (new_route) rt_free_host_route(new_route); rtdm_lock_put_irqrestore(&host_table_lock, context); goto out; } rt = rt->next; } if (new_route) { new_route->next = host_hash_tbl[key]; host_hash_tbl[key] = new_route; rtdm_lock_put_irqrestore(&host_table_lock, context); } else { rtdm_lock_put_irqrestore(&host_table_lock, context); /*ERRMSG*/rtdm_printk("RTnet: no more host routes available\n"); ret = -ENOBUFS; } out: clear_bit(PRIV_FLAG_ADDING_ROUTE, &rtdev->priv_flags); return ret; }
/* During a receive, the cur_rx points to the current incoming buffer. * When we update through the ring, if the next incoming buffer has * not been given to the system, we just set the empty indicator, * effectively tossing the packet. */ static int scc_enet_rx(struct rtnet_device *rtdev, int* packets, nanosecs_abs_t *time_stamp) { struct scc_enet_private *cep; volatile cbd_t *bdp; ushort pkt_len; struct rtskb *skb; RT_DEBUG(__FUNCTION__": ...\n"); cep = (struct scc_enet_private *)rtdev->priv; /* First, grab all of the stats for the incoming packet. * These get messed up if we get called due to a busy condition. */ bdp = cep->cur_rx; for (;;) { if (bdp->cbd_sc & BD_ENET_RX_EMPTY) break; #ifndef final_version /* Since we have allocated space to hold a complete frame, both * the first and last indicators should be set. */ if ((bdp->cbd_sc & (BD_ENET_RX_FIRST | BD_ENET_RX_LAST)) != (BD_ENET_RX_FIRST | BD_ENET_RX_LAST)) rtdm_printk("CPM ENET: rcv is not first+last\n"); #endif /* Frame too long or too short. */ if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) cep->stats.rx_length_errors++; if (bdp->cbd_sc & BD_ENET_RX_NO) /* Frame alignment */ cep->stats.rx_frame_errors++; if (bdp->cbd_sc & BD_ENET_RX_CR) /* CRC Error */ cep->stats.rx_crc_errors++; if (bdp->cbd_sc & BD_ENET_RX_OV) /* FIFO overrun */ cep->stats.rx_crc_errors++; /* Report late collisions as a frame error. * On this error, the BD is closed, but we don't know what we * have in the buffer. So, just drop this frame on the floor. */ if (bdp->cbd_sc & BD_ENET_RX_CL) { cep->stats.rx_frame_errors++; } else { /* Process the incoming frame. */ cep->stats.rx_packets++; pkt_len = bdp->cbd_datlen; cep->stats.rx_bytes += pkt_len; /* This does 16 byte alignment, much more than we need. * The packet length includes FCS, but we don't want to * include that when passing upstream as it messes up * bridging applications. */ skb = dev_alloc_rtskb(pkt_len-4, &cep->skb_pool); if (skb == NULL) { rtdm_printk("%s: Memory squeeze, dropping packet.\n", rtdev->name); cep->stats.rx_dropped++; } else { skb->rtdev = rtdev; rtskb_put(skb,pkt_len-4); /* Make room */ memcpy(skb->data, cep->rx_vaddr[bdp - cep->rx_bd_base], pkt_len-4); skb->protocol=rt_eth_type_trans(skb,rtdev); skb->time_stamp = *time_stamp; rtnetif_rx(skb); (*packets)++; } } /* Clear the status flags for this buffer. */ bdp->cbd_sc &= ~BD_ENET_RX_STATS; /* Mark the buffer empty. */ bdp->cbd_sc |= BD_ENET_RX_EMPTY; /* Update BD pointer to next entry. */ if (bdp->cbd_sc & BD_ENET_RX_WRAP) bdp = cep->rx_bd_base; else bdp++; } cep->cur_rx = (cbd_t *)bdp; return 0; }
/* The interrupt handler. * This is called from the CPM handler, not the MPC core interrupt. */ static int scc_enet_interrupt(rtdm_irq_t *irq_handle) { struct rtnet_device *rtdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device); int packets = 0; struct scc_enet_private *cep; volatile cbd_t *bdp; ushort int_events; int must_restart; nanosecs_abs_t time_stamp = rtdm_clock_read(); cep = (struct scc_enet_private *)rtdev->priv; /* Get the interrupt events that caused us to be here. */ int_events = cep->sccp->scc_scce; cep->sccp->scc_scce = int_events; must_restart = 0; /* Handle receive event in its own function. */ if (int_events & SCCE_ENET_RXF) { scc_enet_rx(rtdev, &packets, &time_stamp); } /* Check for a transmit error. The manual is a little unclear * about this, so the debug code until I get it figured out. It * appears that if TXE is set, then TXB is not set. However, * if carrier sense is lost during frame transmission, the TXE * bit is set, "and continues the buffer transmission normally." * I don't know if "normally" implies TXB is set when the buffer * descriptor is closed.....trial and error :-). */ /* Transmit OK, or non-fatal error. Update the buffer descriptors. */ if (int_events & (SCCE_ENET_TXE | SCCE_ENET_TXB)) { rtdm_lock_get(&cep->lock); bdp = cep->dirty_tx; while ((bdp->cbd_sc&BD_ENET_TX_READY)==0) { RT_DEBUG(__FUNCTION__": Tx ok\n"); if ((bdp==cep->cur_tx) && (cep->tx_full == 0)) break; if (bdp->cbd_sc & BD_ENET_TX_HB) /* No heartbeat */ cep->stats.tx_heartbeat_errors++; if (bdp->cbd_sc & BD_ENET_TX_LC) /* Late collision */ cep->stats.tx_window_errors++; if (bdp->cbd_sc & BD_ENET_TX_RL) /* Retrans limit */ cep->stats.tx_aborted_errors++; if (bdp->cbd_sc & BD_ENET_TX_UN) /* Underrun */ cep->stats.tx_fifo_errors++; if (bdp->cbd_sc & BD_ENET_TX_CSL) /* Carrier lost */ cep->stats.tx_carrier_errors++; /* No heartbeat or Lost carrier are not really bad errors. * The others require a restart transmit command. */ if (bdp->cbd_sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) { must_restart = 1; cep->stats.tx_errors++; } cep->stats.tx_packets++; /* Deferred means some collisions occurred during transmit, * but we eventually sent the packet OK. */ if (bdp->cbd_sc & BD_ENET_TX_DEF) cep->stats.collisions++; /* Free the sk buffer associated with this last transmit. */ dev_kfree_rtskb(cep->tx_skbuff[cep->skb_dirty]); cep->skb_dirty = (cep->skb_dirty + 1) & TX_RING_MOD_MASK; /* Update pointer to next buffer descriptor to be transmitted. */ if (bdp->cbd_sc & BD_ENET_TX_WRAP) bdp = cep->tx_bd_base; else bdp++; /* I don't know if we can be held off from processing these * interrupts for more than one frame time. I really hope * not. In such a case, we would now want to check the * currently available BD (cur_tx) and determine if any * buffers between the dirty_tx and cur_tx have also been * sent. We would want to process anything in between that * does not have BD_ENET_TX_READY set. */ /* Since we have freed up a buffer, the ring is no longer * full. */ if (cep->tx_full) { cep->tx_full = 0; if (rtnetif_queue_stopped(rtdev)) rtnetif_wake_queue(rtdev); } cep->dirty_tx = (cbd_t *)bdp; } if (must_restart) { volatile cpm8xx_t *cp; /* Some transmit errors cause the transmitter to shut * down. We now issue a restart transmit. Since the * errors close the BD and update the pointers, the restart * _should_ pick up without having to reset any of our * pointers either. 
*/ cp = cpmp; cp->cp_cpcr = mk_cr_cmd(CPM_CR_ENET, CPM_CR_RESTART_TX) | CPM_CR_FLG; while (cp->cp_cpcr & CPM_CR_FLG); } rtdm_lock_put(&cep->lock); } /* Check for receive busy, i.e. packets coming but no place to * put them. This "can't happen" because the receive interrupt * is tossing previous frames. */ if (int_events & SCCE_ENET_BSY) { cep->stats.rx_dropped++; rtdm_printk("CPM ENET: BSY can't happen.\n"); } if (packets > 0) rt_mark_stack_mgr(rtdev); return RTDM_IRQ_HANDLED; }
static int scc_enet_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev) { struct scc_enet_private *cep = (struct scc_enet_private *)rtdev->priv; volatile cbd_t *bdp; rtdm_lockctx_t context; RT_DEBUG(__FUNCTION__": ...\n"); /* Fill in a Tx ring entry */ bdp = cep->cur_tx; #ifndef final_version if (bdp->cbd_sc & BD_ENET_TX_READY) { /* Ooops. All transmit buffers are full. Bail out. * This should not happen, since cep->tx_busy should be set. */ rtdm_printk("%s: tx queue full!.\n", rtdev->name); return 1; } #endif /* Clear all of the status flags. */ bdp->cbd_sc &= ~BD_ENET_TX_STATS; /* If the frame is short, tell CPM to pad it. */ if (skb->len <= ETH_ZLEN) bdp->cbd_sc |= BD_ENET_TX_PAD; else bdp->cbd_sc &= ~BD_ENET_TX_PAD; /* Set buffer length and buffer pointer. */ bdp->cbd_datlen = skb->len; bdp->cbd_bufaddr = __pa(skb->data); /* Save skb pointer. */ cep->tx_skbuff[cep->skb_cur] = skb; cep->stats.tx_bytes += skb->len; cep->skb_cur = (cep->skb_cur+1) & TX_RING_MOD_MASK; /* Prevent interrupts from changing the Tx ring from underneath us. */ // *** RTnet *** #if 0 rtdm_irq_disable(&cep->irq_handle); rtdm_lock_get(&cep->lock); #else rtdm_lock_get_irqsave(&cep->lock, context); #endif /* Get and patch time stamp just before the transmission */ if (skb->xmit_stamp) *skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp); /* Push the data cache so the CPM does not get stale memory * data. */ flush_dcache_range((unsigned long)(skb->data), (unsigned long)(skb->data + skb->len)); /* Send it on its way. Tell CPM its ready, interrupt when done, * its the last BD of the frame, and to put the CRC on the end. */ bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR | BD_ENET_TX_LAST | BD_ENET_TX_TC); #if 0 dev->trans_start = jiffies; #endif /* If this was the last BD in the ring, start at the beginning again. */ if (bdp->cbd_sc & BD_ENET_TX_WRAP) bdp = cep->tx_bd_base; else bdp++; if (bdp->cbd_sc & BD_ENET_TX_READY) { rtnetif_stop_queue(rtdev); cep->tx_full = 1; } cep->cur_tx = (cbd_t *)bdp; // *** RTnet *** #if 0 rtdm_lock_put(&cep->lock); rtdm_irq_enable(&cep->irq_handle); #else rtdm_lock_put_irqrestore(&cep->lock, context); #endif return 0; }
static int __init omap_uart_init_driver(void)
{
	rtdm_printk("omap_uart_init_driver function\n");
	return platform_driver_register(&omap_uart_driver);
}
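/*
 * The init routine above and the omap_uart_exit_driver() routine shown earlier
 * are normally wired up with the standard module macros. Listed here only for
 * completeness, on the assumption that the original module does the same.
 */
module_init(omap_uart_init_driver);
module_exit(omap_uart_exit_driver);
MODULE_LICENSE("GPL");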
static void rtcfg_client_recv_stage_2_cfg(int ifindex, struct rtskb *rtskb) { struct rtcfg_frm_stage_2_cfg *stage_2_cfg; struct rtcfg_device *rtcfg_dev = &device[ifindex]; size_t data_len; int ret; if (rtskb->len < sizeof(struct rtcfg_frm_stage_2_cfg)) { rtdm_mutex_unlock(&rtcfg_dev->dev_mutex); RTCFG_DEBUG(1, "RTcfg: received invalid stage_2_cfg frame\n"); kfree_rtskb(rtskb); return; } stage_2_cfg = (struct rtcfg_frm_stage_2_cfg *)rtskb->data; __rtskb_pull(rtskb, sizeof(struct rtcfg_frm_stage_2_cfg)); if (stage_2_cfg->heartbeat_period) { ret = rtdm_timer_init(&rtcfg_dev->timer, rtcfg_timer, "rtcfg-timer"); if (ret == 0) { ret = rtdm_timer_start(&rtcfg_dev->timer, XN_INFINITE, (nanosecs_rel_t)ntohs(stage_2_cfg->heartbeat_period) * 1000000, RTDM_TIMERMODE_RELATIVE); if (ret < 0) rtdm_timer_destroy(&rtcfg_dev->timer); } if (ret < 0) /*ERRMSG*/rtdm_printk("RTcfg: unable to create timer task\n"); else set_bit(FLAG_TIMER_STARTED, &rtcfg_dev->flags); } /* add server to station list */ if (rtcfg_add_to_station_list(rtcfg_dev, rtskb->mac.ethernet->h_source, stage_2_cfg->flags) < 0) { rtdm_mutex_unlock(&rtcfg_dev->dev_mutex); RTCFG_DEBUG(1, "RTcfg: unable to process stage_2_cfg frage\n"); kfree_rtskb(rtskb); return; } rtcfg_dev->other_stations = ntohl(stage_2_cfg->stations); rtcfg_dev->spec.clt.cfg_len = ntohl(stage_2_cfg->cfg_len); data_len = MIN(rtcfg_dev->spec.clt.cfg_len, rtskb->len); if (test_bit(RTCFG_FLAG_STAGE_2_DATA, &rtcfg_dev->flags) && (data_len > 0)) { rtcfg_client_queue_frag(ifindex, rtskb, data_len); rtskb = NULL; if (rtcfg_dev->stations_found == rtcfg_dev->other_stations) rtcfg_next_main_state(ifindex, RTCFG_MAIN_CLIENT_ALL_KNOWN); } else { if (rtcfg_dev->stations_found == rtcfg_dev->other_stations) { rtcfg_complete_cmd(ifindex, RTCFG_CMD_ANNOUNCE, 0); rtcfg_next_main_state(ifindex, test_bit(RTCFG_FLAG_READY, &rtcfg_dev->flags) ? RTCFG_MAIN_CLIENT_READY : RTCFG_MAIN_CLIENT_2); } else rtcfg_next_main_state(ifindex, RTCFG_MAIN_CLIENT_ALL_FRAMES); rtcfg_send_ack(ifindex); } rtdm_mutex_unlock(&rtcfg_dev->dev_mutex); if (rtskb != NULL) kfree_rtskb(rtskb); }
static int uart_open_nrt(struct rtdm_dev_context *context, rtdm_user_info_t *user_info, int oflags)
{
	MY_DEV *up = (MY_DEV *)context->device->device_data;
	/* rtdm_lockctx_t context1; */
	int retval;

	printk("Local struct up=%p\n", up);
	f_cir_buf(up);
	rtdm_lock_init(&up->lock);
	rtdm_event_init(&up->w_event_tx, 0);
	rtdm_event_init(&up->w_event_rx, 0);
	printk("name of irq=%s\n", up->name);

	retval = request_irq(up->irq, serial_omap_irq, 0, up->name, up);
	/* if (retval)
		return retval; */

	retval = rtdm_irq_request(&up->irq_handle, up->irq, rtdm_my_isr, 0, up->name, up);
	if (retval < 0) {
		rtdm_printk("error in requesting irq\n");
		dev_err(up->dev, "failure requesting irq %i\n", up->irq);
		return retval;
	}

	dev_dbg(up->dev, "serial_omap_startup+%d\n", up->line);

	serial_omap_clear_fifos(up);
	serial_out(up, UART_MCR, UART_MCR_RTS);
	(void)serial_in(up, UART_LSR);
	if (serial_in(up, UART_LSR) & UART_LSR_DR)
		(void)serial_in(up, UART_RX);
	(void)serial_in(up, UART_IIR);
	(void)serial_in(up, UART_MSR);

	/*
	 * Now, initialize the UART
	 */
	serial_out(up, UART_LCR, UART_LCR_WLEN8);
	printk("UART has word length of 8 bit\n");
	up->msr_saved_flags = 0;

	/* enable receiver line status and receive data interrupts */
	up->ier = UART_IER_RLSI | UART_IER_RDI;
	serial_out(up, UART_IER, up->ier);
	printk("enabling RLSI and RDI interrupt\n");

	/* enable module level wake up */
	serial_out(up, UART_OMAP_WER, OMAP_UART_WER_MOD_WKUP);
	printk("OMAP_UART_WER_MOD_WKUP\n");

	/* up->port_activity = jiffies; */
	return 0;
}
static int rtdm_my_isr(rtdm_irq_t *irq_context)
{
	MY_DEV *up = rtdm_irq_get_arg(irq_context, MY_DEV);
	unsigned int iir, lsr;
	unsigned int type;
	int err;
	int max_count = 256;
	rtdm_lockctx_t context1;

	up->systime1 = rtdm_clock_read();
	up->timeout = up->systime1 - up->systime;
	rtdm_printk("Interrupt Latency=%llu\n", (unsigned long long)up->timeout);
	up->systime1 = 0;
	up->systime = 0;

	rtdm_printk("I am in rtdm_my_isr......!!!\n");
	rtdm_printk("Local struct up=%p\n", up);

	err = rtdm_irq_disable(&up->irq_handle);
	if (err < 0)
		rtdm_printk("error in rtdm_irq_disable\n");

	rtdm_lock_get_irqsave(&up->lock, context1);
	do {
		iir = serial_in(up, UART_IIR);
		if (iir & UART_IIR_NO_INT)
			break;

		lsr = serial_in(up, UART_LSR);
		type = iir & 0x3e;
		switch (type) {
		case UART_IIR_THRI:
			rtdm_printk("type of int: UART_IIR_THRI\n");
			transmit_chars(up, lsr);
			rtdm_event_signal(&up->w_event_tx);
			break;
		case UART_IIR_RX_TIMEOUT:
			/* FALLTHROUGH */
		case UART_IIR_RDI:
			rtdm_printk("type of int: UART_IIR_RDI\n");
			serial_omap_rdi(up, lsr);
			rtdm_event_signal(&up->w_event_rx);
			break;
		case UART_IIR_RLSI:
			rtdm_printk("type of int: UART_IIR_RLSI\n");
			/* serial_omap_rlsi(up, lsr); */
			break;
		case UART_IIR_CTS_RTS_DSR:
			break;
		case UART_IIR_XOFF:
			/* fall through */
		default:
			break;
		}
	} while (!(iir & UART_IIR_NO_INT) && max_count--);
	rtdm_lock_put_irqrestore(&up->lock, context1);

	err = rtdm_irq_enable(&up->irq_handle);
	if (err < 0)
		rtdm_printk("error in rtdm_irq_enable\n");

	rtdm_printk("rtdm_irq ended\n");
	up->systime = rtdm_clock_read();
	return RTDM_IRQ_HANDLED;
}
static ssize_t uart_wr_rt(struct rtdm_dev_context *context, rtdm_user_info_t *user_info, const void *buf, size_t nbyte)
{
	int ret = 0;
	int err;
	int count;
	MY_DEV *up = (MY_DEV *)context->device->device_data;
	char *tmp;

	up->buf_len_tx = nbyte;
	printk("uart_wr_rt start\n");

	tmp = rtdm_malloc(nbyte);
	if (!tmp)
		return -ENOMEM;
	if (rtdm_safe_copy_from_user(user_info, tmp, buf, up->buf_len_tx))
		rtdm_printk("ERROR : can't copy data to driver\n");

	count = nbyte;
	while (count--) {
		write_buffer(up, *tmp);
		tmp = tmp + 1;
		/* up->buf_tx = (char *)tmp; */
		/* printk("up->buf_tx=%x\n", *up->buf_tx); */

		/* enable the transmitter holding register interrupt */
		if (!(up->ier & UART_IER_THRI)) {
			up->ier |= UART_IER_THRI;
			up->systime = rtdm_clock_read();
			serial_out(up, UART_IER, up->ier);
		}
	}
	tmp = tmp - nbyte;
	printk("Tx interrupt enable\n");

	printk("rtdm_event_wait before\n");
	err = rtdm_event_wait(&up->w_event_tx);
	if (err < 0) {
		dev_err(up->dev, "controller timed out\n");
		rtdm_printk("rtdm_event_wait failed: %d\n", err);
		rtdm_free(tmp);
		return err;
	}

	up->systime1 = rtdm_clock_read();
	up->timeout = up->systime1 - up->systime;
	printk("scheduling latency=%llu\n", (unsigned long long)up->timeout);

	ret = nbyte;
	printk("rtdm_event_wait after\n");
	printk("uart_wr_rt end\n");
	rtdm_free(tmp);
	return ret;
}
static void serial_omap_set_termios(MY_DEV *up, unsigned int request)
{
	int val;
	unsigned char cval = 0;
	unsigned int baud, quot;
	rtdm_lockctx_t context1;
	int err;

	printk("serial_omap_set_termios\n");
	printk("Local struct up=%p\n", up);
	printk("request=%x\n", request);

	/* bits 0-1 of the request select the word length */
	val = request & 0x03;
	printk("val=%x\n", val);
	switch (val) {
	case CS5_1:
		printk("CS5\n");
		cval = UART_LCR_WLEN5;
		break;
	case CS6_1:
		printk("CS6\n");
		cval = UART_LCR_WLEN6;
		break;
	case CS7_1:
		printk("CS7\n");
		cval = UART_LCR_WLEN7;
		break;
	default:
	case CS8_1:
		printk("CS8\n");
		cval = UART_LCR_WLEN8;
		break;
	}

	if (request & 0x04) {
		printk("set two stop bits\n");
		cval |= UART_LCR_STOP;
	}
	if (request & 0x08) {
		printk("set even parity\n");
		cval |= UART_LCR_PARITY;
	}
	if (request & 0x10) {
		printk("set odd parity\n");
		cval |= UART_LCR_EPAR;
	}

	/* bits 5-6 select the baud rate */
	val = (request & 0x60) >> 5;
	switch (val) {
	case BAUD_4800:
		printk("BAUD_4800\n");
		baud = 4800;
		break;
	case BAUD_9600:
		printk("BAUD_9600\n");
		baud = 9600;
		break;
	case BAUD_115200:
		printk("BAUD_115200\n");
		baud = 115200;
		break;
	default:
		printk("default\n");
		baud = 9600;
	}

	/* baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/13); */
	quot = serial_omap_get_divisor(up, baud);	/* gives the DLL/DLH divisor value */
	printk("serial_omap_get_divisor=%d\n", quot);

	up->calc_latency = (USEC_PER_SEC * up->fifosize) / (baud / 8);
	up->latency = up->calc_latency;
	up->dll = quot & 0xff;
	up->dlh = quot >> 8;
	up->mdr1 = UART_OMAP_MDR1_DISABLE;
	up->fcr = UART_FCR_R_TRIG_01 | UART_FCR_T_TRIG_01 | UART_FCR_ENABLE_FIFO;

	err = rtdm_irq_disable(&up->irq_handle);
	if (err < 0)
		rtdm_printk("error in rtdm_irq_disable\n");
	rtdm_lock_get_irqsave(&up->lock, context1);

	up->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
	/* if (termios->c_iflag & INPCK)
		up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;	frame/parity error indicators
	   if (termios->c_iflag & (BRKINT | PARMRK))
		up->port.read_status_mask |= UART_LSR_BI;			break interrupt indicator */

	up->ignore_status_mask = 0;	/* this should be passed from user space */
	/* if (termios->c_iflag & IGNBRK)  -- IGNBRK: ignore BREAK condition on input */
	printk("Ignore Break condition on input\n");
	up->ignore_status_mask |= UART_LSR_BI;

	up->ier &= ~UART_IER_MSI;
	serial_out(up, UART_IER, up->ier);
	printk("Enable interrupt\n");

	serial_out(up, UART_LCR, cval);		/* write the setting to the line control register, reset DLAB */
	up->lcr = cval;				/* save the line control register setting */
	up->scr = OMAP_UART_SCR_TX_EMPTY;

	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
	serial_out(up, UART_DLL, 0);
	serial_out(up, UART_DLM, 0);
	serial_out(up, UART_LCR, 0);

	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);		/* config mode B */
	up->efr = serial_in(up, UART_EFR) & ~UART_EFR_ECB;	/* EFR value without the enhanced-function write enable bit */
	up->efr &= ~UART_EFR_SCD;				/* clear special character detect */
	serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);	/* write EFR with enhanced functions enabled */

	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);		/* config mode A */
	up->mcr = serial_in(up, UART_MCR) & ~UART_MCR_TCRTLR;	/* TCRTLR=0 (no action); 1 enables TCR and TLR */
	serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);	/* write MCR with TCRTLR enabled */

	/* FIFO ENABLE, DMA MODE */
	up->scr |= OMAP_UART_SCR_RX_TRIG_GRANU1_MASK;		/* granularity of 1 for the RX trigger level */

	/* Set receive FIFO threshold to 16 characters and
	 * transmit FIFO threshold to 16 spaces */
	up->fcr &= ~OMAP_UART_FCR_RX_FIFO_TRIG_MASK;		/* don't set RX_FIFO_TRIG to 60 characters */
	up->fcr &= ~OMAP_UART_FCR_TX_FIFO_TRIG_MASK;		/* don't set TX_FIFO_TRIG to 56 characters */
	up->fcr |= UART_FCR6_R_TRIGGER_16 | UART_FCR6_T_TRIGGER_24 | UART_FCR_ENABLE_FIFO;
								/* Rx trigger at 16 chars | Tx trigger at 24 chars | FIFO_EN */
	serial_out(up, UART_FCR, up->fcr);			/* write FCR */

	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);		/* config mode B */
	serial_out(up, UART_OMAP_SCR, up->scr);			/* write SCR (supplementary control register) */

	/* Reset UART_MCR_TCRTLR: this must be done with the EFR_ECB bit set */
	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);		/* config mode A */
	serial_out(up, UART_MCR, up->mcr);			/* write MCR without TCRTLR */

	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);		/* config mode B */
	serial_out(up, UART_EFR, up->efr);			/* write EFR without special character detect */

	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);		/* config mode A */

	/* Protocol, Baud Rate, and Interrupt Settings */
	if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
		serial_omap_mdr1_errataset(up, up->mdr1);
	else
		serial_out(up, UART_OMAP_MDR1, up->mdr1);

	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);		/* config mode B */
	serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);	/* write EFR with enhanced functions enabled */
	serial_out(up, UART_LCR, 0);				/* write line control register */
	serial_out(up, UART_IER, 0);				/* write IER */

	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
	serial_out(up, UART_DLL, up->dll);	/* LS of divisor */
	serial_out(up, UART_DLM, up->dlh);	/* MS of divisor */
	serial_out(up, UART_LCR, 0);
	serial_out(up, UART_IER, up->ier);

	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
	serial_out(up, UART_EFR, up->efr);
	serial_out(up, UART_LCR, cval);

	if (baud > 230400 && baud != 3000000) {
		printk("baud > 230400\n");
		up->mdr1 = UART_OMAP_MDR1_13X_MODE;
	} else {
		printk("baud < 230400\n");
		up->mdr1 = UART_OMAP_MDR1_16X_MODE;
	}

	if (up->errata & UART_ERRATA_i202_MDR1_ACCESS) {
		printk("up->errata condition true\n");
		serial_omap_mdr1_errataset(up, up->mdr1);
	} else {
		printk("up->errata condition false\n");
		serial_out(up, UART_OMAP_MDR1, up->mdr1);
	}

	/* Configure flow control */
	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);

	/* XON1/XOFF1 accessible mode B, TCRTLR=0, ECB=0 */
	/* serial_out(up, UART_XON1, termios->c_cc[VSTART]); */
	/* serial_out(up, UART_XOFF1, termios->c_cc[VSTOP]); */

	/* Enable access to TCR/TLR */
	serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
	serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
	serial_out(up, UART_TI752_TCR, OMAP_UART_TCR_TRIG);	/* TCR (transmission control register) trigger value */

	/* no hardware flow control */
	up->efr &= ~(UART_EFR_CTS | UART_EFR_RTS);

	serial_out(up, UART_MCR, up->mcr);			/* write MCR */
	printk("write to UART_MCR\n");
	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);		/* switch to config mode B */
	printk("Switch to config mode B\n");
	serial_out(up, UART_EFR, up->efr);			/* write EFR */
	printk("write to EFR register\n");
	serial_out(up, UART_LCR, up->lcr);			/* restore LCR */
	printk("write to LCR\n");

	rtdm_lock_put_irqrestore(&up->lock, context1);
	err = rtdm_irq_enable(&up->irq_handle);
	if (err < 0)
		rtdm_printk("error in rtdm_irq_enable\n");
	printk("serial_omap_set_termios end\n");
}