int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
{
	if (!dev || !dev->netdev) {
		IRDA_WARNING("%s(), not ready yet!\n", __FUNCTION__);
		return -1;
	}

	if (!dev->irlap) {
		IRDA_WARNING("%s - too early: %p / %zd!\n",
			     __FUNCTION__, cp, count);
		return -1;
	}

	if (cp == NULL) {
		/* error already at lower level receive
		 * just update stats and set media busy
		 */
		irda_device_set_media_busy(dev->netdev, TRUE);
		dev->stats.rx_dropped++;
		IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __FUNCTION__, count);
		return 0;
	}

	/* Read the characters into the buffer */
	if (likely(atomic_read(&dev->enable_rx))) {
		while (count--)
			/* Unwrap and destuff one byte */
			async_unwrap_char(dev->netdev, &dev->stats,
					  &dev->rx_buff, *cp++);
	} else {
		while (count--) {
			/* rx not enabled: save the raw bytes and never
			 * trigger any netif_rx. The received bytes are flushed
			 * later when we re-enable rx but might be read meanwhile
			 * by the dongle driver.
			 */
			dev->rx_buff.data[dev->rx_buff.len++] = *cp++;

			/* What should we do when the buffer is full? */
			if (unlikely(dev->rx_buff.len == dev->rx_buff.truesize))
				dev->rx_buff.len = 0;
		}
	}

	return 0;
}
static int girbil_reset(struct sir_dev *dev)
{
	unsigned state = dev->fsm.substate;
	unsigned delay = 0;
	u8 control = GIRBIL_TXEN | GIRBIL_RXEN;
	int ret = 0;

	IRDA_DEBUG(2, "%s()\n", __func__);

	switch (state) {
	case SIRDEV_STATE_DONGLE_RESET:
		/* Reset dongle */
		sirdev_set_dtr_rts(dev, TRUE, FALSE);
		/* Sleep at least 5 ms */
		delay = 20;
		state = GIRBIL_STATE_WAIT1_RESET;
		break;

	case GIRBIL_STATE_WAIT1_RESET:
		/* Set DTR and clear RTS to enter command mode */
		sirdev_set_dtr_rts(dev, FALSE, TRUE);
		delay = 20;
		state = GIRBIL_STATE_WAIT2_RESET;
		break;

	case GIRBIL_STATE_WAIT2_RESET:
		/* Write control byte */
		sirdev_raw_write(dev, &control, 1);
		delay = 20;
		state = GIRBIL_STATE_WAIT3_RESET;
		break;

	case GIRBIL_STATE_WAIT3_RESET:
		/* Go back to normal mode */
		sirdev_set_dtr_rts(dev, TRUE, TRUE);
		dev->speed = 9600;
		break;

	default:
		IRDA_ERROR("%s(), undefined state %d\n", __func__, state);
		ret = -1;
		break;
	}

	dev->fsm.substate = state;

	return (delay > 0) ? delay : ret;
}
static int girbil_open(struct sir_dev *dev)
{
	struct qos_info *qos = &dev->qos;

	IRDA_DEBUG(2, "%s()\n", __func__);

	/* Power on dongle */
	sirdev_set_dtr_rts(dev, TRUE, TRUE);

	qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
	qos->min_turn_time.bits = 0x03;
	irda_qos_bits_to_value(qos);

	/* irda thread waits 50 msec for power settling */

	return 0;
}
/* Power on:
 *	(0) Clear RTS and DTR for 1 second
 *	(1) Set RTS and DTR for 1 second
 *	(2) 9600 bps now
 * Note: assume RTS, DTR are clear before
 */
static void ma600_open(dongle_t *self, struct qos_info *qos)
{
	IRDA_DEBUG(2, "%s()\n", __FUNCTION__);

	qos->baud_rate.bits &= IR_2400|IR_9600|IR_19200|IR_38400
				|IR_57600|IR_115200;
	qos->min_turn_time.bits = 0x01;		/* Needs at least 1 ms */
	irda_qos_bits_to_value(qos);

	//self->set_dtr_rts(self->dev, FALSE, FALSE);
	// should wait 1 second

	self->set_dtr_rts(self->dev, TRUE, TRUE);
	// should wait 1 second

	MOD_INC_USE_COUNT;
}
static int girbil_open(struct sir_dev *dev)
{
	struct qos_info *qos = &dev->qos;

	IRDA_DEBUG(2, "%s()\n", __func__);

	sirdev_set_dtr_rts(dev, TRUE, TRUE);

	qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
	qos->min_turn_time.bits = 0x03;
	irda_qos_bits_to_value(qos);

	return 0;
}
static int girbil_reset(struct sir_dev *dev)
{
	unsigned state = dev->fsm.substate;
	unsigned delay = 0;
	u8 control = GIRBIL_TXEN | GIRBIL_RXEN;
	int ret = 0;

	IRDA_DEBUG(2, "%s()\n", __func__);

	switch (state) {
	case SIRDEV_STATE_DONGLE_RESET:
		sirdev_set_dtr_rts(dev, TRUE, FALSE);
		delay = 20;
		state = GIRBIL_STATE_WAIT1_RESET;
		break;

	case GIRBIL_STATE_WAIT1_RESET:
		sirdev_set_dtr_rts(dev, FALSE, TRUE);
		delay = 20;
		state = GIRBIL_STATE_WAIT2_RESET;
		break;

	case GIRBIL_STATE_WAIT2_RESET:
		sirdev_raw_write(dev, &control, 1);
		delay = 20;
		state = GIRBIL_STATE_WAIT3_RESET;
		break;

	case GIRBIL_STATE_WAIT3_RESET:
		sirdev_set_dtr_rts(dev, TRUE, TRUE);
		dev->speed = 9600;
		break;

	default:
		IRDA_ERROR("%s(), undefined state %d\n", __func__, state);
		ret = -1;
		break;
	}

	dev->fsm.substate = state;

	return (delay > 0) ? delay : ret;
}
/*
 * Function iriap_open (void)
 *
 *    Opens an instance of the IrIAP layer, and registers with IrLMP
 */
struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv,
			    CONFIRM_CALLBACK callback)
{
	struct iriap_cb *self;

	IRDA_DEBUG(2, "%s()\n", __func__);

	self = kzalloc(sizeof(*self), GFP_ATOMIC);
	if (!self) {
		IRDA_WARNING("%s: Unable to kmalloc!\n", __func__);
		return NULL;
	}

	/*
	 *  Initialize instance
	 */

	self->magic = IAS_MAGIC;
	self->mode = mode;
	if (mode == IAS_CLIENT) {
		if (iriap_register_lsap(self, slsap_sel, mode)) {
			kfree(self);
			return NULL;
		}
	}

	self->confirm = callback;
	self->priv = priv;

	/* iriap_getvaluebyclass_request() will construct packets before
	 * we connect, so this must have a sane value... Jean II */
	self->max_header_size = LMP_MAX_HEADER;

	init_timer(&self->watchdog_timer);

	hashbin_insert(iriap, (irda_queue_t *) self, (long) self, NULL);

	/* Initialize state machines */
	iriap_next_client_state(self, S_DISCONNECT);
	iriap_next_call_state(self, S_MAKE_CALL);
	iriap_next_server_state(self, R_DISCONNECT);
	iriap_next_r_connect_state(self, R_WAITING);

	return self;
}
/*
 * Function ircomm_ttp_connect_request (self, userdata)
 *
 *    Issue a connect request to the remote device over TinyTP
 *
 */
int ircomm_ttp_connect_request(struct ircomm_cb *self,
			       struct sk_buff *userdata,
			       struct ircomm_info *info)
{
	int ret = 0;

	IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );

	/* Don't forget to refcount it - should be NULL anyway */
	if (userdata)
		skb_get(userdata);

	ret = irttp_connect_request(self->tsap, info->dlsap_sel,
				    info->saddr, info->daddr, NULL,
				    TTP_SAR_DISABLE, userdata);

	return ret;
}
/*
 * Function irlan_eth_open (dev)
 *
 *    Network device has been opened by user
 *
 */
static int irlan_eth_open(struct net_device *dev)
{
	struct irlan_cb *self = netdev_priv(dev);

	IRDA_DEBUG(2, "%s()\n", __func__ );

	/* Ready to play! */
	netif_stop_queue(dev); /* Wait until data link is ready */

	/* We are now open, so time to do some work */
	self->disconnect_reason = 0;
	irlan_client_wakeup(self, self->saddr, self->daddr);

	/* Make sure we have a hardware address before we return,
	   so DHCP clients get happy */
	return wait_event_interruptible(self->open_wait,
					!self->tsap_data->connected);
}
/*
 * Function ircomm_tty_tiocmget (tty)
 *
 *    Return the current state of the modem control and status lines
 *
 */
int ircomm_tty_tiocmget(struct tty_struct *tty)
{
	struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
	unsigned int result;

	IRDA_DEBUG(2, "%s()\n", __func__ );

	if (tty->flags & (1 << TTY_IO_ERROR))
		return -EIO;

	result =  ((self->settings.dte & IRCOMM_RTS) ? TIOCM_RTS : 0)
		| ((self->settings.dte & IRCOMM_DTR) ? TIOCM_DTR : 0)
		| ((self->settings.dce & IRCOMM_CD)  ? TIOCM_CAR : 0)
		| ((self->settings.dce & IRCOMM_RI)  ? TIOCM_RNG : 0)
		| ((self->settings.dce & IRCOMM_DSR) ? TIOCM_DSR : 0)
		| ((self->settings.dce & IRCOMM_CTS) ? TIOCM_CTS : 0);

	return result;
}
int ma600_reset(struct sir_dev *dev)
{
	IRDA_DEBUG(2, "%s()\n", __FUNCTION__);

	/* Reset the dongle : set DTR low for 10 ms */
	sirdev_set_dtr_rts(dev, FALSE, TRUE);
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(10));

	/* Go back to normal mode */
	sirdev_set_dtr_rts(dev, TRUE, TRUE);
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(10));

	dev->speed = 9600;	/* That's the dongle-default */

	return 0;
}
static int litelink_open(struct sir_dev *dev)
{
	struct qos_info *qos = &dev->qos;

	IRDA_DEBUG(2, "%s()\n", __func__);

	/* Power up dongle */
	sirdev_set_dtr_rts(dev, TRUE, TRUE);

	/* Set the speeds we can accept */
	qos->baud_rate.bits &= IR_115200|IR_57600|IR_38400|IR_19200|IR_9600;
	qos->min_turn_time.bits = 0x7f; /* Needs 0.01 ms */
	irda_qos_bits_to_value(qos);

	/* irda thread waits 50 msec for power settling */

	return 0;
}
/*
 * Function ircomm_tty_set_termios (tty, old_termios)
 *
 *    This routine allows the tty driver to be notified when device's
 *    termios settings have changed.  Note that a well-designed tty driver
 *    should be prepared to accept the case where old == NULL, and try to
 *    do something rational.
 */
void ircomm_tty_set_termios(struct tty_struct *tty,
			    struct ktermios *old_termios)
{
	struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
	unsigned int cflag = tty->termios->c_cflag;

	IRDA_DEBUG(2, "%s()\n", __func__ );

	if ((cflag == old_termios->c_cflag) &&
	    (RELEVANT_IFLAG(tty->termios->c_iflag) ==
	     RELEVANT_IFLAG(old_termios->c_iflag)))
	{
		return;
	}

	ircomm_tty_change_speed(self);

	/* Handle transition to B0 status */
	if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD)) {
		self->settings.dte &= ~(IRCOMM_DTR|IRCOMM_RTS);
		ircomm_param_request(self, IRCOMM_DTE, TRUE);
	}

	/* Handle transition away from B0 status */
	if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) {
		self->settings.dte |= IRCOMM_DTR;
		if (!(tty->termios->c_cflag & CRTSCTS) ||
		    !test_bit(TTY_THROTTLED, &tty->flags)) {
			self->settings.dte |= IRCOMM_RTS;
		}
		ircomm_param_request(self, IRCOMM_DTE, TRUE);
	}

	/* Handle turning off CRTSCTS */
	if ((old_termios->c_cflag & CRTSCTS) &&
	    !(tty->termios->c_cflag & CRTSCTS))
	{
		tty->hw_stopped = 0;
		ircomm_tty_start(tty);
	}
}
static int old_belkin_open(struct sir_dev *dev)
{
	struct qos_info *qos = &dev->qos;

	IRDA_DEBUG(2, "%s()\n", __FUNCTION__);

	/* Power on dongle */
	sirdev_set_dtr_rts(dev, TRUE, TRUE);

	/* Not too fast, please... */
	qos->baud_rate.bits &= IR_9600;
	/* Needs at least 10 ms (totally wild guess, can do probably better) */
	qos->min_turn_time.bits = 0x01;
	irda_qos_bits_to_value(qos);

	/* irda thread waits 50 msec for power settling */

	return 0;
}
/* Power on:
 *	(0) Clear RTS and DTR for 1 second
 *	(1) Set RTS and DTR for 1 second
 *	(2) 9600 bps now
 * Note: assume RTS, DTR are clear before
 */
static int ma600_open(struct sir_dev *dev)
{
	struct qos_info *qos = &dev->qos;

	IRDA_DEBUG(2, "%s()\n", __func__);

	sirdev_set_dtr_rts(dev, TRUE, TRUE);

	/* Explicitly set the speeds we can accept */
	qos->baud_rate.bits &= IR_2400|IR_9600|IR_19200|IR_38400
				|IR_57600|IR_115200;
	/* Hm, 0x01 means 10ms - for >= 1ms we would need 0x07 */
	qos->min_turn_time.bits = 0x01;		/* Needs at least 1 ms */
	irda_qos_bits_to_value(qos);

	/* irda thread waits 50 msec for power settling */

	return 0;
}
/*
 * Function irda_device_is_receiving (dev)
 *
 *    Check if the device driver is currently receiving data
 *
 */
int irda_device_is_receiving(struct net_device *dev)
{
	struct if_irda_req req;
	int ret;

	IRDA_DEBUG(2, "%s()\n", __FUNCTION__);

	if (!dev->do_ioctl) {
		IRDA_ERROR("%s: do_ioctl not impl. by device driver\n",
			   __FUNCTION__);
		return -1;
	}

	ret = dev->do_ioctl(dev, (struct ifreq *) &req, SIOCGRECEIVING);
	if (ret < 0)
		return ret;

	return req.ifr_receiving;
}
static irda_queue_t *dequeue_first(irda_queue_t **queue)
{
	irda_queue_t *ret;

	IRDA_DEBUG( 4, "dequeue_first()\n");

	ret = *queue;

	if ( *queue == NULL ) {
		/*
		 * Queue was empty, nothing to remove
		 */
	} else if ( (*queue)->q_next == *queue ) {
		/*
		 * Queue contained a single element; it is now empty
		 */
		*queue = NULL;
	} else {
		/*
		 * Unlink the first element and advance the head
		 */
		(*queue)->q_prev->q_next = (*queue)->q_next;
		(*queue)->q_next->q_prev = (*queue)->q_prev;
		*queue = (*queue)->q_next;
	}

	return ret;
}
static int tekram_reset(struct sir_dev *dev)
{
	IRDA_DEBUG(2, "%s()\n", __func__);

	sirdev_set_dtr_rts(dev, FALSE, TRUE);
	msleep(1);

	sirdev_set_dtr_rts(dev, TRUE, TRUE);
	udelay(75);

	dev->speed = 9600;

	return 0;
}
static int tekram_reset(struct sir_dev *dev)
{
	IRDA_DEBUG(2, "%s()\n", __func__);

	/* Clear DTR, Set RTS */
	sirdev_set_dtr_rts(dev, FALSE, TRUE);

	/* Should sleep 1 ms */
	msleep(1);

	/* Set DTR, Set RTS */
	sirdev_set_dtr_rts(dev, TRUE, TRUE);

	/* Wait at least 50 us */
	udelay(75);

	dev->speed = 9600;

	return 0;
}
/*
 * Function irlap_queue_xmit (self, skb)
 *
 *    A little wrapper for dev_queue_xmit, so we can insert some common
 *    code into it.
 */
void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb)
{
	/* Some common init stuff */
	skb->dev = self->netdev;
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->protocol = htons(ETH_P_IRDA);
	skb->priority = TC_PRIO_BESTEFFORT;

	irlap_insert_info(self, skb);

	if (unlikely(self->mode & IRDA_MODE_MONITOR)) {
		IRDA_DEBUG(3, "%s(): %s is in monitor mode\n", __func__,
			   self->netdev->name);
		dev_kfree_skb(skb);
		return;
	}

	dev_queue_xmit(skb);
}
static irda_queue_t *dequeue_general(irda_queue_t **queue, irda_queue_t* element)
{
	irda_queue_t *ret;

	IRDA_DEBUG( 4, "dequeue_general()\n");

	ret = *queue;

	if ( *queue == NULL ) {
		/*
		 * Queue was empty, nothing to remove
		 */
	} else if ( (*queue)->q_next == *queue ) {
		/*
		 * Queue contained a single element; it is now empty
		 */
		*queue = NULL;
	} else {
		/*
		 * Unlink the given element; advance the head if it was
		 * the element removed
		 */
		element->q_prev->q_next = element->q_next;
		element->q_next->q_prev = element->q_prev;
		if ( (*queue) == element)
			(*queue) = element->q_next;
	}

	return ret;
}
static int tekram_reset(struct sir_dev *dev)
{
	IRDA_DEBUG(2, "%s()\n", __FUNCTION__);

	/* Clear DTR, Set RTS */
	sirdev_set_dtr_rts(dev, FALSE, TRUE);

	/* Should sleep 1 ms */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(MSECS_TO_JIFFIES(1));

	/* Set DTR, Set RTS */
	sirdev_set_dtr_rts(dev, TRUE, TRUE);

	/* Wait at least 50 us */
	udelay(75);

	dev->speed = 9600;

	return 0;
}
static int mcp2120_reset(struct sir_dev *dev)
{
	unsigned state = dev->fsm.substate;
	unsigned delay = 0;
	int ret = 0;

	IRDA_DEBUG(2, "%s()\n", __func__);

	switch (state) {
	case SIRDEV_STATE_DONGLE_RESET:
		/* Reset dongle by setting RTS */
		sirdev_set_dtr_rts(dev, TRUE, TRUE);
		state = MCP2120_STATE_WAIT1_RESET;
		delay = 50;
		break;

	case MCP2120_STATE_WAIT1_RESET:
		/* Clear RTS and wait for at least 30 ms. */
		sirdev_set_dtr_rts(dev, FALSE, FALSE);
		state = MCP2120_STATE_WAIT2_RESET;
		delay = 50;
		break;

	case MCP2120_STATE_WAIT2_RESET:
		/* Go back to normal mode */
		sirdev_set_dtr_rts(dev, FALSE, FALSE);
		break;

	default:
		IRDA_ERROR("%s(), undefined state %d\n", __func__, state);
		ret = -EINVAL;
		break;
	}

	dev->fsm.substate = state;

	return (delay > 0) ? delay : ret;
}
/*
 * Function actisys_change_speed (task)
 *
 *    Change speed of the ACTiSYS IR-220L and IR-220L+ type IrDA dongles.
 *    To cycle through the available baud rates, pulse RTS low for a few us.
 *
 *	First, we reset the dongle to always start from a known state.
 *	Then, we cycle through the speeds by pulsing RTS low and then up.
 *	The dongle allows us to pulse quite fast, so we can set the speed
 *	in one go, which is much faster (< 100 us) and less complex than
 *	what is found in some other dongle drivers...
 *	Note that even if the new speed is the same as the current speed,
 *	we reassert the speed. This makes sure that things are all right,
 *	and it's fast anyway...
 *	By the way, this function will work for both types of dongles,
 *	because the additional speed is at the end of the sequence...
 */
static int actisys_change_speed(struct sir_dev *dev, unsigned speed)
{
	int ret = 0;
	int i = 0;

	IRDA_DEBUG(4, "%s(), speed=%d (was %d)\n", __FUNCTION__,
		   speed, dev->speed);

	/* dongle was already reset by the irda_request state machine,
	 * we are in a known state (dongle default) */

	/*
	 * Now, we can set the speed requested. Send RTS pulses until we
	 * reach the target speed
	 */
	for (i = 0; i < MAX_SPEEDS; i++) {
		if (speed == baud_rates[i]) {
			dev->speed = speed;
			break;
		}
		/* Set RTS low for 10 us */
		sirdev_set_dtr_rts(dev, TRUE, FALSE);
		udelay(MIN_DELAY);

		/* Set RTS high for 10 us */
		sirdev_set_dtr_rts(dev, TRUE, TRUE);
		udelay(MIN_DELAY);
	}

	/* Check if life is sweet... */
	if (i >= MAX_SPEEDS) {
		actisys_reset(dev);
		ret = -EINVAL;  /* This should not happen */
	}

	/* Enough work, let's get out of here... */
	return ret;
}
int ircomm_lmp_disconnect_request(struct ircomm_cb *self,
				  struct sk_buff *userdata,
				  struct ircomm_info *info)
{
	struct sk_buff *skb;
	int ret;

	IRDA_DEBUG(0, __FUNCTION__ "()\n");

	if (!userdata) {
		skb = dev_alloc_skb(64);
		if (!skb)
			return -ENOMEM;

		/* Reserve space for MUX and LAP header */
		skb_reserve(skb, LMP_MAX_HEADER);
		userdata = skb;
	}

	ret = irlmp_disconnect_request(self->lsap, userdata);

	return ret;
}
static int ircomm_state_idle(struct ircomm_cb *self, IRCOMM_EVENT event,
			     struct sk_buff *skb, struct ircomm_info *info)
{
	int ret = 0;

	switch (event) {
	case IRCOMM_CONNECT_REQUEST:
		ircomm_next_state(self, IRCOMM_WAITI);
		ret = self->issue.connect_request(self, skb, info);
		break;
	case IRCOMM_TTP_CONNECT_INDICATION:
	case IRCOMM_LMP_CONNECT_INDICATION:
		ircomm_next_state(self, IRCOMM_WAITR);
		ircomm_connect_indication(self, skb, info);
		break;
	default:
		IRDA_DEBUG(4, "%s(), unknown event: %s\n", __func__ ,
			   ircomm_event[event]);
		ret = -EINVAL;
	}

	return ret;
}
/*
 * Function irlan_eth_close (dev)
 *
 *    Stop the Ethernet network device; this function will usually be
 *    called by ifconfig down. We should now disconnect the link. We start
 *    the close timer, so that the instance will be removed if we are
 *    unable to discover the remote device after the disconnect.
 */
static int irlan_eth_close(struct net_device *dev)
{
	struct irlan_cb *self = netdev_priv(dev);

	IRDA_DEBUG(2, "%s()\n", __func__ );

	/* Stop device */
	netif_stop_queue(dev);

	irlan_close_data_channel(self);
	irlan_close_tsaps(self);

	irlan_do_client_event(self, IRLAN_LMP_DISCONNECT, NULL);
	irlan_do_provider_event(self, IRLAN_LMP_DISCONNECT, NULL);

	/* Remove frames queued on the control channel */
	skb_queue_purge(&self->client.txq);

	self->client.tx_busy = 0;

	return 0;
}
/*
 * Function irlmp_init (void)
 *
 *    Create (allocate) the main IrLMP structure
 *
 */
int __init irlmp_init(void)
{
	IRDA_DEBUG(1, "%s()\n", __func__);

	/* Initialize the irlmp structure. */
	irlmp = kzalloc(sizeof(struct irlmp_cb), GFP_KERNEL);
	if (irlmp == NULL)
		return -ENOMEM;

	irlmp->magic = LMP_MAGIC;

	irlmp->clients = hashbin_new(HB_LOCK);
	irlmp->services = hashbin_new(HB_LOCK);
	irlmp->links = hashbin_new(HB_LOCK);
	irlmp->unconnected_lsaps = hashbin_new(HB_LOCK);
	irlmp->cachelog = hashbin_new(HB_NOLOCK);

	if ((irlmp->clients == NULL) ||
	    (irlmp->services == NULL) ||
	    (irlmp->links == NULL) ||
	    (irlmp->unconnected_lsaps == NULL) ||
	    (irlmp->cachelog == NULL)) {
		return -ENOMEM;
	}

	spin_lock_init(&irlmp->cachelog->hb_spinlock);

	irlmp->last_lsap_sel = 0x0f; /* Reserved 0x00-0x0f */
	strcpy(sysctl_devname, "Linux");

	init_timer(&irlmp->discovery_timer);

	/* Do discovery every 3 seconds, conditionally */
	if (sysctl_discovery)
		irlmp_start_discovery_timer(irlmp,
					    sysctl_discovery_timeout * HZ);

	return 0;
}
/*
 * Function iriap_open (void)
 *
 *    Opens an instance of the IrIAP layer, and registers with IrLMP
 */
struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv,
			    CONFIRM_CALLBACK callback)
{
	struct iriap_cb *self;

	IRDA_DEBUG(2, __FUNCTION__ "()\n");

	self = kmalloc(sizeof(struct iriap_cb), GFP_ATOMIC);
	if (!self) {
		WARNING(__FUNCTION__ "(), Unable to kmalloc!\n");
		return NULL;
	}

	/*
	 *  Initialize instance
	 */
	memset(self, 0, sizeof(struct iriap_cb));

	self->magic = IAS_MAGIC;
	self->mode = mode;
	if (mode == IAS_CLIENT)
		iriap_register_lsap(self, slsap_sel, mode);

	self->confirm = callback;
	self->priv = priv;

	init_timer(&self->watchdog_timer);

	hashbin_insert(iriap, (irda_queue_t *) self, (int) self, NULL);

	/* Initialize state machines */
	iriap_next_client_state(self, S_DISCONNECT);
	iriap_next_call_state(self, S_MAKE_CALL);
	iriap_next_server_state(self, R_DISCONNECT);
	iriap_next_r_connect_state(self, R_WAITING);

	return self;
}
/*
 * Function ias_new_object (name, id)
 *
 *    Create a new IAS object
 *
 */
struct ias_object *irias_new_object(char *name, int id)
{
	struct ias_object *obj;

	IRDA_DEBUG( 4, "%s()\n", __func__);

	obj = kzalloc(sizeof(struct ias_object), GFP_ATOMIC);
	if (obj == NULL) {
		IRDA_WARNING("%s(), Unable to allocate object!\n",
			     __func__);
		return NULL;
	}

	obj->magic = IAS_OBJECT_MAGIC;
	obj->name = kstrndup(name, IAS_MAX_CLASSNAME, GFP_ATOMIC);
	if (!obj->name) {
		IRDA_WARNING("%s(), Unable to allocate name!\n",
			     __func__);
		kfree(obj);
		return NULL;
	}
	obj->id = id;

	/* Locking notes : the attrib spinlock has lower precedence
	 * than the objects spinlock. Never grab the objects spinlock
	 * while holding any attrib spinlock (risk of deadlock). Jean II */
	obj->attribs = hashbin_new(HB_LOCK);

	if (obj->attribs == NULL) {
		IRDA_WARNING("%s(), Unable to allocate attribs!\n",
			     __func__);
		kfree(obj->name);
		kfree(obj);
		return NULL;
	}

	return obj;
}