/*
 * kstreamer "open" callback for the RX side of a SYS (FIFO) channel.
 *
 * Before the FIFO is used, configure its subchannel window: default to a
 * whole octet (start bit 0, 8 bits), then narrow it to the upstream
 * channel's window when the pipeline predecessor is an ST channel.
 *
 * Return: 0 on success, negative error code otherwise — presumably the
 * standard ks_chan open convention; tail of the function not shown here,
 * verify against callers.
 */
static int hfc_sys_chan_rx_chan_open(struct ks_chan *ks_chan)
{
	struct hfc_sys_chan_rx *chan_rx = to_sys_chan_rx(ks_chan);
	struct hfc_sys_chan *chan = chan_rx->chan;
	struct hfc_card *card = chan->port->card;
	struct ks_chan *prev_chan;
	int err;

	/* Serialize against other users of the card's registers/state. */
	hfc_card_lock(card);

	/* Default: consume the full octet (no subchannel slicing). */
	chan_rx->fifo.subchannel_bit_start = 0;
	chan_rx->fifo.subchannel_bit_count = 8;

	/* Inherit the subchannel window from the upstream channel, if any. */
	prev_chan = ks_pipeline_prev(ks_chan);
	if (prev_chan && prev_chan->ops == &hfc_st_chan_rx_chan_ops) {
		struct hfc_st_chan_rx *st_chan_rx =
			container_of(prev_chan, struct hfc_st_chan_rx, ks_chan);

		chan_rx->fifo.subchannel_bit_start =
			st_chan_rx->chan->subchannel_bit_start;
		chan_rx->fifo.subchannel_bit_count =
			st_chan_rx->chan->subchannel_bit_count;
	} else if (prev_chan && prev_chan->ops == &hfc_pcm_chan_rx_chan_ops) {
void hfc_sys_port_update_fsm( struct hfc_sys_port *port) { // struct hfc_card *card = port->card; struct hfc_fsm_entry *entries; int nentries = 0; int i; BUG_ON(port->num_chans > 64); entries = kmalloc(sizeof(*entries) * 64, GFP_ATOMIC); if (!entries) { WARN_ON(1); return; } for (i=0; i<port->num_chans; i++) { // If FIFO open! FIXME TODO if (1) { struct ks_chan *prev_chan; prev_chan = ks_pipeline_prev( &port->chans[i].rx.ks_chan); if (!prev_chan) { } else if (prev_chan->ops == &hfc_st_chan_rx_chan_ops) { struct hfc_st_chan_rx *chan_rx = container_of(prev_chan, struct hfc_st_chan_rx, ks_chan); entries[nentries].fifo = &port->chans[i].rx.fifo; entries[nentries].hfc_chan_hwindex = chan_rx->chan->hw_index; nentries++; } /*else if (prev_chan->ops == &hfc_pcm_chan_rx_chan_ops) { * ALLOCATE A HFC_CHAN struct hfc_pcm_chan_rx *chan_rx = container_of(prev_chan, struct hfc_pcm_chan_rx, ks_chan); entries[nentries++].fifo = &port->chans[i].rx.fifo; entries[nentries].hfc_chan_hwindex = chan_rx->hw_index; } */ else WARN_ON(1); } if (1) { struct ks_chan *next_chan; next_chan = ks_pipeline_next( &port->chans[i].tx.ks_chan); if (!next_chan) { } else if(next_chan->ops == &hfc_st_chan_tx_chan_ops) { struct hfc_st_chan_tx *chan_tx = container_of(next_chan, struct hfc_st_chan_tx, ks_chan); entries[nentries].fifo = &port->chans[i].tx.fifo; entries[nentries].hfc_chan_hwindex = chan_tx->chan->hw_index; nentries++; }/* else if (next_chan->ops == &hfc_pcm_chan_tx_chan_ops) { * ALLOCATE A HFC_CHAN struct hfc_pcm_chan_tx *chan_tx = container_of(next_chan, struct hfc_pcm_chan_tx, ks_chan); entries[nentries++].fifo = &port->chans[i].tx.fifo; entries[nentries].hfc_chan_hwindex = chan_tx->hw_index; }*/ else WARN_ON(1); }
/*
 * sysfs/visdn "show" attribute: dump the state of every SYS FIFO pair
 * (RX and TX) of this port into 'buf', one row per channel.
 *
 * For each FIFO the chip's F1/F2 and Z1/Z2 counter registers are read
 * (hfc_fifo_select() selects the FIFO before hfc_inw/hfc_inl), followed
 * by the fill level and three mode flags:
 *   'H' = framer enabled, 'E' = FIFO enabled, 'R' = bit-reversed.
 * The "Conn" column names the ST channel connected through the kstreamer
 * pipeline, if any (pcm support is stubbed out below).
 *
 * Returns the number of characters written to 'buf'.
 * NOTE(review): sanprintf() appears to append to the existing string,
 * bounded by PAGE_SIZE — verify against its definition.
 */
static ssize_t hfc_show_fifo_state(
	struct visdn_port *visdn_port,
	struct visdn_port_attribute *attr,
	char *buf)
{
	struct hfc_sys_port *port = to_sys_port(visdn_port);
	struct hfc_card *card = port->card;
	int i;

	*buf = '\0';

	sanprintf(buf, PAGE_SIZE,
		"\n"
		"           Receive                   Transmit\n"
		" #  F1 F2   Z1   Z2 Used Mode Conn  F1 F2   Z1   Z2"
		" Used Mode Conn\n");

	/* Hold the card lock across the whole scan so the FIFO selection
	 * and the subsequent register reads are not interleaved with
	 * other register users. */
	hfc_card_lock(card);

	for (i=0; i<port->num_chans; i++) {

//		if (!card->fifos[i][RX].used && !card->fifos[i][TX].used)
//			continue;

		struct hfc_fifo *fifo_rx = &port->chans[i].rx.fifo;
		struct hfc_fifo *fifo_tx = &port->chans[i].tx.fifo;
		union hfc_fgroup f;
		union hfc_zgroup z;

		sanprintf(buf, PAGE_SIZE, "%2d:", i);

		/* Select the RX FIFO, then snapshot its F and Z counters. */
		hfc_fifo_select(fifo_rx);

		f.f1f2 = hfc_inw(card, hfc_A_F12);
		z.z1z2 = hfc_inl(card, hfc_A_Z12);

		sanprintf(buf, PAGE_SIZE,
			" %02x %02x %04x %04x %4d %c%c%c ",
			f.f1, f.f2,
			z.z1, z.z2,
			hfc_fifo_used(fifo_rx),
			fifo_rx->framer_enabled ? 'H' : ' ',
			fifo_rx->enabled ? 'E' : ' ',
			fifo_rx->bit_reversed ? 'R' : ' ');

		{
		/* "Conn" column: name the pipeline predecessor feeding
		 * this RX FIFO, or blank padding if unconnected. */
		struct ks_chan *prev_chan;
		prev_chan = ks_pipeline_prev(&port->chans[i].rx.ks_chan);
		if (!prev_chan) {
			sanprintf(buf, PAGE_SIZE, "      ");
		} else if (prev_chan->ops == &hfc_st_chan_rx_chan_ops) {
			struct hfc_st_chan_rx *chan_rx =
				container_of(prev_chan,
					struct hfc_st_chan_rx, ks_chan);

			sanprintf(buf, PAGE_SIZE, "st%d:%-2s",
				chan_rx->chan->port->id,
				kobject_name(&chan_rx->chan->ks_node.kobj));
/*		} else if (prev_chan->ops == &hfc_pcm_chan_rx_chan_ops) {
			struct hfc_pcm_chan_rx *chan_rx =
				container_of(prev_chan,
					struct hfc_pcm_chan_rx, ks_chan);

			sanprintf(buf, PAGE_SIZE, "pcm%d:%s",
				chan_rx->chan->port->id,
				chan_rx->chan->ks_node.kobj.name);*/
		}
		}

		/* Same again for the TX side. */
		hfc_fifo_select(fifo_tx);

		f.f1f2 = hfc_inw(card, hfc_A_F12);
		z.z1z2 = hfc_inl(card, hfc_A_Z12);

		sanprintf(buf, PAGE_SIZE,
			" %02x %02x %04x %04x %4d %c%c%c ",
			f.f1, f.f2,
			z.z1, z.z2,
			hfc_fifo_used(fifo_tx),
			fifo_tx->framer_enabled ? 'H' : ' ',
			fifo_tx->enabled ? 'E' : ' ',
			fifo_tx->bit_reversed ? 'R' : ' ');

		{
		/* "Conn" column for TX: the pipeline successor fed by
		 * this FIFO; each row ends with '\n' here. */
		struct ks_chan *next_chan;
		next_chan = ks_pipeline_next(&port->chans[i].tx.ks_chan);
		if (!next_chan) {
			sanprintf(buf, PAGE_SIZE, "\n");
		} else if (next_chan->ops == &hfc_st_chan_tx_chan_ops) {
			struct hfc_st_chan_tx *chan_tx =
				container_of(next_chan,
					struct hfc_st_chan_tx, ks_chan);

			sanprintf(buf, PAGE_SIZE, "st%d:%-2s\n",
				chan_tx->chan->port->id,
				kobject_name(&chan_tx->chan->ks_node.kobj));
/*		} else if (next_chan->ops == &hfc_pcm_chan_tx_chan_ops) {
			struct hfc_pcm_chan_tx *chan_tx =
				container_of(next_chan,
					struct hfc_pcm_chan_tx, ks_chan);

			sanprintf(buf, PAGE_SIZE, "pcm%d:%s\n",
				chan_tx->chan->port->id,
				chan_tx->chan->ks_node.kobj.name);*/
		}
		}
	}

	hfc_card_unlock(card);

	return strlen(buf);
}