/**
 * snd_dma_pointer - return the current pointer to DMA transfer buffer in bytes
 * @dma: the dma number
 * @size: the dma transfer size
 *
 * Returns the current pointer in DMA transfer buffer in bytes
 */
unsigned int snd_dma_pointer(unsigned long dma, unsigned int size)
{
	unsigned long flags;
	unsigned int result, result1;

	flags = claim_dma_lock();
	clear_dma_ff(dma);
	if (!isa_dma_bridge_buggy)
		disable_dma(dma);
	result = get_dma_residue(dma);
	/*
	 * HACK - read the counter again and choose higher value in order to
	 * avoid reading during counter lower byte roll over if the
	 * isa_dma_bridge_buggy is set.
	 */
	result1 = get_dma_residue(dma);
	if (!isa_dma_bridge_buggy)
		enable_dma(dma);
	release_dma_lock(flags);
	if (unlikely(result < result1))
		result = result1;
#ifdef CONFIG_SND_DEBUG
	if (result > size)
		snd_printk(KERN_ERR "pointer (0x%x) for DMA #%ld is greater than transfer size (0x%x)\n",
			   result, dma, size);
#endif
	if (result >= size || result == 0)
		return 0;
	else
		return size - result;
}
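A minimal usage sketch (not part of the original sources): an ALSA driver's .pointer callback for an ISA sound device could convert the byte offset returned by snd_dma_pointer() into frames with bytes_to_frames(). The my_chip structure and its dma/dma_size fields are hypothetical placeholder names.

static snd_pcm_uframes_t my_chip_playback_pointer(struct snd_pcm_substream *substream)
{
	struct my_chip *chip = snd_pcm_substream_chip(substream);	/* hypothetical chip struct */
	size_t ptr;

	/* byte offset of the hardware DMA pointer within the ring buffer */
	ptr = snd_dma_pointer(chip->dma, chip->dma_size);
	return bytes_to_frames(substream->runtime, ptr);
}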
static snd_pcm_uframes_t snd_stm_pcm_player_pointer(struct snd_pcm_substream *substream)
{
	struct snd_stm_pcm_player *pcm_player = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int residue, hwptr;
	snd_pcm_uframes_t pointer;

	snd_stm_printd(2, "snd_stm_pcm_player_pointer(substream=0x%p)\n",
			substream);

	BUG_ON(!pcm_player);
	BUG_ON(!snd_stm_magic_valid(pcm_player));
	BUG_ON(!runtime);

	residue = get_dma_residue(pcm_player->fdma_channel);
	hwptr = (runtime->dma_bytes - residue) % runtime->dma_bytes;
	pointer = bytes_to_frames(runtime, hwptr);

	snd_stm_printd(2, "FDMA residue value is %i and buffer size is %u"
			" bytes...\n", residue, runtime->dma_bytes);
	snd_stm_printd(2, "... so HW pointer in frames is %lu (0x%lx)!\n",
			pointer, pointer);

	return pointer;
}
void DMAbuf_outputintr(int dev, int notify_only)
{
	struct audio_operations *adev = audio_devs[dev];
	unsigned long flags;
	struct dma_buffparms *dmap = adev->dmap_out;

	save_flags(flags);
	cli();
	if (!(dmap->flags & DMA_NODMA)) {
		int chan = dmap->dma, pos, n;
		unsigned long f;

		f = claim_dma_lock();
		if (!isa_dma_bridge_buggy)
			disable_dma(dmap->dma);
		clear_dma_ff(chan);
		pos = dmap->bytes_in_use - get_dma_residue(chan);
		if (!isa_dma_bridge_buggy)
			enable_dma(dmap->dma);
		release_dma_lock(f);

		pos = pos / dmap->fragment_size;	/* Actual qhead */
		if (pos < 0 || pos >= dmap->nbufs)
			pos = 0;
		n = 0;
		while (dmap->qhead != pos && n++ < dmap->nbufs)
			do_outputintr(dev, notify_only);
	} else
		do_outputintr(dev, notify_only);
	restore_flags(flags);
}
static void handlewrite(struct net_device *dev)
{
	/* called *only* from idle, non-reentrant */
	/* on entry, 0xfb and ltdmabuf holds data */
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	flags = claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma, DMA_MODE_WRITE);
	set_dma_addr(dma, virt_to_bus(ltdmabuf));
	set_dma_count(dma, 800);
	enable_dma(dma);
	release_dma_lock(flags);

	inb_p(base + 3);
	inb_p(base + 2);

	if (wait_timeout(dev, 0xfb)) {
		flags = claim_dma_lock();
		printk("timed out in handlewrite, dma res %d\n",
		       get_dma_residue(dev->dma));
		release_dma_lock(flags);
	}
}
/* called in irq context */
void DMAbuf_inputintr(int dev)
{
	struct audio_operations *adev = audio_devs[dev];
	struct dma_buffparms *dmap = adev->dmap_in;
	unsigned long flags;

	spin_lock_irqsave(&dmap->lock, flags);
	if (!(dmap->flags & DMA_NODMA)) {
		int chan = dmap->dma, pos, n;
		unsigned long f;

		f = claim_dma_lock();
		if (!isa_dma_bridge_buggy)
			disable_dma(dmap->dma);
		clear_dma_ff(chan);
		pos = dmap->bytes_in_use - get_dma_residue(chan);
		if (!isa_dma_bridge_buggy)
			enable_dma(dmap->dma);
		release_dma_lock(f);

		pos = pos / dmap->fragment_size;	/* Actual qhead */
		if (pos < 0 || pos >= dmap->nbufs)
			pos = 0;
		n = 0;
		while (dmap->qtail != pos && ++n < dmap->nbufs)
			do_inputintr(dev);
	} else
		do_inputintr(dev);
	spin_unlock_irqrestore(&dmap->lock, flags);
}
static void wbsd_finish_data(struct wbsd_host *host, struct mmc_data *data)
{
	unsigned long dmaflags;
	int count;

	WARN_ON(host->mrq == NULL);

	/*
	 * Send a stop command if needed.
	 */
	if (data->stop)
		wbsd_send_command(host, data->stop);

	/*
	 * DMA transfer?
	 */
	if (host->dma >= 0) {
		/*
		 * Disable DMA on the host.
		 */
		wbsd_write_index(host, WBSD_IDX_DMA, 0);

		/*
		 * Turn off ISA DMA controller.
		 */
		dmaflags = claim_dma_lock();
		disable_dma(host->dma);
		clear_dma_ff(host->dma);
		count = get_dma_residue(host->dma);
		release_dma_lock(dmaflags);

		/*
		 * Any leftover data?
		 */
		if (count) {
			printk(KERN_ERR DRIVER_NAME ": Incomplete DMA "
				"transfer. %d bytes left.\n", count);
			data->error = MMC_ERR_FAILED;
		} else {
			/*
			 * Transfer data from DMA buffer to
			 * SG list.
			 */
			if (data->flags & MMC_DATA_READ)
				wbsd_dma_to_sg(host, data);

			data->bytes_xfered = host->size;
		}
	}

	DBGF("Ending data transfer (%d bytes)\n", data->bytes_xfered);

	wbsd_request_end(host, host->mrq);
}
static ssize_t __dma_write(gpib_board_t *board, nec7210_private_t *priv,
			   dma_addr_t address, size_t length)
{
	unsigned long flags, dma_irq_flags;
	int residue = 0;
	int retval = 0;

	spin_lock_irqsave(&board->spinlock, flags);

	/* program dma controller */
	dma_irq_flags = claim_dma_lock();
	disable_dma(priv->dma_channel);
	clear_dma_ff(priv->dma_channel);
	set_dma_count(priv->dma_channel, length);
	set_dma_addr(priv->dma_channel, address);
	set_dma_mode(priv->dma_channel, DMA_MODE_WRITE);
	enable_dma(priv->dma_channel);
	release_dma_lock(dma_irq_flags);

	// enable board's dma for output
	nec7210_set_reg_bits(priv, IMR2, HR_DMAO, HR_DMAO);

	clear_bit(WRITE_READY_BN, &priv->state);
	set_bit(DMA_WRITE_IN_PROGRESS_BN, &priv->state);

	spin_unlock_irqrestore(&board->spinlock, flags);

	// suspend until message is sent
	if (wait_event_interruptible(board->wait,
				     test_bit(DMA_WRITE_IN_PROGRESS_BN, &priv->state) == 0 ||
				     test_bit(BUS_ERROR_BN, &priv->state) ||
				     test_bit(DEV_CLEAR_BN, &priv->state) ||
				     test_bit(TIMO_NUM, &board->status))) {
		GPIB_DPRINTK("gpib write interrupted!\n");
		retval = -ERESTARTSYS;
	}
	if (test_bit(TIMO_NUM, &board->status))
		retval = -ETIMEDOUT;
	if (test_and_clear_bit(DEV_CLEAR_BN, &priv->state))
		retval = -EINTR;
	if (test_and_clear_bit(BUS_ERROR_BN, &priv->state))
		retval = -EIO;

	// disable board's dma
	nec7210_set_reg_bits(priv, IMR2, HR_DMAO, 0);

	dma_irq_flags = claim_dma_lock();
	clear_dma_ff(priv->dma_channel);
	disable_dma(priv->dma_channel);
	residue = get_dma_residue(priv->dma_channel);
	release_dma_lock(dma_irq_flags);

	if (residue)
		retval = -EPIPE;

	return retval ? retval : length;
}
void labpc_drain_dma(struct comedi_device *dev)
{
	struct labpc_private *devpriv = dev->private;
	struct comedi_subdevice *s = dev->read_subdev;
	struct comedi_async *async = s->async;
	struct comedi_cmd *cmd = &async->cmd;
	int status;
	unsigned long flags;
	unsigned int max_points, num_points, residue, leftover;
	int i;

	status = devpriv->stat1;

	flags = claim_dma_lock();
	disable_dma(devpriv->dma_chan);
	/* clear flip-flop to make sure 2-byte registers for
	 * count and address get set correctly */
	clear_dma_ff(devpriv->dma_chan);

	/* figure out how many points to read */
	max_points = devpriv->dma_transfer_size / sample_size;

	/* residue is the number of points left to be done on the dma
	 * transfer.  It should always be zero at this point unless
	 * the stop_src is set to external triggering. */
	residue = get_dma_residue(devpriv->dma_chan) / sample_size;
	num_points = max_points - residue;
	if (cmd->stop_src == TRIG_COUNT && devpriv->count < num_points)
		num_points = devpriv->count;

	/* figure out how many points will be stored next time */
	leftover = 0;
	if (cmd->stop_src != TRIG_COUNT) {
		leftover = devpriv->dma_transfer_size / sample_size;
	} else if (devpriv->count > num_points) {
		leftover = devpriv->count - num_points;
		if (leftover > max_points)
			leftover = max_points;
	}

	/* write data to comedi buffer */
	for (i = 0; i < num_points; i++)
		cfc_write_to_buffer(s, devpriv->dma_buffer[i]);

	if (cmd->stop_src == TRIG_COUNT)
		devpriv->count -= num_points;

	/* set address and count for next transfer */
	set_dma_addr(devpriv->dma_chan, devpriv->dma_addr);
	set_dma_count(devpriv->dma_chan, leftover * sample_size);
	release_dma_lock(flags);

	async->events |= COMEDI_CB_BLOCK;
}
int DMAbuf_get_buffer_pointer(int dev, struct dma_buffparms *dmap, int direction)
{
	/*
	 * Try to approximate the active byte position of the DMA pointer
	 * within the buffer area as well as possible.
	 */
	int pos;
	unsigned long flags;
	unsigned long f;

	save_flags(flags);
	cli();
	if (!(dmap->flags & DMA_ACTIVE))
		pos = 0;
	else {
		int chan = dmap->dma;

		f = claim_dma_lock();
		clear_dma_ff(chan);
		if (!isa_dma_bridge_buggy)
			disable_dma(dmap->dma);
		pos = get_dma_residue(chan);
		pos = dmap->bytes_in_use - pos;

		if (!(dmap->mapping_flags & DMA_MAP_MAPPED)) {
			if (direction == DMODE_OUTPUT) {
				if (dmap->qhead == 0)
					if (pos > dmap->fragment_size)
						pos = 0;
			} else {
				if (dmap->qtail == 0)
					if (pos > dmap->fragment_size)
						pos = 0;
			}
		}
		if (pos < 0)
			pos = 0;
		if (pos >= dmap->bytes_in_use)
			pos = 0;
		if (!isa_dma_bridge_buggy)
			enable_dma(dmap->dma);
		release_dma_lock(f);
	}
	restore_flags(flags);
	/* printk( "%04x ", pos); */
	return pos;
}
static __inline__ int NCR53c406a_dma_residual(void)
{
	register int tmp;
	unsigned long flags = 0;

	save_flags(flags);
	cli();
	clear_dma_ff(dma_chan);
	tmp = get_dma_residue(dma_chan);
	restore_flags(flags);

	return tmp;
}
static __inline__ int NCR53c406a_dma_residual(void)
{
	register int tmp;
	unsigned long flags;

	flags = claim_dma_lock();
	clear_dma_ff(dma_chan);
	tmp = get_dma_residue(dma_chan);
	release_dma_lock(flags);

	return tmp;
}
static irqreturn_t pvr2_dma_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	if (get_dma_residue(PVR2_CASCADE_CHAN)) {
		printk(KERN_WARNING "DMA: SH DMAC did not complete transfer "
		       "on channel %d, waiting..\n", PVR2_CASCADE_CHAN);
		dma_wait_for_completion(PVR2_CASCADE_CHAN);
	}

	if (count++ < 10)
		pr_debug("Got a pvr2 dma interrupt for channel %d\n",
			 irq - HW_EVENT_PVR2_DMA);

	return IRQ_HANDLED;
}
/* Check to make sure that a DMA transfer hasn't timed out.  This should
 * never happen in theory, but seems to occur occasionally if the card gets
 * prodded at the wrong time.
 */
static inline void check_3c505_dma(struct net_device *dev)
{
	elp_device *adapter = netdev_priv(dev);
	if (adapter->dmaing && time_after(jiffies, adapter->current_dma.start_time + 10)) {
		unsigned long flags, f;
		pr_err("%s: DMA %s timed out, %d bytes left\n", dev->name,
		       adapter->current_dma.direction ? "download" : "upload",
		       get_dma_residue(dev->dma));
		spin_lock_irqsave(&adapter->lock, flags);
		adapter->dmaing = 0;
		adapter->busy = 0;

		f = claim_dma_lock();
		disable_dma(dev->dma);
		release_dma_lock(f);

		if (adapter->rx_active)
			adapter->rx_active--;
		outb_control(adapter->hcr_val & ~(DMAE | TCEN | DIR), dev);
		spin_unlock_irqrestore(&adapter->lock, flags);
	}
}
/**
 * snd_dma_pointer - return the current pointer to DMA transfer buffer in bytes
 * @dma: the dma number
 * @size: the dma transfer size
 *
 * Returns the current pointer in DMA transfer buffer in bytes
 */
unsigned int snd_dma_pointer(unsigned long dma, unsigned int size)
{
	unsigned long flags;
	unsigned int result;

	flags = claim_dma_lock();
	clear_dma_ff(dma);
	if (!isa_dma_bridge_buggy)
		disable_dma(dma);
	result = get_dma_residue(dma);
	if (!isa_dma_bridge_buggy)
		enable_dma(dma);
	release_dma_lock(flags);
#ifdef CONFIG_SND_DEBUG
	if (result > size)
		snd_printk(KERN_ERR "pointer (0x%x) for DMA #%ld is greater than transfer size (0x%x)\n",
			   result, dma, size);
#endif
	if (result >= size || result == 0)
		return 0;
	else
		return size - result;
}
static int sscape_download_boot(struct sscape_info *devc, unsigned char *block, int size, int flag)
{
	unsigned long flags;
	unsigned char temp;
	volatile int done, timeout_val;
	static unsigned char codec_dma_bits = 0;

	if (flag & CPF_FIRST) {
		/*
		 * First block. Have to allocate DMA and to reset the board
		 * before continuing.
		 */
		save_flags(flags);
		cli();
		codec_dma_bits = sscape_read(devc, GA_CDCFG_REG);

		if (devc->dma_allocated == 0)
			devc->dma_allocated = 1;

		restore_flags(flags);

		sscape_write(devc, GA_HMCTL_REG,
			     (temp = sscape_read(devc, GA_HMCTL_REG)) & 0x3f);	/* Reset */

		for (timeout_val = 10000; timeout_val > 0; timeout_val--)
			sscape_read(devc, GA_HMCTL_REG);	/* Delay */

		/* Take board out of reset */
		sscape_write(devc, GA_HMCTL_REG,
			     (temp = sscape_read(devc, GA_HMCTL_REG)) | 0x80);
	}
	/*
	 * Transfer one code block using DMA
	 */
	if (audio_devs[devc->codec_audiodev]->dmap_out->raw_buf == NULL) {
		printk(KERN_WARNING "soundscape: DMA buffer not available\n");
		return 0;
	}
	memcpy(audio_devs[devc->codec_audiodev]->dmap_out->raw_buf, block, size);

	save_flags(flags);
	cli();

	/******** INTERRUPTS DISABLED NOW ********/

	do_dma(devc, SSCAPE_DMA_A,
	       audio_devs[devc->codec_audiodev]->dmap_out->raw_buf_phys,
	       size, DMA_MODE_WRITE);

	/*
	 * Wait until transfer completes.
	 */
	done = 0;
	timeout_val = 30;
	while (!done && timeout_val-- > 0) {
		int resid;

		if (HZ / 50)
			sleep(HZ / 50);
		clear_dma_ff(devc->dma);
		if ((resid = get_dma_residue(devc->dma)) == 0)
			done = 1;
	}

	restore_flags(flags);
	if (!done)
		return 0;

	if (flag & CPF_LAST) {
		/*
		 * Take the board out of reset
		 */
		outb((0x00), PORT(HOST_CTRL));
		outb((0x00), PORT(MIDI_CTRL));

		temp = sscape_read(devc, GA_HMCTL_REG);
		temp |= 0x40;
		sscape_write(devc, GA_HMCTL_REG, temp);	/* Kickstart the board */

		/*
		 * Wait until the ODB wakes up
		 */
		save_flags(flags);
		cli();
		done = 0;
		timeout_val = 5 * HZ;
		while (!done && timeout_val-- > 0) {
			unsigned char x;

			sleep(1);
			x = inb(PORT(HOST_DATA));
			if (x == 0xff || x == 0xfe)	/* OBP startup acknowledge */
			{
				DDB(printk("Soundscape: Acknowledge = %x\n", x));
				done = 1;
			}
		}
		sscape_write(devc, GA_CDCFG_REG, codec_dma_bits);

		restore_flags(flags);
		if (!done) {
			printk(KERN_ERR "soundscape: The OBP didn't respond after code download\n");
			return 0;
		}
		save_flags(flags);
		cli();
		done = 0;
		timeout_val = 5 * HZ;
		while (!done && timeout_val-- > 0) {
			sleep(1);
			if (inb(PORT(HOST_DATA)) == 0xfe)	/* Host startup acknowledge */
				done = 1;
		}
		restore_flags(flags);
		if (!done) {
			printk(KERN_ERR "soundscape: OBP Initialization failed.\n");
			return 0;
		}
		printk(KERN_INFO "SoundScape board initialized OK\n");

		set_control(devc, CTL_MASTER_VOL, 100);
		set_control(devc, CTL_SYNTH_VOL, 100);

#ifdef SSCAPE_DEBUG3
		/*
		 * Temporary debugging aid. Print contents of the registers after
		 * downloading the code.
		 */
		{
			int i;

			for (i = 0; i < 13; i++)
				printk("I%d = %02x (new value)\n", i, sscape_read(devc, i));
		}
#endif
	}
	return 1;
}
/**
 * pcm3718_dma_isr - interrupt service routine for DMA data acquisition
 *
 * ptr: pointer to the private data of device object
 */
static void pcm3718_dma_isr(private_data *ptr)
{
	unsigned long ret, flags, i;
	private_data *privdata = ptr;
	adv_user_page *page = NULL;
	INT16U tmp;

	/* receive data */
	//while(advInp(privdata,8)&0x80) ;
	//while(!advInp(privdata,8)&0x10) ;
	i = 0;
	privdata->item = 0;
	privdata->page_index = 0;
	//memset(privdata->user_buf,0,privdata->hwdmasize[0]);
	//printk("----------%x convert %x\n",advInp(privdata,8)&0x10,privdata->hwdmasize[0]);

	advOutp(privdata, 0x08, 0);	// clear interrupt request
	advOutp(privdata, 0x19, 0);	// clear interrupt request

	do {
		page = privdata->user_pages + privdata->page_index;
		if (privdata->item >= page->length) {
			privdata->page_index++;
			privdata->item = 0;
		}
		privdata->page_index %= privdata->page_num;
		privdata->cur_index %= privdata->conv_num;
		i++;
		page = privdata->user_pages + privdata->page_index;

		// read data
		tmp = privdata->dmabuf[privdata->cur_index];
		memcpy((INT16U *) (page->page_addr + page->offset + privdata->item),
		       &tmp, sizeof(INT16U));
		//if(tmp&0x01!=1 ) {
		if (i < 10) {
			// printk("i :%x advin %x\n",i,tmp);
			// printk("i :%x advin %x\n",i,advInp(privdata,1));
			// printk("userbuf : %x\n",privdata->user_buf[i]);
			// printk("hwuserbuf :%x\n",privdata->hwdmaptr[i]);
		}
		privdata->item += 2;
		privdata->cur_index++;
	} while (privdata->cur_index < privdata->conv_num);

	// memcpy(privdata->user_buf,privdata->dmabuf,privdata->hwdmasize[0]);
	//memset(privdata->dmabuf,0,privdata->hwdmasize[0]);
	/// printk("user buf 0 :%x\n",privdata->user_buf[0]);
	/// printk("user-1 buf %d :%x\n",i-1,privdata->user_buf[i-1]);
	/// printk("user buf %d :%x\n",i,privdata->user_buf[i]);
	// printk("user buf 20 :%x\n",privdata->user_buf[20]);
	/// printk("cur_index:%d conv_num/2:%d tmp:%x\n",privdata->cur_index,privdata->conv_num/2,tmp);
	//printk("page index:%x page num:%x\n",privdata->page_index,privdata->page_num);

	if (!privdata->buf_stat) {
		privdata->cur_index = privdata->conv_num / 2;
		privdata->half_ready = 1;
		adv_process_info_set_event_all(&privdata->ptr_process_info, 0, 1);
	} else {
		privdata->cur_index = privdata->conv_num;
		privdata->half_ready = 2;
		privdata->trans += privdata->conv_num;
		adv_process_info_set_event_all(&privdata->ptr_process_info, 1, 1);
		if (!privdata->cyclic) {
			/* terminate */
			adv_process_info_set_event_all(&privdata->ptr_process_info, 2, 1);
			advOutp(privdata, 9, 0);	// disable interrupt
			advOutp(privdata, 6, 0);	// disable interrupt
			advOutp(privdata, 8, 0);
			// privdata->ai_stop = 1;
		} else {
			/* buffer change */
			if (privdata->overrun_flag == 1) {
				/* overrun */
				adv_process_info_set_event_all(&privdata->ptr_process_info, 3, 1);
			}
			privdata->overrun_flag = 1;
		}
	}

	if (privdata->cur_index == privdata->int_cnt) {
		/* interrupt count */
		//adv_process_info_set_event_all(&privdata->ptr_process_info, 0, 1);
	}

	// for(i=0;i<privdata->conv_num*sizeof(INT16U);i++){
	//	privdata->user_buf[i] = privdata->dmabuf[i];
	// }

	if (privdata->cyclic) {
		flags = claim_dma_lock();
		disable_dma(privdata->ioDMAbase);
		clear_dma_ff(privdata->ioDMAbase);
		set_dma_mode(privdata->ioDMAbase, DMA_MODE_READ);
		//set_dma_mode(privdata->ioDMAbase, DMA_MODE_READ|DMA_AUTOINIT);
		set_dma_addr(privdata->ioDMAbase, privdata->hwdmaptr);
		set_dma_count(privdata->ioDMAbase, privdata->hwdmasize[0]);
		ret = get_dma_residue(privdata->ioDMAbase);
		release_dma_lock(flags);
	}
	enable_dma(privdata->ioDMAbase);

	wake_up_interruptible(&privdata->event_wait);
	//privdata->buf_stat = !privdata->buf_stat;
}
static int sscape_download_boot(struct sscape_info *devc, unsigned char *block, int size, int flag)
{
	unsigned long flags;
	unsigned char temp;
	int done, timeout_val;
	static unsigned char codec_dma_bits = 0;

	if (flag & CPF_FIRST) {
		/*
		 * First block. Have to allocate DMA and to reset the board
		 * before continuing.
		 */
		save_flags(flags);
		cli();
		codec_dma_bits = sscape_read(devc, GA_CDCFG_REG);

#if 0
		sscape_write(devc, GA_CDCFG_REG, codec_dma_bits & ~0x08);	/* Disable codec DMA */
#endif

		if (devc->dma_allocated == 0) {
			devc->dma_allocated = 1;
		}
		restore_flags(flags);

		sscape_write(devc, GA_HMCTL_REG,
			     (temp = sscape_read(devc, GA_HMCTL_REG)) & 0x3f);	/* Reset */

		for (timeout_val = 10000; timeout_val > 0; timeout_val--)
			sscape_read(devc, GA_HMCTL_REG);	/* Delay */

		/* Take board out of reset */
		sscape_write(devc, GA_HMCTL_REG,
			     (temp = sscape_read(devc, GA_HMCTL_REG)) | 0x80);
	}
	/*
	 * Transfer one code block using DMA
	 */
	memcpy(audio_devs[devc->my_audiodev]->dmap_out->raw_buf, block, size);

	save_flags(flags);
	cli();

	/******** INTERRUPTS DISABLED NOW ********/

	do_dma(devc, SSCAPE_DMA_A,
	       audio_devs[devc->my_audiodev]->dmap_out->raw_buf_phys,
	       size, DMA_MODE_WRITE);

	/*
	 * Wait until transfer completes.
	 */
	sscape_sleep_flag.flags = WK_NONE;
	done = 0;
	timeout_val = 100;
	while (!done && timeout_val-- > 0) {
		int resid;

		{
			unsigned long tlimit;

			if (1)
				current_set_timeout(tlimit = jiffies + (1));
			else
				tlimit = (unsigned long) -1;
			sscape_sleep_flag.flags = WK_SLEEP;
			module_interruptible_sleep_on(&sscape_sleeper);
			if (!(sscape_sleep_flag.flags & WK_WAKEUP)) {
				if (jiffies >= tlimit)
					sscape_sleep_flag.flags |= WK_TIMEOUT;
			}
			sscape_sleep_flag.flags &= ~WK_SLEEP;
		};

		clear_dma_ff(devc->dma);
		if ((resid = get_dma_residue(devc->dma)) == 0) {
			done = 1;
		}
	}

	restore_flags(flags);
	if (!done)
		return 0;

	if (flag & CPF_LAST) {
		/*
		 * Take the board out of reset
		 */
		outb(0x00, PORT(HOST_CTRL));
		outb(0x00, PORT(MIDI_CTRL));

		temp = sscape_read(devc, GA_HMCTL_REG);
		temp |= 0x40;
		sscape_write(devc, GA_HMCTL_REG, temp);	/* Kickstart the board */

		/*
		 * Wait until the ODB wakes up
		 */
		save_flags(flags);
		cli();
		done = 0;
		timeout_val = 5 * HZ;
		while (!done && timeout_val-- > 0) {
			{
				unsigned long tlimit;

				if (1)
					current_set_timeout(tlimit = jiffies + (1));
				else
					tlimit = (unsigned long) -1;
				sscape_sleep_flag.flags = WK_SLEEP;
				module_interruptible_sleep_on(&sscape_sleeper);
				if (!(sscape_sleep_flag.flags & WK_WAKEUP)) {
					if (jiffies >= tlimit)
						sscape_sleep_flag.flags |= WK_TIMEOUT;
				}
				sscape_sleep_flag.flags &= ~WK_SLEEP;
			};
			if (inb(PORT(HOST_DATA)) == 0xff)	/* OBP startup acknowledge */
				done = 1;
		}
		sscape_write(devc, GA_CDCFG_REG, codec_dma_bits);

		restore_flags(flags);
		if (!done) {
			printk("SoundScape: The OBP didn't respond after code download\n");
			return 0;
		}
		save_flags(flags);
		cli();
		done = 0;
		timeout_val = 5 * HZ;
		while (!done && timeout_val-- > 0) {
			{
				unsigned long tlimit;

				if (1)
					current_set_timeout(tlimit = jiffies + (1));
				else
					tlimit = (unsigned long) -1;
				sscape_sleep_flag.flags = WK_SLEEP;
				module_interruptible_sleep_on(&sscape_sleeper);
				if (!(sscape_sleep_flag.flags & WK_WAKEUP)) {
					if (jiffies >= tlimit)
						sscape_sleep_flag.flags |= WK_TIMEOUT;
				}
				sscape_sleep_flag.flags &= ~WK_SLEEP;
			};
			if (inb(PORT(HOST_DATA)) == 0xfe)	/* Host startup acknowledge */
				done = 1;
		}
		restore_flags(flags);
		if (!done) {
			printk("SoundScape: OBP Initialization failed.\n");
			return 0;
		}
		printk("SoundScape board of type %d initialized OK\n",
		       get_board_type(devc));

		set_control(devc, CTL_MASTER_VOL, 100);
		set_control(devc, CTL_SYNTH_VOL, 100);

#ifdef SSCAPE_DEBUG3
		/*
		 * Temporary debugging aid. Print contents of the registers after
		 * downloading the code.
		 */
		{
			int i;

			for (i = 0; i < 13; i++)
				printk("I%d = %02x (new value)\n", i, sscape_read(devc, i));
		}
#endif
	}
	return 1;
}
/* Receive interrupt handler for the A channel */
static void a_rxint(struct device *dev, struct pi_local *lp)
{
	unsigned long flags;
	int cmd;
	int bytecount;
	char rse;
	struct sk_buff *skb;
	int sksize, pkt_len;
	struct mbuf *cur_buf;
	unsigned char *cfix;

	save_flags(flags);
	cli();			/* disable interrupts */
	cmd = lp->base + CTL;

	rse = rdscc(lp->cardbase, cmd, R1);	/* Get special condition bits from R1 */
	if (rse & Rx_OVR)
		lp->rstate = RXERROR;

	if (rse & END_FR) {	/* If end of frame */
		/* figure length of frame from 8237 */
		clear_dma_ff(lp->dmachan);
		bytecount = lp->bufsiz - get_dma_residue(lp->dmachan);

		if ((rse & CRC_ERR) || (lp->rstate > ACTIVE) || (bytecount < 10)) {
			if ((bytecount >= 10) && (rse & CRC_ERR)) {
				lp->stats.rx_crc_errors++;
			}
			if (lp->rstate == RXERROR) {
				lp->stats.rx_errors++;
				lp->stats.rx_over_errors++;
			}
			/* Reset buffer pointers */
			lp->rstate = ACTIVE;
			setup_rx_dma(lp);
		} else {
			/* Here we have a valid frame */
			/* Toss 2 crc bytes , add one for KISS */
			pkt_len = lp->rcvbuf->cnt = bytecount - 2 + 1;

			/* Get buffer for next frame */
			cur_buf = lp->rcvbuf;
			switchbuffers(lp);
			setup_rx_dma(lp);

			/* Malloc up new buffer. */
			sksize = pkt_len;

			skb = dev_alloc_skb(sksize);
			if (skb == NULL) {
				printk(KERN_ERR "PI: %s: Memory squeeze, dropping packet.\n", dev->name);
				lp->stats.rx_dropped++;
				restore_flags(flags);
				return;
			}
			skb->dev = dev;

			/* KISS kludge - prefix with a 0 byte */
			cfix = skb_put(skb, pkt_len);
			*cfix++ = 0;
			/* 'skb->data' points to the start of sk_buff data area. */
			memcpy(cfix, (char *) cur_buf->data, pkt_len - 1);
			skb->protocol = htons(ETH_P_AX25);
			skb->mac.raw = skb->data;
			IS_SKB(skb);
			netif_rx(skb);
			lp->stats.rx_packets++;
		}		/* end good frame */
	}			/* end EOF check */
	wrtscc(lp->cardbase, lp->base + CTL, R0, ERR_RES);	/* error reset */
	restore_flags(flags);
}
static void z8530_rx_done(struct z8530_channel *c)
{
	struct sk_buff *skb;
	int ct;

	/*
	 * Is our receive engine in DMA mode
	 */
	if (c->rxdma_on) {
		/*
		 * Save the ready state and the buffer currently
		 * being used as the DMA target
		 */
		int ready = c->dma_ready;
		unsigned char *rxb = c->rx_buf[c->dma_num];
		unsigned long flags;

		/*
		 * Complete this DMA. Necessary to find the length
		 */
		flags = claim_dma_lock();

		disable_dma(c->rxdma);
		clear_dma_ff(c->rxdma);
		c->rxdma_on = 0;
		ct = c->mtu - get_dma_residue(c->rxdma);
		if (ct < 0)
			ct = 2;	/* Shit happens.. */
		c->dma_ready = 0;

		/*
		 * Normal case: the other slot is free, start the next DMA
		 * into it immediately.
		 */
		if (ready) {
			c->dma_num ^= 1;
			set_dma_mode(c->rxdma, DMA_MODE_READ | 0x10);
			set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
			set_dma_count(c->rxdma, c->mtu);
			c->rxdma_on = 1;
			enable_dma(c->rxdma);
			/* Stop any frames that we missed the head of
			   from passing */
			write_zsreg(c, R0, RES_Rx_CRC);
		} else
			/* Can't occur as we don't reenable the DMA irq
			   until after the flip is done */
			printk(KERN_WARNING "%s: DMA flip overrun!\n",
			       c->netdevice->name);

		release_dma_lock(flags);

		/*
		 * Shove the old buffer into an sk_buff. We can't DMA
		 * directly into one on a PC - it might be above the 16Mb
		 * boundary. Optimisation - we could check to see if we
		 * can avoid the copy. Optimisation 2 - make the memcpy
		 * a copychecksum.
		 */
		skb = dev_alloc_skb(ct);
		if (skb == NULL) {
			c->netdevice->stats.rx_dropped++;
			printk(KERN_WARNING "%s: Memory squeeze.\n",
			       c->netdevice->name);
		} else {
			skb_put(skb, ct);
			skb_copy_to_linear_data(skb, rxb, ct);
			c->netdevice->stats.rx_packets++;
			c->netdevice->stats.rx_bytes += ct;
		}
		c->dma_ready = 1;
	} else {
		RT_LOCK;
		skb = c->skb;

		/*
		 * The game we play for non DMA is similar. We want to
		 * get the controller set up for the next packet as fast
		 * as possible. We potentially only have one byte + the
		 * fifo length for this. Thus we want to flip to the new
		 * buffer and then mess around copying and allocating
		 * things. For the current case it doesn't matter but
		 * if you build a system where the sync irq isn't blocked
		 * by the kernel IRQ disable then you need only block the
		 * sync IRQ for the RT_LOCK area.
		 *
		 */
		ct = c->count;

		c->skb = c->skb2;
		c->count = 0;
		c->max = c->mtu;
		if (c->skb) {
			c->dptr = c->skb->data;
			c->max = c->mtu;
		} else {
			c->count = 0;
			c->max = 0;
		}
		RT_UNLOCK;

		c->skb2 = dev_alloc_skb(c->mtu);
		if (c->skb2 == NULL)
			printk(KERN_WARNING "%s: memory squeeze.\n",
			       c->netdevice->name);
		else
			skb_put(c->skb2, c->mtu);
		c->netdevice->stats.rx_packets++;
		c->netdevice->stats.rx_bytes += ct;
	}
	/*
	 * If we received a frame we must now process it.
	 */
	if (skb) {
		skb_trim(skb, ct);
		c->rx_function(c, skb);
	} else {
		c->netdevice->stats.rx_dropped++;
		printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name);
	}
}
static void z8530_tx_begin(struct z8530_channel *c)
{
	unsigned long flags;

	if (c->tx_skb)
		return;

	c->tx_skb = c->tx_next_skb;
	c->tx_next_skb = NULL;
	c->tx_ptr = c->tx_next_ptr;

	if (c->tx_skb == NULL) {
		/* Idle on */
		if (c->dma_tx) {
			flags = claim_dma_lock();
			disable_dma(c->txdma);
			/*
			 * Check if we crapped out.
			 */
			if (get_dma_residue(c->txdma)) {
				c->netdevice->stats.tx_dropped++;
				c->netdevice->stats.tx_fifo_errors++;
			}
			release_dma_lock(flags);
		}
		c->txcount = 0;
	} else {
		c->txcount = c->tx_skb->len;

		if (c->dma_tx) {
			/*
			 * FIXME. DMA is broken for the original 8530,
			 * on the older parts we need to set a flag and
			 * wait for a further TX interrupt to fire this
			 * stage off
			 */
			flags = claim_dma_lock();
			disable_dma(c->txdma);

			/*
			 * These two are needed by the 8530/85C30
			 * and must be issued when idling.
			 */
			if (c->dev->type != Z85230) {
				write_zsctrl(c, RES_Tx_CRC);
				write_zsctrl(c, RES_EOM_L);
			}
			write_zsreg(c, R10, c->regs[10] & ~ABUNDER);
			clear_dma_ff(c->txdma);
			set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
			set_dma_count(c->txdma, c->txcount);
			enable_dma(c->txdma);
			release_dma_lock(flags);
			write_zsctrl(c, RES_EOM_L);
			write_zsreg(c, R5, c->regs[R5] | TxENAB);
		} else {
			/* ABUNDER off */
			write_zsreg(c, R10, c->regs[10]);
			write_zsctrl(c, RES_Tx_CRC);

			while (c->txcount && (read_zsreg(c, R0) & Tx_BUF_EMP)) {
				write_zsreg(c, R8, *c->tx_ptr++);
				c->txcount--;
			}
		}
	}
	/*
	 * Since we emptied tx_skb we can ask for more
	 */
	netif_wake_queue(c->netdevice);
}
static int __init ltpc_probe_dma(int base, int dma)
{
	int want = (dma == 3) ? 2 : (dma == 1) ? 1 : 3;
	unsigned long timeout;
	unsigned long f;

	if (want & 1) {
		if (request_dma(1, "ltpc")) {
			want &= ~1;
		} else {
			f = claim_dma_lock();
			disable_dma(1);
			clear_dma_ff(1);
			set_dma_mode(1, DMA_MODE_WRITE);
			set_dma_addr(1, virt_to_bus(ltdmabuf));
			set_dma_count(1, sizeof(struct lt_mem));
			enable_dma(1);
			release_dma_lock(f);
		}
	}
	if (want & 2) {
		if (request_dma(3, "ltpc")) {
			want &= ~2;
		} else {
			f = claim_dma_lock();
			disable_dma(3);
			clear_dma_ff(3);
			set_dma_mode(3, DMA_MODE_WRITE);
			set_dma_addr(3, virt_to_bus(ltdmabuf));
			set_dma_count(3, sizeof(struct lt_mem));
			enable_dma(3);
			release_dma_lock(f);
		}
	}
	/* set up request */

	/* FIXME -- do timings better! */

	ltdmabuf[0] = LT_READMEM;
	ltdmabuf[1] = 1;	/* mailbox */
	ltdmabuf[2] = 0;
	ltdmabuf[3] = 0;	/* address */
	ltdmabuf[4] = 0;
	ltdmabuf[5] = 1;	/* read 0x0100 bytes */
	ltdmabuf[6] = 0;	/* dunno if this is necessary */

	inb_p(io + 1);
	inb_p(io + 0);
	timeout = jiffies + 100 * HZ / 100;
	while (time_before(jiffies, timeout)) {
		if (0xfa == inb_p(io + 6))
			break;
	}

	inb_p(io + 3);
	inb_p(io + 2);
	while (time_before(jiffies, timeout)) {
		if (0xfb == inb_p(io + 6))
			break;
	}

	/* release the other dma channel (if we opened both of them) */
	if ((want & 2) && (get_dma_residue(3) == sizeof(struct lt_mem))) {
		want &= ~2;
		free_dma(3);
	}
	if ((want & 1) && (get_dma_residue(1) == sizeof(struct lt_mem))) {
		want &= ~1;
		free_dma(1);
	}
	if (!want)
		return 0;

	return (want & 2) ? 3 : 1;
}