Example #1
static void z8530_tx_begin(struct z8530_channel *c)
{
	unsigned long flags;
	if(c->tx_skb)
		return;
		
	c->tx_skb=c->tx_next_skb;
	c->tx_next_skb=NULL;
	c->tx_ptr=c->tx_next_ptr;
	
	if(c->tx_skb==NULL)
	{
		/* Idle on */
		if(c->dma_tx)
		{
			flags=claim_dma_lock();
			disable_dma(c->txdma);
			/*
			 *	Check if we crapped out.
			 */
			if (get_dma_residue(c->txdma))
			{
				c->netdevice->stats.tx_dropped++;
				c->netdevice->stats.tx_fifo_errors++;
			}
			release_dma_lock(flags);
		}
		c->txcount=0;
	}
	else
	{
		c->txcount=c->tx_skb->len;
		
		
		if(c->dma_tx)
		{
			/*
			 *	FIXME. DMA is broken for the original 8530,
			 *	on the older parts we need to set a flag and
			 *	wait for a further TX interrupt to fire this
			 *	stage off	
			 */
			 
			flags=claim_dma_lock();
			disable_dma(c->txdma);

			/*
			 *	These two are needed by the 8530/85C30
			 *	and must be issued when idling.
			 */
			 
			if(c->dev->type!=Z85230)
			{
				write_zsctrl(c, RES_Tx_CRC);
				write_zsctrl(c, RES_EOM_L);
			}	
			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
			clear_dma_ff(c->txdma);
			set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
			set_dma_count(c->txdma, c->txcount);
			enable_dma(c->txdma);
			release_dma_lock(flags);
			write_zsctrl(c, RES_EOM_L);
			write_zsreg(c, R5, c->regs[R5]|TxENAB);
		}
		else
		{

			/* ABUNDER off */
			write_zsreg(c, R10, c->regs[10]);
			write_zsctrl(c, RES_Tx_CRC);
	
			while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
			{		
				write_zsreg(c, R8, *c->tx_ptr++);
				c->txcount--;
			}

		}
	}
	/*
	 *	Since we emptied tx_skb we can ask for more
	 */
	netif_wake_queue(c->netdevice);
}
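The DMA branch above is an instance of the standard ISA-style DMA programming sequence, with virt_to_bus() supplying the address the DMA controller will actually see. Below is a minimal sketch of that sequence on its own; the function and parameter names (start_tx_dma, chan, buf, len) are hypothetical and not taken from the driver.

#include <asm/dma.h>	/* claim_dma_lock(), set_dma_addr(), enable_dma(), ... */
#include <asm/io.h>	/* virt_to_bus() */

/* Sketch only: program DMA channel 'chan' to push 'len' bytes from 'buf'. */
static void start_tx_dma(unsigned int chan, void *buf, unsigned int len)
{
	unsigned long flags;

	flags = claim_dma_lock();		/* serialise access to the DMA controller */
	disable_dma(chan);			/* mask the channel while reprogramming it */
	clear_dma_ff(chan);			/* reset the address/count flip-flop */
	set_dma_mode(chan, DMA_MODE_WRITE);	/* memory -> device (transmit) */
	set_dma_addr(chan, virt_to_bus(buf));	/* bus address, not the kernel virtual one */
	set_dma_count(chan, len);
	enable_dma(chan);			/* unmask; transfer runs on device request */
	release_dma_lock(flags);
}

z8530_tx_begin() leaves the DMA mode set up elsewhere and interleaves the SCC-specific register writes around this core sequence.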
struct net_device * __init ltpc_probe(void)
{
	struct net_device *dev;
	int err = -ENOMEM;
	int x=0,y=0;
	int autoirq;
	unsigned long f;
	unsigned long timeout;

	dev = alloc_ltalkdev(sizeof(struct ltpc_private));
	if (!dev)
		goto out;

	/* probe for the I/O port address */
	
	if (io != 0x240 && request_region(0x220,8,"ltpc")) {
		x = inb_p(0x220+6);
		if ( (x!=0xff) && (x>=0xf0) ) {
			io = 0x220;
			goto got_port;
		}
		release_region(0x220,8);
	}
	if (io != 0x220 && request_region(0x240,8,"ltpc")) {
		y = inb_p(0x240+6);
		if ( (y!=0xff) && (y>=0xf0) ){ 
			io = 0x240;
			goto got_port;
		}
		release_region(0x240,8);
	} 

	/* give up in despair */
	printk(KERN_ERR "LocalTalk card not found; 220 = %02x, 240 = %02x.\n", x,y);
	err = -ENODEV;
	goto out1;

 got_port:
	/* probe for the IRQ line */
	if (irq < 2) {
		unsigned long irq_mask;

		irq_mask = probe_irq_on();
		/* reset the interrupt line */
		inb_p(io+7);
		inb_p(io+7);
		/* trigger an interrupt (I hope) */
		inb_p(io+6);
		mdelay(2);
		autoirq = probe_irq_off(irq_mask);

		if (autoirq == 0) {
			printk(KERN_ERR "ltpc: probe at %#x failed to detect IRQ line.\n", io);
		} else {
			irq = autoirq;
		}
	}

	/* allocate a DMA buffer */
	ltdmabuf = (unsigned char *) dma_mem_alloc(1000);
	if (!ltdmabuf) {
		printk(KERN_ERR "ltpc: mem alloc failed\n");
		err = -ENOMEM;
		goto out2;
	}

	ltdmacbuf = &ltdmabuf[800];

	if(debug & DEBUG_VERBOSE) {
		printk("ltdmabuf pointer %08lx\n",(unsigned long) ltdmabuf);
	}

	/* reset the card */

	inb_p(io+1);
	inb_p(io+3);

	msleep(20);

	inb_p(io+0);
	inb_p(io+2);
	inb_p(io+7); /* clear reset */
	inb_p(io+4); 
	inb_p(io+5);
	inb_p(io+5); /* enable dma */
	inb_p(io+6); /* tri-state interrupt line */

	ssleep(1);
	
	/* now, figure out which dma channel we're using, unless it's
	   already been specified */
	/* well, 0 is a legal DMA channel, but the LTPC card doesn't
	   use it... */
	dma = ltpc_probe_dma(io, dma);
	if (!dma) {  /* no dma channel */
		printk(KERN_ERR "No DMA channel found on ltpc card.\n");
		err = -ENODEV;
		goto out3;
	}

	/* print out friendly message */
	if(irq)
		printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, IR%d, DMA%d.\n",io,irq,dma);
	else
		printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, DMA%d.  Using polled mode.\n",io,dma);

	dev->netdev_ops = &ltpc_netdev;
	dev->base_addr = io;
	dev->irq = irq;
	dev->dma = dma;

	/* the card will want to send a result at this point */
	/* (I think... leaving out this part makes the kernel crash,
	 * so I put it back in...) */

	f=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_READ);
	set_dma_addr(dma,virt_to_bus(ltdmabuf));
	set_dma_count(dma,0x100);
	enable_dma(dma);
	release_dma_lock(f);

	(void) inb_p(io+3);
	(void) inb_p(io+2);
	timeout = jiffies+100*HZ/100;

	while(time_before(jiffies, timeout)) {
		if( 0xf9 == inb_p(io+6))
			break;
		schedule();
	}

	if(debug & DEBUG_VERBOSE) {
		printk("setting up timer and irq\n");
	}

	/* grab it and don't let go :-) */
	if (irq && request_irq( irq, ltpc_interrupt, 0, "ltpc", dev) >= 0)
	{
		(void) inb_p(io+7);  /* enable interrupts from board */
		(void) inb_p(io+7);  /* and reset irq line */
	} else {
		if( irq )
			printk(KERN_ERR "ltpc: IRQ already in use, using polled mode.\n");
		dev->irq = 0;
		/* polled mode -- 20 times per second */
		/* this is really, really slow... should it poll more often? */
		init_timer(&ltpc_timer);
		ltpc_timer.function=ltpc_poll;
		ltpc_timer.data = (unsigned long) dev;

		ltpc_timer.expires = jiffies + HZ/20;
		add_timer(&ltpc_timer);
	}
	err = register_netdev(dev);
	if (err)
		goto out4;

	return dev;	/* success: hand the registered device back to the caller */
out4:
	del_timer_sync(&ltpc_timer);
	if (dev->irq)
		free_irq(dev->irq, dev);
out3:
	free_pages((unsigned long)ltdmabuf, get_order(1000));
out2:
	release_region(io, 8);
out1:
	free_netdev(dev);
out:
	return ERR_PTR(err);
}
static int __init ltpc_probe_dma(int base, int dma)
{
	int want = (dma == 3) ? 2 : (dma == 1) ? 1 : 3;
  	unsigned long timeout;
  	unsigned long f;
  
  	if (want & 1) {
		if (request_dma(1,"ltpc")) {
			want &= ~1;
		} else {
			f=claim_dma_lock();
			disable_dma(1);
			clear_dma_ff(1);
			set_dma_mode(1,DMA_MODE_WRITE);
			set_dma_addr(1,virt_to_bus(ltdmabuf));
			set_dma_count(1,sizeof(struct lt_mem));
			enable_dma(1);
			release_dma_lock(f);
		}
	}
	if (want & 2) {
		if (request_dma(3,"ltpc")) {
			want &= ~2;
		} else {
			f=claim_dma_lock();
			disable_dma(3);
			clear_dma_ff(3);
			set_dma_mode(3,DMA_MODE_WRITE);
			set_dma_addr(3,virt_to_bus(ltdmabuf));
			set_dma_count(3,sizeof(struct lt_mem));
			enable_dma(3);
			release_dma_lock(f);
		}
	}
	/* set up request */

	/* FIXME -- do timings better! */

	ltdmabuf[0] = LT_READMEM;
	ltdmabuf[1] = 1;  /* mailbox */
	ltdmabuf[2] = 0; ltdmabuf[3] = 0;  /* address */
	ltdmabuf[4] = 0; ltdmabuf[5] = 1;  /* read 0x0100 bytes */
	ltdmabuf[6] = 0; /* dunno if this is necessary */

	inb_p(io+1);
	inb_p(io+0);
	timeout = jiffies+100*HZ/100;
	while(time_before(jiffies, timeout)) {
		if ( 0xfa == inb_p(io+6) ) break;
	}

	inb_p(io+3);
	inb_p(io+2);
	while(time_before(jiffies, timeout)) {
		if ( 0xfb == inb_p(io+6) ) break;
	}

	/* release the other dma channel (if we opened both of them) */

	if ((want & 2) && (get_dma_residue(3)==sizeof(struct lt_mem))) {
		want &= ~2;
		free_dma(3);
	}

	if ((want & 1) && (get_dma_residue(1)==sizeof(struct lt_mem))) {
		want &= ~1;
		free_dma(1);
	}

	if (!want)
		return 0;

	return (want & 2) ? 3 : 1;
}
Example #4
/***************************************************************** Detection */
int __init blz1230_esp_detect(struct scsi_host_template *tpnt)
{
	struct NCR_ESP *esp;
	struct zorro_dev *z = NULL;
	unsigned long address;
	struct ESP_regs *eregs;
	unsigned long board;

#if MKIV
#define REAL_BLZ1230_ID		ZORRO_PROD_PHASE5_BLIZZARD_1230_IV_1260
#define REAL_BLZ1230_ESP_ADDR	BLZ1230_ESP_ADDR
#define REAL_BLZ1230_DMA_ADDR	BLZ1230_DMA_ADDR
#else
#define REAL_BLZ1230_ID		ZORRO_PROD_PHASE5_BLIZZARD_1230_II_FASTLANE_Z3_CYBERSCSI_CYBERSTORM060
#define REAL_BLZ1230_ESP_ADDR	BLZ1230II_ESP_ADDR
#define REAL_BLZ1230_DMA_ADDR	BLZ1230II_DMA_ADDR
#endif

	if ((z = zorro_find_device(REAL_BLZ1230_ID, z))) {
	    board = z->resource.start;
	    if (request_mem_region(board+REAL_BLZ1230_ESP_ADDR,
				   sizeof(struct ESP_regs), "NCR53C9x")) {
		/* Do some magic to figure out if the blizzard is
		 * equipped with a SCSI controller
		 */
		address = ZTWO_VADDR(board);
		eregs = (struct ESP_regs *)(address + REAL_BLZ1230_ESP_ADDR);
		esp = esp_allocate(tpnt, (void *)board+REAL_BLZ1230_ESP_ADDR);

		esp_write(eregs->esp_cfg1, (ESP_CONFIG1_PENABLE | 7));
		udelay(5);
		if(esp_read(eregs->esp_cfg1) != (ESP_CONFIG1_PENABLE | 7))
			goto err_out;

		/* Do command transfer with programmed I/O */
		esp->do_pio_cmds = 1;

		/* Required functions */
		esp->dma_bytes_sent = &dma_bytes_sent;
		esp->dma_can_transfer = &dma_can_transfer;
		esp->dma_dump_state = &dma_dump_state;
		esp->dma_init_read = &dma_init_read;
		esp->dma_init_write = &dma_init_write;
		esp->dma_ints_off = &dma_ints_off;
		esp->dma_ints_on = &dma_ints_on;
		esp->dma_irq_p = &dma_irq_p;
		esp->dma_ports_p = &dma_ports_p;
		esp->dma_setup = &dma_setup;

		/* Optional functions */
		esp->dma_barrier = 0;
		esp->dma_drain = 0;
		esp->dma_invalidate = 0;
		esp->dma_irq_entry = 0;
		esp->dma_irq_exit = 0;
		esp->dma_led_on = 0;
		esp->dma_led_off = 0;
		esp->dma_poll = 0;
		esp->dma_reset = 0;

		/* SCSI chip speed */
		esp->cfreq = 40000000;

		/* The DMA registers on the Blizzard are mapped
		 * relative to the device (i.e. in the same Zorro
		 * I/O block).
		 */
		esp->dregs = (void *)(address + REAL_BLZ1230_DMA_ADDR);
	
		/* ESP register base */
		esp->eregs = eregs;

		/* Set the command buffer */
		esp->esp_command = cmd_buffer;
		esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);

		esp->irq = IRQ_AMIGA_PORTS;
		esp->slot = board+REAL_BLZ1230_ESP_ADDR;
		if (request_irq(IRQ_AMIGA_PORTS, esp_intr, SA_SHIRQ,
				 "Blizzard 1230 SCSI IV", esp->ehost))
			goto err_out;

		/* Figure out our scsi ID on the bus */
		esp->scsi_id = 7;
		
		/* We don't have a differential SCSI-bus. */
		esp->diff = 0;

		esp_initialize(esp);

		printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use);
		esps_running = esps_in_use;
		return esps_in_use;
	    }
	}
	return 0;
 
 err_out:
	scsi_unregister(esp->ehost);
	esp_deallocate(esp);
	release_mem_region(board+REAL_BLZ1230_ESP_ADDR,
			   sizeof(struct ESP_regs));
	return 0;
}
Example #5
static inline phys_addr
va_to_pa(void *x) {
	return x ? virt_to_bus(x) : I596_NULL;
}
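va_to_pa() wraps virt_to_bus() so that a NULL pointer becomes the I596_NULL sentinel the i82596 expects at the end of its descriptor chains. A hypothetical usage sketch follows; the descriptor layout and names (i596_rbd_example, link_rbd) are illustrative, not the driver's own.

/* Sketch only: a simplified receive-buffer descriptor as the chip might see it. */
struct i596_rbd_example {
	phys_addr next;		/* bus address of the next descriptor, or I596_NULL */
	phys_addr data;		/* bus address of the data buffer */
};

static void link_rbd(struct i596_rbd_example *rbd,
		     struct i596_rbd_example *next, void *buf)
{
	rbd->next = va_to_pa(next);	/* a NULL next pointer becomes I596_NULL */
	rbd->data = va_to_pa(buf);	/* the device only understands bus addresses */
}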
Example #6
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
    unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
    unsigned long addr = virt_to_bus(cmd->SCp.ptr);
    struct Scsi_Host *instance = cmd->device->host;

    /* don't allow DMA if the physical address is bad */
    if (addr & A2091_XFER_MASK)
    {
	HDATA(instance)->dma_bounce_len = (cmd->SCp.this_residual + 511)
	    & ~0x1ff;
	HDATA(instance)->dma_bounce_buffer =
	    kmalloc (HDATA(instance)->dma_bounce_len, GFP_KERNEL);
	
	/* can't allocate memory; use PIO */
	if (!HDATA(instance)->dma_bounce_buffer) {
	    HDATA(instance)->dma_bounce_len = 0;
	    return 1;
	}

	/* get the physical address of the bounce buffer */
	addr = virt_to_bus(HDATA(instance)->dma_bounce_buffer);

	/* the bounce buffer may not be in the first 16M of physmem */
	if (addr & A2091_XFER_MASK) {
	    /* we could use chipmem... maybe later */
	    kfree (HDATA(instance)->dma_bounce_buffer);
	    HDATA(instance)->dma_bounce_buffer = NULL;
	    HDATA(instance)->dma_bounce_len = 0;
	    return 1;
	}

	if (!dir_in) {
		/* copy to bounce buffer for a write */
		memcpy (HDATA(instance)->dma_bounce_buffer,
			cmd->SCp.ptr, cmd->SCp.this_residual);
	}
    }

    /* setup dma direction */
    if (!dir_in)
	cntr |= CNTR_DDIR;

    /* remember direction */
    HDATA(cmd->device->host)->dma_dir = dir_in;

    DMA(cmd->device->host)->CNTR = cntr;

    /* setup DMA *physical* address */
    DMA(cmd->device->host)->ACR = addr;

    if (dir_in){
	/* invalidate any cache */
	cache_clear (addr, cmd->SCp.this_residual);
    }else{
	/* push any dirty cache */
	cache_push (addr, cmd->SCp.this_residual);
      }
    /* start DMA */
    DMA(cmd->device->host)->ST_DMA = 1;

    /* return success */
    return 0;
}
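dma_setup() above refuses to DMA directly whenever virt_to_bus() yields an address outside the range the controller's engine can reach, and falls back to a kmalloc'd bounce buffer whose address is re-checked in the same way. A distilled sketch of that check; the mask value and names (EXAMPLE_XFER_MASK, dma_addr_ok) are illustrative, not the driver's definitions.

/* Sketch only: illustrative mask standing in for the controller's real limit. */
#define EXAMPLE_XFER_MASK	0xff000001	/* e.g. low 16M only, even addresses */

static int dma_addr_ok(void *ptr)
{
	/* virt_to_bus() gives the address the DMA engine will see on the bus */
	return (virt_to_bus(ptr) & EXAMPLE_XFER_MASK) == 0;
}

When neither the original buffer nor the bounce buffer passes the check, the function returns 1 so the caller falls back to PIO, as the comments above note.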
Example #7
void __init setup_arch(char **cmdline_p)
{
	unsigned long bootmap_size;
	unsigned long start_pfn, max_pfn, max_low_pfn;

#ifdef CONFIG_EARLY_PRINTK
	extern void enable_early_printk(void);

	enable_early_printk();
#endif
#ifdef CONFIG_CMDLINE_BOOL
        strcpy(COMMAND_LINE, CONFIG_CMDLINE);
#endif

	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	code_resource.start = virt_to_bus(_text);
	code_resource.end = virt_to_bus(_etext)-1;
	data_resource.start = virt_to_bus(_etext);
	data_resource.end = virt_to_bus(_edata)-1;

	sh_mv_setup(cmdline_p);

#define PFN_UP(x)	(((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)	((x) << PAGE_SHIFT)

#ifdef CONFIG_DISCONTIGMEM
	NODE_DATA(0)->bdata = &discontig_node_bdata[0];
	NODE_DATA(1)->bdata = &discontig_node_bdata[1];

	bootmap_size = init_bootmem_node(NODE_DATA(1), 
					 PFN_UP(__MEMORY_START_2ND),
					 PFN_UP(__MEMORY_START_2ND),
					 PFN_DOWN(__MEMORY_START_2ND+__MEMORY_SIZE_2ND));
	free_bootmem_node(NODE_DATA(1), __MEMORY_START_2ND, __MEMORY_SIZE_2ND);
	reserve_bootmem_node(NODE_DATA(1), __MEMORY_START_2ND, bootmap_size);
#endif

	/*
	 * Find the highest page frame number we have available
	 */
	max_pfn = PFN_DOWN(__pa(memory_end));

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn;

 	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
 	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Find a proper area for the bootmem bitmap. After this
	 * bootstrap step all allocations (until the page allocator
	 * is intact) must be done via bootmem_alloc().
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
					 __MEMORY_START>>PAGE_SHIFT,
					 max_low_pfn);
	/*
	 * Register fully available low RAM pages with the bootmem allocator.
	 */
	{
		unsigned long curr_pfn, last_pfn, pages;

		/*
		 * We are rounding up the start address of usable memory:
		 */
		curr_pfn = PFN_UP(__MEMORY_START);
		/*
		 * ... and at the end of the usable range downwards:
		 */
		last_pfn = PFN_DOWN(__pa(memory_end));

		if (last_pfn > max_low_pfn)
			last_pfn = max_low_pfn;

		pages = last_pfn - curr_pfn;
		free_bootmem_node(NODE_DATA(0), PFN_PHYS(curr_pfn),
				  PFN_PHYS(pages));
	}

	/*
	 * Reserve the kernel text and
	 * Reserve the bootmem bitmap. We do this in two steps (first step
	 * was init_bootmem()), because this catches the (definitely buggy)
	 * case of us accidentally initializing the bootmem allocator with
	 * an invalid RAM area.
	 */
	reserve_bootmem_node(NODE_DATA(0), __MEMORY_START+PAGE_SIZE,
		(PFN_PHYS(start_pfn)+bootmap_size+PAGE_SIZE-1)-__MEMORY_START);

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_node(NODE_DATA(0), __MEMORY_START, PAGE_SIZE);

#ifdef CONFIG_BLK_DEV_INITRD
 	ROOT_DEV = MKDEV(RAMDISK_MAJOR, 0);
 	if (&__rd_start != &__rd_end) {
		LOADER_TYPE = 1;
		INITRD_START = PHYSADDR((unsigned long)&__rd_start) - __MEMORY_START;
		INITRD_SIZE = (unsigned long)&__rd_end - (unsigned long)&__rd_start;
 	}

	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
			reserve_bootmem_node(NODE_DATA(0), INITRD_START+__MEMORY_START, INITRD_SIZE);
			initrd_start =
				INITRD_START ? INITRD_START + PAGE_OFFSET + __MEMORY_START : 0;
			initrd_end = initrd_start + INITRD_SIZE;
		} else {
			printk("initrd extends beyond end of memory "
			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
				    INITRD_START + INITRD_SIZE,
				    max_low_pfn << PAGE_SHIFT);
			initrd_start = 0;
		}
	}
#endif

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	/* Perform the machine specific initialisation */
	platform_setup();

	paging_init();
}
static inline unsigned long cpu2cpm_addr(void *addr)
{
	if ((unsigned long)addr >= CPM_ADDR)
		return (unsigned long)addr;
	return virt_to_bus(addr);
}
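cpu2cpm_addr() passes through addresses that already lie in the CPM's own address window and runs everything else through virt_to_bus(). A hypothetical usage sketch; the buffer-descriptor layout and names (cpm_bd_example, bd_set_buffer) are illustrative only.

/* Sketch only: a simplified buffer descriptor as the CPM might consume it. */
struct cpm_bd_example {
	unsigned short	status;
	unsigned short	length;
	unsigned long	addr;		/* buffer address as seen by the CPM */
};

static void bd_set_buffer(struct cpm_bd_example *bd, void *buf,
			  unsigned short len)
{
	bd->addr = cpu2cpm_addr(buf);	/* CPM-space buffers pass through, DRAM is translated */
	bd->length = len;
}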