Example #1
static void speedo_resume(struct dev *dev) {
  struct nic *sp = (struct nic *) dev->privdata;
  long ioaddr = sp->iobase;

  outpw(ioaddr + SCBCmd, SCBMaskAll);

  // Start with a Tx threshold of 256 (0x..20.... 8 byte units)
  sp->tx_threshold = 0x01208000;

  // Set the segment registers to '0'
  wait_for_cmd_done(ioaddr + SCBCmd);
  if (inp(ioaddr + SCBCmd)) {
    outpd(ioaddr + SCBPort, PortPartialReset);
    udelay(10);
  }
  outpd(ioaddr + SCBPointer, 0);
  inpd(ioaddr + SCBPointer);       // Flush to PCI
  udelay(10); // Bogus, but it avoids the bug

  // Note: these next two operations can take a while
  do_slow_command(dev, RxAddrLoad);
  do_slow_command(dev, CUCmdBase);

  // Load the statistics block and rx ring addresses
  outpd(ioaddr + SCBPointer, virt2phys(&sp->lstats));
  inpd(ioaddr + SCBPointer);       // Flush to PCI
  outp(ioaddr + SCBCmd, CUStatsAddr);
  sp->lstats.done_marker = 0;
  wait_for_cmd_done(ioaddr + SCBCmd);

  outpd(ioaddr + SCBPointer, virt2phys(sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]));
  inpd(ioaddr + SCBPointer);       // Flush to PCI

  // Note: RxStart should complete instantly.
  do_slow_command(dev, RxStart);
  do_slow_command(dev, CUDumpStats);

  // Fill the first command with our physical address
  {
    int entry = sp->cur_tx++ % TX_RING_SIZE;
    struct descriptor *cur_cmd = (struct descriptor *) &sp->tx_ring[entry];

    // Avoid a bug(?!) here by marking the command already completed
    cur_cmd->cmd_status = (CmdSuspend | CmdIASetup) | 0xa000;
    cur_cmd->link = virt2phys(&sp->tx_ring[sp->cur_tx % TX_RING_SIZE]);
    memcpy(cur_cmd->params, sp->hwaddr.addr, 6);
    if (sp->last_cmd) clear_suspend(sp->last_cmd);
    sp->last_cmd = cur_cmd;
  }

  // Start the chip's Tx process and unmask interrupts
  outpd(ioaddr + SCBPointer, virt2phys(&sp->tx_ring[sp->dirty_tx % TX_RING_SIZE]));
  outpw(ioaddr + SCBCmd, CUStart);
}
Example #2
static int speedo_transmit(struct dev *dev, struct pbuf *p) {
  struct nic *sp = (struct nic *) dev->privdata;
  long ioaddr = sp->iobase;
  int entry;

  p = pbuf_linearize(PBUF_RAW, p);
  if (!p) return -ENOMEM;

  // Wait for free entry in transmit ring
  if (wait_for_object(&sp->tx_sem, TX_TIMEOUT) < 0) {
    kprintf("%s: transmit timeout, drop packet\n", dev->name);
    sp->stats.tx_dropped++;
    return -ETIMEOUT;
  }

  // Caution: the write order is important here; set the base address
  // with the "ownership" bits last.

  // Calculate the Tx descriptor entry
  entry = sp->cur_tx % TX_RING_SIZE;

  sp->tx_pbuf[entry] = p;
  // TODO: be a little more clever about setting the interrupt bit
  sp->tx_ring[entry].status = CmdSuspend | CmdTx | CmdTxFlex;
  sp->cur_tx++;
  sp->tx_ring[entry].link = virt2phys(&sp->tx_ring[sp->cur_tx % TX_RING_SIZE]);
  sp->tx_ring[entry].tx_desc_addr = virt2phys(&sp->tx_ring[entry].tx_buf_addr0);
  // The data region is always in one buffer descriptor
  sp->tx_ring[entry].count = sp->tx_threshold;
  sp->tx_ring[entry].tx_buf_addr0 = virt2phys(p->payload);
  sp->tx_ring[entry].tx_buf_size0 = p->tot_len;
  
  // TODO: perhaps leave the interrupt bit set if the Tx queue is more
  // than half full.  Argument against: we should be receiving packets
  // and scavenging the queue.  Argument for: if so, it shouldn't
  // matter.

  {
    struct descriptor *last_cmd = sp->last_cmd;
    sp->last_cmd = (struct descriptor *) &sp->tx_ring[entry];
    clear_suspend(last_cmd);
  }
  
  wait_for_cmd_done(ioaddr + SCBCmd);
  outp(ioaddr + SCBCmd, CUResume);
  sp->trans_start = get_ticks();

  return 0;
}
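
The cur_tx counter above (paired with dirty_tx elsewhere in this driver) follows a common ring idiom: both are free-running counters, the ring index is the counter modulo the ring size, and their difference is the number of descriptors currently outstanding. The following is a minimal, self-contained sketch of that accounting; RING_SIZE, ring_put and ring_get are illustrative names, not part of the driver.

#include <stdio.h>

#define RING_SIZE 8

static int ring[RING_SIZE];
static unsigned int cur, dirty;   /* free-running producer/consumer counters */

static int ring_put(int v) {
  if (cur - dirty >= RING_SIZE) return -1;   /* ring full */
  ring[cur++ % RING_SIZE] = v;
  return 0;
}

static int ring_get(int *v) {
  if (cur == dirty) return -1;               /* ring empty */
  *v = ring[dirty++ % RING_SIZE];
  return 0;
}

int main(void) {
  int v;
  for (int i = 0; i < 5; i++) ring_put(i);
  while (ring_get(&v) == 0) printf("%d ", v);
  printf("\n");
  return 0;
}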
Example #3
void *kmalloc_tag(int size, unsigned long tag) {
  struct bucket *b;
  int bucket;
  void *addr;

  // Handle large allocations by allocating whole pages
  if (size > PAGESIZE / 2) {
    // Allocate pages
    addr = alloc_pages(PAGES(size), tag ? tag : 'ALOC');

    // Set size in pfn entry
    pfdb[BTOP(virt2phys(addr))].size = PAGES(size) + PAGESHIFT;

    return addr;
  }

  // Otherwise allocate from one of the buckets
  bucket = BUCKET(size);
  b = &buckets[bucket];

  // If the bucket is empty, allocate one more page for it
  if (b->mem == 0) {
    char *p;
    int i;

    // Allocate new page
    addr = alloc_pages(1, 'HEAP');

    // Set bucket number in pfn entry
    pfdb[BTOP(virt2phys(addr))].size = bucket;

    // Split page into chunks
    p = (char *) addr;
    for (i = 0; i < PAGESIZE; i += b->size)  {
      *(void **)(p + i) = b->mem;
      b->mem = p + i;
    }

    // Update count of pages used for this bucket
    b->pages++;
  }

  // Allocate chunk from bucket
  addr = b->mem;
  b->mem = *(void **) addr;

  // Return allocated chunk
  return addr;
}
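
The bucket refill loop above threads the free chunks of a fresh page into an intrusive free list: the first word of every free chunk points at the next free chunk, so no separate bookkeeping memory is needed. Here is a self-contained user-space sketch of that trick; the chunk and page sizes and all names below are local to the sketch, not the kernel's.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CHUNK_SIZE 64
#define PAGE_BYTES 4096

static void *freelist;

static void split_page_into_chunks(char *page) {
  for (int i = 0; i < PAGE_BYTES; i += CHUNK_SIZE) {
    *(void **)(page + i) = freelist;   /* link the chunk onto the free list */
    freelist = page + i;
  }
}

static void *chunk_alloc(void) {
  void *chunk = freelist;
  if (chunk) freelist = *(void **) chunk;   /* pop the head chunk */
  return chunk;
}

static void chunk_free(void *chunk) {
  *(void **) chunk = freelist;              /* push the chunk back */
  freelist = chunk;
}

int main(void) {
  char *page = malloc(PAGE_BYTES);
  if (!page) return 1;
  split_page_into_chunks(page);
  void *a = chunk_alloc();
  void *b = chunk_alloc();
  memset(a, 0, CHUNK_SIZE);
  memset(b, 0, CHUNK_SIZE);
  chunk_free(b);
  chunk_free(a);
  printf("allocated and released two %d-byte chunks\n", CHUNK_SIZE);
  free(page);
  return 0;
}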
Example #4
static void rtl_hw_start(struct dev *dev) {
  struct nic *tp = (struct nic *) dev->privdata;
  long ioaddr = tp->iobase;
  int i;

  // Soft reset the chip
  outp(ioaddr + ChipCmd, CmdReset);

  // Check that the chip has finished the reset
  for (i = 1000; i > 0; i--) {
    if ((inp(ioaddr + ChipCmd) & CmdReset) == 0) break;
  }

  // Restore our idea of the MAC address
  outp(ioaddr + Cfg9346, 0xC0);
  outpd(ioaddr + MAC0 + 0, *(unsigned long *)(tp->hwaddr.addr + 0));
  outpd(ioaddr + MAC0 + 4, *(unsigned long *)(tp->hwaddr.addr + 4));

  // Hmmm, do these belong here?
  tp->cur_rx = 0;

  // Must enable Tx/Rx before setting transfer thresholds!
  outp(ioaddr + ChipCmd, CmdRxEnb | CmdTxEnb);
  outpd(ioaddr + RxConfig, tp->rx_config);

  // Check this value: the documentation contradicts itself.  Is the
  // IFG correct with bits 28:27 zero, or with |0x03000000?
  outpd(ioaddr + TxConfig, (TX_DMA_BURST << 8));

  // This is check_duplex()
  if (tp->phys[0] >= 0 || (tp->flags & HAS_MII_XCVR)) {
    unsigned short mii_reg5 = mdio_read(dev, tp->phys[0], 5);
    if (mii_reg5 != 0xffff) {
      if ((mii_reg5 & 0x0100) == 0x0100 || (mii_reg5 & 0x00C0) == 0x0040) {
        tp->full_duplex = 1;
      }
    }

    kprintf(KERN_INFO "%s: Setting %s%s-duplex based on auto-negotiated partner ability %4.4x\n", 
      dev->name, 
      mii_reg5 == 0 ? "" : (mii_reg5 & 0x0180) ? "100mbps " : "10mbps ",
      tp->full_duplex ? "full" : "half", mii_reg5);
  }

  if (tp->flags & HAS_MII_XCVR) {
    // RTL8129 chip
    outp(ioaddr + Config1, tp->full_duplex ? 0x60 : 0x20);
  }
  outp(ioaddr + Cfg9346, 0x00);

  outpd(ioaddr + RxBuf, virt2phys(tp->rx_ring));

  // Start the chip's Tx and Rx process
  outpd(ioaddr + RxMissed, 0);
  rtl8139_set_rx_mode(dev);
  outp(ioaddr + ChipCmd, CmdRxEnb | CmdTxEnb);
  
  // Enable all known interrupts by setting the interrupt mask
  outpw(ioaddr + IntrMask, PCIErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK);
}
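
The reset sequence above uses a bounded busy-wait: write the reset bit, then poll until the hardware clears it, giving up after a fixed number of attempts instead of hanging forever. A generic, self-contained sketch of that pattern follows; the register-read callback, the mask value and the fake device are stand-ins invented for the sketch.

#include <stdbool.h>
#include <stdio.h>

/* Poll read_reg() until all bits in mask read back as zero, or give up. */
static bool poll_bits_clear(unsigned (*read_reg)(void *ctx), void *ctx,
                            unsigned mask, int tries) {
  while (tries-- > 0) {
    if ((read_reg(ctx) & mask) == 0) return true;
  }
  return false;   /* the device never cleared the bit; the caller should bail out */
}

/* Fake register that "clears" its reset bit after a few reads. */
static unsigned fake_read(void *ctx) {
  int *countdown = ctx;
  return (*countdown)-- > 0 ? 0x10u : 0x00u;
}

int main(void) {
  int countdown = 3;
  bool ok = poll_bits_clear(fake_read, &countdown, 0x10u, 1000);
  printf("reset %s\n", ok ? "completed" : "timed out");
  return 0;
}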
Example #5
static void speedo_init_rx_ring(struct dev *dev)
{
  struct nic *sp = (struct nic *) dev->privdata;
  struct RxFD *rxf, *last_rxf = NULL;
  int i;

  init_sem(&sp->tx_sem, TX_QUEUE_LIMIT);

  sp->cur_rx = 0;

  for (i = 0; i < RX_RING_SIZE; i++) {
    struct pbuf *p;

    p = pbuf_alloc(PBUF_RAW, PKT_BUF_SZ + sizeof(struct RxFD), PBUF_RW);
    sp->rx_pbuf[i] = p;
    if (p == NULL) break;      // OK. Just initially short of Rx bufs
    rxf = (struct RxFD *) p->payload;
    sp->rx_ringp[i] = rxf;
    pbuf_header(p, - (int) sizeof(struct RxFD));
    if (last_rxf) last_rxf->link = virt2phys(rxf);
    last_rxf = rxf;
    rxf->status = 0x00000001;  // '1' is flag value only
    rxf->link = 0;            // None yet
    rxf->rx_buf_addr = 0xffffffff;
    rxf->count = PKT_BUF_SZ << 16;
  }
  sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

  // Mark the last entry as end-of-list.
  last_rxf->status = 0xC0000002; // '2' is flag value only
  sp->last_rxf = last_rxf;
}
Example #6
int vmfree(void *addr, unsigned long size, int type) {
    struct filemap *fm = NULL;
    int pages = PAGES(size);
    int i, rc;
    char *vaddr;

    if (size == 0) return 0;
    addr = (void *) PAGEADDR(addr);
    if (!valid_range(addr, size)) return -EINVAL;

    if (type & (MEM_DECOMMIT | MEM_RELEASE)) {
        vaddr = (char *) addr;
        for (i = 0; i < pages; i++) {
            if (page_directory_mapped(vaddr)) {
                pte_t flags = get_page_flags(vaddr);
                unsigned long pfn = BTOP(virt2phys(vaddr));

                if (flags & PT_FILE) {
                    handle_t h = (flags & PT_PRESENT) ? pfdb[pfn].owner : pfn;
                    struct filemap *newfm = (struct filemap *) hlookup(h);
                    if (newfm != fm) {
                        if (fm) {
                            if (fm->pages == 0) {
                                rc = free_filemap(fm);
                            } else {
                                rc = unlock_filemap(fm);
                            }
                            if (rc < 0) return rc;
                        }
                        fm = newfm;
                        rc = wait_for_object(fm, INFINITE);
                        if (rc < 0) return rc;
                    }
                    fm->pages--;
                    unmap_page(vaddr);
                    if (flags & PT_PRESENT) free_pageframe(pfn);
                } else  if (flags & PT_PRESENT) {
                    unmap_page(vaddr);
                    free_pageframe(pfn);
                }
            }

            vaddr += PAGESIZE;
        }
    }

    if (fm) {
        if (fm->pages == 0) {
            rc = free_filemap(fm);
        } else {
            rc = unlock_filemap(fm);
        }
        if (rc < 0) return rc;
    } else if (type & MEM_RELEASE) {
        rmap_free(vmap, BTOP(addr), pages);
    }

    return 0;
}
Example #7
static int rtl8139_transmit(struct dev *dev, struct pbuf *p) {
  struct nic *tp = (struct nic *) dev->privdata;
  long ioaddr = tp->iobase;
  int entry;

  // Wait for free entry in transmit ring
  if (wait_for_object(&tp->tx_sem, TX_TIMEOUT) < 0) {
    kprintf(KERN_WARNING "%s: transmit timeout, drop packet\n", dev->name);
    tp->stats.tx_dropped++;
    return -ETIMEOUT;
  }

  // Calculate the next Tx descriptor entry
  entry = tp->cur_tx % NUM_TX_DESC;

  tp->tx_pbuf[entry] = p;
  if (p->next || ((unsigned long) (p->payload) & 3)) {
    struct pbuf *q;
    unsigned char *ptr;

    // Must use alignment buffer
    q = p;
    ptr = tp->tx_buf[entry];
    while (q) {
      memcpy(ptr, q->payload, q->len);
      ptr += q->len;
      q = q->next;
    }

    outpd(ioaddr + TxAddr0 + entry * 4, virt2phys(tp->tx_buf[entry]));
  } else {
    outpd(ioaddr + TxAddr0 + entry * 4, virt2phys(p->payload));
  }

  // Note: the chip doesn't have auto-pad!
  outpd(ioaddr + TxStatus0 + entry * 4, tp->tx_flag | (p->tot_len >= ETH_ZLEN ? p->tot_len : ETH_ZLEN));

  tp->trans_start = get_ticks();
  tp->cur_tx++;

  //kprintf("%s: Queued Tx packet at %p size %d to slot %d\n", dev->name, p->payload, p->tot_len, entry);

  return 0;
}
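
The branch above is the classic bounce-buffer decision: if the packet chain is fragmented or its payload is not 4-byte aligned, it is copied into one contiguous, aligned staging buffer before the DMA address is programmed; otherwise the payload is used in place. A stand-alone sketch of that decision follows; struct frag and dma_ready are stand-ins for the driver's pbuf chain, not real APIs.

#include <stdint.h>
#include <string.h>

struct frag { const void *data; size_t len; struct frag *next; };

/* Return a pointer the device could DMA from: the original payload when it is
 * already contiguous and 4-byte aligned, otherwise the bounce buffer after
 * linearizing the whole chain into it. */
static const void *dma_ready(const struct frag *p, uint8_t *bounce) {
  if (p->next == NULL && ((uintptr_t) p->data & 3) == 0) {
    return p->data;                  /* fast path: use the buffer in place */
  }
  uint8_t *dst = bounce;
  for (; p != NULL; p = p->next) {   /* slow path: copy the chain */
    memcpy(dst, p->data, p->len);
    dst += p->len;
  }
  return bounce;
}

int main(void) {
  uint8_t payload[8] = { 0 };
  uint8_t bounce[16];
  struct frag tail = { payload + 1, 4, NULL };   /* misaligned fragment */
  struct frag head = { payload, 1, &tail };      /* two-fragment chain */
  return dma_ready(&head, bounce) == bounce ? 0 : 1;
}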
Example #8
void __attribute__((noreturn)) arch_shutdown_mmu(struct per_cpu *cpu_data)
{
	static DEFINE_SPINLOCK(map_lock);

	virt2phys_t virt2phys = paging_hvirt2phys;
	void *stack_virt = cpu_data->stack;
	unsigned long stack_phys = virt2phys((void *)stack_virt);
	unsigned long trampoline_phys = virt2phys((void *)&trampoline_start);
	struct registers *regs_phys =
			(struct registers *)virt2phys(guest_regs(cpu_data));

	/* Jump to the identity-mapped trampoline page before shutting down */
	void (*shutdown_fun_phys)(struct registers*, unsigned long);
	shutdown_fun_phys = (void*)virt2phys(shutdown_el2);

	/*
	 * No need to check for size or overlapping here, it has already been
	 * done, and the paging structures will soon be deleted. However, the
	 * cells' CPUs may execute this concurrently.
	 */
	spin_lock(&map_lock);
	paging_create(&hv_paging_structs, stack_phys, PAGE_SIZE, stack_phys,
		      PAGE_DEFAULT_FLAGS, PAGING_NON_COHERENT);
	paging_create(&hv_paging_structs, trampoline_phys, PAGE_SIZE,
		      trampoline_phys, PAGE_DEFAULT_FLAGS,
		      PAGING_NON_COHERENT);
	spin_unlock(&map_lock);

	arch_cpu_dcaches_flush(CACHES_CLEAN);

	/*
	 * Final shutdown:
	 * - disable the MMU whilst inside the trampoline page
	 * - reset the vectors
	 * - return to EL1
	 */
	shutdown_fun_phys(regs_phys, saved_vectors);

	__builtin_unreachable();
}
Example #9
static void speedo_intr_error(struct dev *dev, int intr_status) {
  struct nic *sp = (struct nic *) dev->privdata;
  long ioaddr = sp->iobase;

  if (intr_status & IntrRxSuspend) {
    if ((intr_status & 0x003c) == 0x0028)
      // No more Rx buffers
      outp(ioaddr + SCBCmd, RxResumeNoResources);
    else if ((intr_status & 0x003c) == 0x0008) {
      // No resources (why?!)
      kprintf(KERN_ERR "%s: Unknown receiver error, status=%#4.4x\n", dev->name, intr_status);

      // No idea of what went wrong.  Restart the receiver
      outpd(ioaddr + SCBPointer, virt2phys(sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]));
      outp(ioaddr + SCBCmd, RxStart);
    }

    sp->stats.rx_errors++;
  }
}
Example #10
int vmsync(void *addr, unsigned long size) {
    struct filemap *fm = NULL;
    int pages = PAGES(size);
    int i, rc;
    char *vaddr;

    if (size == 0) return 0;
    addr = (void *) PAGEADDR(addr);
    if (!valid_range(addr, size)) return -EINVAL;

    vaddr = (char *) addr;
    for (i = 0; i < pages; i++) {
        if (page_directory_mapped(vaddr)) {
            pte_t flags = get_page_flags(vaddr);
            if ((flags & (PT_FILE | PT_PRESENT | PT_DIRTY)) == (PT_FILE | PT_PRESENT | PT_DIRTY)) {
                unsigned long pfn = BTOP(virt2phys(vaddr));
                struct filemap *newfm = (struct filemap *) hlookup(pfdb[pfn].owner);
                if (newfm != fm) {
                    if (fm) {
                        rc = unlock_filemap(fm);
                        if (rc < 0) return rc;
                    }
                    fm = newfm;
                    rc = wait_for_object(fm, INFINITE);
                    if (rc < 0) return rc;
                }

                rc = save_file_page(fm, vaddr);
                if (rc < 0) return rc;
            }
        }
        vaddr += PAGESIZE;
    }

    if (fm) {
        rc = unlock_filemap(fm);
        if (rc < 0) return rc;
    }

    return 0;
}
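
vmfree() and vmsync() above share a locking idiom: walk the pages in order, keep the current file map locked across consecutive pages that belong to it, and only switch locks when the owner changes, so a large contiguous mapping costs one lock round trip instead of one per page. Below is a simplified stand-alone sketch of that coalescing; struct owner and the lock/unlock helpers are illustrative, not kernel APIs.

#include <stddef.h>
#include <stdio.h>

struct owner { const char *name; int locked; };

static void lock_owner(struct owner *o)   { o->locked = 1; printf("lock %s\n", o->name); }
static void unlock_owner(struct owner *o) { o->locked = 0; printf("unlock %s\n", o->name); }

/* Process pages while holding the lock of each page's owner, switching
 * locks only when the owner actually changes. */
static void process_pages(struct owner **page_owner, size_t npages) {
  struct owner *cur = NULL;
  for (size_t i = 0; i < npages; i++) {
    struct owner *o = page_owner[i];
    if (o == NULL) continue;         /* page with no owner to lock */
    if (o != cur) {
      if (cur) unlock_owner(cur);
      cur = o;
      lock_owner(cur);
    }
    /* ... operate on page i under cur's lock ... */
  }
  if (cur) unlock_owner(cur);
}

int main(void) {
  struct owner a = { "A", 0 }, b = { "B", 0 };
  struct owner *pages[] = { &a, &a, NULL, &b, &b, &b };
  process_pages(pages, sizeof(pages) / sizeof(pages[0]));
  return 0;
}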
Example #11
void kfree(void *addr) {
  unsigned long bucket;
  struct bucket *b;

  // Check for NULL
  if (!addr) return;

  // Get page information
  bucket = pfdb[BTOP(virt2phys(addr))].size;

  // If a whole page or more, free directly
  if (bucket >= PAGESHIFT) {
    free_pages(addr, bucket - PAGESHIFT);
    return;
  }

  // Get bucket
  b = &buckets[bucket];

  // Free chunk to bucket
  *(void **) addr = b->mem;
  b->mem = addr;
}
Example #12
const Tile*
TileGrid::getTile(rcoord virt) const noexcept {
    return getTile(virt2phys(virt));
}
Example #13
Tile*
TileGrid::getTile(vicoord virt) noexcept {
    return getTile(virt2phys(virt));
}
Example #14
bool
TileGrid::inBounds(rcoord virt) const noexcept {
    return inBounds(virt2phys(virt));
}
Example #15
void c2dm_l2cache(int count,		/* number of regions */
		struct c2dmrgn rgns[],	/* array of regions */
		int dir)		/* cache operation */
{

	unsigned long size = 0;
	int rgn;

	for (rgn = 0; rgn < count; rgn++)
		size += rgns[rgn].span * rgns[rgn].lines;

	if (size >= L2THRESHOLD) {
		switch (dir) {
		case DMA_TO_DEVICE:
			/* Use clean all when available */
			/* Fall through for now */
		case DMA_BIDIRECTIONAL:
			/* Can't invalidate all without cleaning, so fall
			 * through to flush all to do both. */
		case DMA_FROM_DEVICE:
			outer_flush_all();
			break;
		}
	} else {
		int rgn;
		for (rgn = 0; rgn < count; rgn++) {

			int i, j;
			unsigned long linestart, start;
			unsigned long page_begin, end, offset,
				pageremain, lineremain;
			unsigned long phys, opsize;
			int page_num;

			/* beginning virtual address of each line */
			start = (unsigned long)rgns[rgn].start;

			for (i = 0; i < rgns[rgn].lines; i++) {

				linestart = start + (i * rgns[rgn].stride);

				/* beginning of the page for the new line */
				page_begin = linestart & PAGE_MASK;

				/* end of the new line */
				end = (unsigned long)linestart +
						rgns[rgn].span;

				page_num = DIV_ROUND_UP(end-page_begin,
						PAGE_SIZE);

				/* offset of the new line from page begin */
				offset = linestart - page_begin;

				/* track how long it is to the end of
				   the current page */
				pageremain = PAGE_SIZE - offset;

				/* keep track of how much of the line remains
				   to be copied */
				lineremain = rgns[rgn].span;

				for (j = 0; j < page_num; j++) {

					opsize = (lineremain < pageremain) ?
						lineremain : pageremain;

					phys = virt2phys(page_begin);
					phys = phys + offset;
					switch (dir) {
					case DMA_TO_DEVICE:
						outer_clean_range(phys,
							phys + opsize);
						break;
					case DMA_FROM_DEVICE:
						outer_inv_range(phys,
							phys + opsize);
						break;
					case DMA_BIDIRECTIONAL:
						outer_flush_range(phys,
							phys + opsize);
						break;
					}

					lineremain -= opsize;
					/* Move to next page */
					page_begin += PAGE_SIZE;

					/* After first page, start address
					 * will be page aligned so offset
					 * is 0 */
					offset = 0;

					if (!lineremain)
						break;

					pageremain -= opsize;
					if (!pageremain)
						pageremain = PAGE_SIZE;

				}
			}
		}
	}
}
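
The inner loop above splits each cache line's virtual range into pieces that never cross a page boundary, because every page may translate to a different physical address. The arithmetic is easy to get wrong, so here is a self-contained sketch of just that splitting; the 4 KiB page size is an assumption of the sketch.

#include <stdio.h>

#define PAGE_BYTES 4096UL
#define PAGE_MASK  (~(PAGE_BYTES - 1))

/* Print (page, offset, length) pieces for [start, start + span) such that
 * no piece crosses a page boundary. */
static void split_by_page(unsigned long start, unsigned long span) {
  unsigned long page_begin = start & PAGE_MASK;
  unsigned long offset = start - page_begin;
  unsigned long pageremain = PAGE_BYTES - offset;
  unsigned long lineremain = span;

  while (lineremain) {
    unsigned long opsize = lineremain < pageremain ? lineremain : pageremain;
    printf("page 0x%lx: offset %lu, %lu bytes\n", page_begin, offset, opsize);
    lineremain -= opsize;
    page_begin += PAGE_BYTES;   /* after the first piece, the start is page aligned */
    offset = 0;
    pageremain = PAGE_BYTES;
  }
}

int main(void) {
  split_by_page(0x1fe0UL, 96);  /* a 96-byte line straddling a page boundary */
  return 0;
}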
Example #16
static enum gcerror get_physical_pages(struct gcmmuphysmem *mem,
					pte_t *parray,
					struct gcmmuarena *arena)
{
	enum gcerror gcerror = GCERR_NONE;
	struct vm_area_struct *vma;
	struct page **pages = NULL;
	unsigned int base, write;
	int i, count = 0;

	/* Reset page descriptor array. */
	arena->pages = NULL;

	/* Get base address shortcut. */
	base = mem->base;

	/* Store the logical pointer. */
	arena->logical = (void *) base;

	/*
	 * Important Note: base is mapped from user application process
	 * to current process - it must lie completely within the current
	 * virtual memory address space in order to be of use to us here.
	 */

	vma = find_vma(current->mm, base + (mem->count << PAGE_SHIFT) - 1);
	if ((vma == NULL) || (base < vma->vm_start)) {
		gcerror = GCERR_MMU_BUFFER_BAD;
		goto exit;
	}

	/* Allocate page descriptor array. */
	pages = kmalloc(mem->count * sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL) {
		gcerror = GCERR_SETGRP(GCERR_OODM, GCERR_MMU_DESC_ALLOC);
		goto exit;
	}

	/* Query page descriptors. */
	write = ((vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) != 0) ? 1 : 0;
	count = get_user_pages(current, current->mm, base, mem->count,
				write, 1, pages, NULL);

	if (count < 0) {
		/* Kernel allocated buffer. */
		for (i = 0; i < mem->count; i += 1) {
			gcerror = virt2phys(base, &parray[i]);
			if (gcerror != GCERR_NONE)
				goto exit;

			base += mem->pagesize;
		}
	} else if (count == mem->count) {
		/* User allocated buffer. */
		for (i = 0; i < mem->count; i += 1) {
			parray[i] = page_to_phys(pages[i]);
			if (phys_to_page(parray[i]) != pages[i]) {
				gcerror = GCERR_MMU_PAGE_BAD;
				goto exit;
			}
		}

		/* Set page descriptor array. */
		arena->pages = pages;
	} else {
		gcerror = GCERR_MMU_BUFFER_BAD;
		goto exit;
	}

exit:
	if (arena->pages == NULL) {
		for (i = 0; i < count; i += 1)
			page_cache_release(pages[i]);

		if (pages != NULL)
			kfree(pages);
	}

	return gcerror;
}
Example #17
static int speedo_set_rx_mode(struct dev *dev) {
  struct nic *sp = (struct nic *) dev->privdata;
  long ioaddr = sp->iobase;
  struct descriptor *last_cmd;
  char new_rx_mode;
  int entry, i;

  if (!dev->netif) {
    // Network interface not attached yet -- accept all multicasts
    new_rx_mode = 1;
  } else if (dev->netif->flags & NETIF_PROMISC) {     
    // Set promiscuous.
    new_rx_mode = 3;
  } else if ((dev->netif->flags & NETIF_ALLMULTI) || dev->netif->mccount > multicast_filter_limit) {
    new_rx_mode = 1;
  } else {
    new_rx_mode = 0;
  }

  if (sp->cur_tx - sp->dirty_tx >= TX_RING_SIZE - 1) {
    // The Tx ring is full -- don't add anything!  Presumably the new mode
    // is in config_cmd_data and will be added anyway, otherwise we wait
    // for a timer tick or the mode to change again.
    sp->rx_mode = -1;
    return -EBUSY;
  }

  if (new_rx_mode != sp->rx_mode) {
    unsigned char *config_cmd_data;

    cli();
    entry = sp->cur_tx % TX_RING_SIZE;
    last_cmd = sp->last_cmd;
    sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];

    sp->tx_pbuf[entry] = 0;     // Redundant
    sp->tx_ring[entry].status = CmdSuspend | CmdConfigure;
    sp->cur_tx++;
    sp->tx_ring[entry].link = virt2phys(&sp->tx_ring[(entry + 1) % TX_RING_SIZE]);
    // We may nominally release the lock here

    config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
    // Construct a full CmdConfig frame
    memcpy(config_cmd_data, i82558_config_cmd, sizeof(i82558_config_cmd));
    config_cmd_data[1] = (txfifo << 4) | rxfifo;
    config_cmd_data[4] = rxdmacount;
    config_cmd_data[5] = txdmacount + 0x80;
    if (sp->flags & HasChksum) config_cmd_data[9] |= 1;
    config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
    config_cmd_data[19] = sp->flow_ctrl ? 0xBD : 0x80;
    config_cmd_data[19] |= sp->full_duplex ? 0x40 : 0;
    config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
    if (sp->phy[0] & 0x8000) {
      // Use the AUI port instead.
      config_cmd_data[15] |= 0x80;
      config_cmd_data[8] = 0;
    }
    // Trigger the command unit resume.
    wait_for_cmd_done(ioaddr + SCBCmd);
    clear_suspend(last_cmd);
    outp(ioaddr + SCBCmd, CUResume);
    sti();
    sp->last_cmd_time = get_ticks();
  }

  if (new_rx_mode == 0 && dev->netif->mccount < 4) {
    // The simple case of 0-3 multicast list entries occurs often, and
    // fits within one tx_ring[] entry.
    struct mclist *mclist;
    unsigned short *setup_params, *eaddrs;

    cli();
    entry = sp->cur_tx % TX_RING_SIZE;
    last_cmd = sp->last_cmd;
    sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];

    sp->tx_pbuf[entry] = 0;
    sp->tx_ring[entry].status = CmdSuspend | CmdMulticastList;
    sp->cur_tx++;
    sp->tx_ring[entry].link = virt2phys(&sp->tx_ring[(entry + 1) % TX_RING_SIZE]);
    // We may nominally release the lock here
    sp->tx_ring[entry].tx_desc_addr = 0; // Really MC list count
    setup_params = (unsigned short *) &sp->tx_ring[entry].tx_desc_addr;
    *setup_params++ = dev->netif->mccount * 6;
    
    // Fill in the multicast addresses.
    for (i = 0, mclist = dev->netif->mclist; i < dev->netif->mccount; i++, mclist = mclist->next) {
      eaddrs = (unsigned short *) &mclist->hwaddr;
      *setup_params++ = *eaddrs++;
      *setup_params++ = *eaddrs++;
      *setup_params++ = *eaddrs++;
    }

    wait_for_cmd_done(ioaddr + SCBCmd);
    clear_suspend(last_cmd);
    // Immediately trigger the command unit resume
    outp(ioaddr + SCBCmd, CUResume);
    sti();
    sp->last_cmd_time = get_ticks();
  } else if (new_rx_mode == 0) {
    struct mclist *mclist;
    unsigned short *setup_params, *eaddrs;
    struct descriptor *mc_setup_frm = sp->mc_setup_frm;
    int i;

    if (sp->mc_setup_frm_len < 10 + dev->netif->mccount * 6 || sp->mc_setup_frm == NULL) {
      // Allocate a full setup frame, 10 bytes + <max addrs>.
      if (sp->mc_setup_frm) kfree(sp->mc_setup_frm);
      sp->mc_setup_busy = 0;
      sp->mc_setup_frm_len = 10 + multicast_filter_limit*6;
      sp->mc_setup_frm = kmalloc(sp->mc_setup_frm_len);
      if (sp->mc_setup_frm == NULL) {
        kprintf(KERN_WARNING "%s: Failed to allocate a setup frame\n", dev->name);
        sp->rx_mode = -1; // We failed, try again.
        return -ENOMEM;
      }
    }

    // If we are busy, someone might be quickly adding to the MC list.
    // Try again later when the list updates stop.
    if (sp->mc_setup_busy) {
      sp->rx_mode = -1;
      return -EBUSY;
    }
    mc_setup_frm = sp->mc_setup_frm;

    // Fill the setup frame
    kprintf("%s: Constructing a setup frame at %p, %d bytes\n", dev->name, sp->mc_setup_frm, sp->mc_setup_frm_len);
    mc_setup_frm->cmd_status =  CmdSuspend | CmdIntr | CmdMulticastList;
    
    // Link set below
    setup_params = (unsigned short *) &mc_setup_frm->params;
    *setup_params++ = dev->netif->mccount * 6;
    
    // Fill in the multicast addresses
    for (i = 0, mclist = dev->netif->mclist; i < dev->netif->mccount; i++, mclist = mclist->next) {
      eaddrs = (unsigned short *) &mclist->hwaddr;
      *setup_params++ = *eaddrs++;
      *setup_params++ = *eaddrs++;
      *setup_params++ = *eaddrs++;
    }

    // Disable interrupts while playing with the Tx Cmd list
    cli();
    entry = sp->cur_tx % TX_RING_SIZE;
    last_cmd = sp->last_cmd;
    sp->last_cmd = mc_setup_frm;
    sp->mc_setup_busy++;

    // Change the command to a NoOp, pointing to the CmdMulti command
    sp->tx_pbuf[entry] = 0;
    sp->tx_ring[entry].status = CmdNOp;
    sp->cur_tx++;
    sp->tx_ring[entry].link = virt2phys(mc_setup_frm);
    // We may nominally release the lock here

    // Set the link in the setup frame.
    mc_setup_frm->link = virt2phys(&(sp->tx_ring[(entry+1) % TX_RING_SIZE]));

    wait_for_cmd_done(ioaddr + SCBCmd);
    clear_suspend(last_cmd);

    // Immediately trigger the command unit resume
    outp(ioaddr + SCBCmd, CUResume);
    sti();
    sp->last_cmd_time = get_ticks();
    kprintf("%s: CmdMCSetup frame length %d in entry %d\n", dev->name, dev->netif->mccount, entry);
  }

  sp->rx_mode = new_rx_mode;
  return 0;
}
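
Both multicast paths above serialize the list the same way: a leading 16-bit byte count followed by each 6-byte MAC address written out as three 16-bit words. A small stand-alone sketch of that packing follows; it only illustrates the layout of the parameter area, not the rest of the real setup frame.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Pack `count` MAC addresses into a setup-frame parameter area: first a
 * 16-bit byte count, then three 16-bit words per address. Returns the
 * number of 16-bit words written. */
static size_t pack_mc_list(uint16_t *setup_params,
                           const uint8_t addrs[][6], size_t count) {
  uint16_t *out = setup_params;
  *out++ = (uint16_t) (count * 6);
  for (size_t i = 0; i < count; i++) {
    uint16_t words[3];
    memcpy(words, addrs[i], 6);   /* copy to avoid unaligned 16-bit loads */
    *out++ = words[0];
    *out++ = words[1];
    *out++ = words[2];
  }
  return (size_t) (out - setup_params);
}

int main(void) {
  const uint8_t mc[2][6] = {
    { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
    { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb },
  };
  uint16_t frame[1 + 2 * 3];
  printf("packed %zu words\n", pack_mc_list(frame, mc, 2));
  return 0;
}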
Example #18
/*
 * Jumping to EL2 in the same C code represents an interesting challenge, since
 * it will switch from virtual addresses to physical ones, and then back to
 * virtual after setting up the EL2 MMU.
 * To this end, the setup_mmu and cpu_switch_el2 functions are naked and must
 * handle the stack themselves.
 */
int switch_exception_level(struct per_cpu *cpu_data)
{
	extern unsigned long bootstrap_vectors;
	extern unsigned long hyp_vectors;

	/* Save the virtual address of the phys2virt function for later */
	phys2virt_t phys2virt = paging_phys2hvirt;
	virt2phys_t virt2phys = paging_hvirt2phys;
	unsigned long phys_bootstrap = virt2phys(&bootstrap_vectors);
	struct per_cpu *phys_cpu_data = (struct per_cpu *)virt2phys(cpu_data);
	unsigned long trampoline_phys = virt2phys((void *)&trampoline_start);
	unsigned long trampoline_size = &trampoline_end - &trampoline_start;
	unsigned long stack_virt = (unsigned long)cpu_data->stack;
	unsigned long stack_phys = virt2phys((void *)stack_virt);
	u64 ttbr_el2;

	/* Check the paging structures as well as the MMU initialisation */
	unsigned long jailhouse_base_phys =
		paging_virt2phys(&hv_paging_structs, JAILHOUSE_BASE,
				 PAGE_DEFAULT_FLAGS);

	/*
	 * The hypervisor stub allows its current vector base to be fetched by
	 * issuing an HVC with r0 = -1. The saved vectors will need to be
	 * restored when disabling jailhouse.
	 */
	if (saved_vectors == 0)
		saved_vectors = hvc(-1);

	/*
	 * The paging structures won't be easily accessible when initializing
	 * EL2; only the per-CPU data will be readable at its physical address.
	 */
	ttbr_el2 = (u64)virt2phys(hv_paging_structs.root_table) & TTBR_MASK;

	/*
	 * Mirror the mmu setup code, so that we are able to jump to the virtual
	 * address after enabling it.
	 * Those regions must fit on one page.
	 */

	if (set_id_map(0, trampoline_phys, trampoline_size) != 0)
		return -E2BIG;
	if (set_id_map(1, stack_phys, PAGE_SIZE) != 0)
		return -E2BIG;
	create_id_maps();

	/*
	 * Before doing anything hairy, we need to sync the caches with memory:
	 * they will be off at EL2. From this point forward and until the caches
	 * are re-enabled, we cannot write anything critical to memory.
	 */
	arch_cpu_dcaches_flush(CACHES_CLEAN);

	cpu_switch_el2(phys_bootstrap, virt2phys);
	/*
	 * At this point, we are at EL2, and we work with physical addresses.
	 * The MMU needs to be initialised and execution must go back to virtual
	 * addresses before returning, or else we are pretty much doomed.
	 */

	setup_mmu_el2(phys_cpu_data, phys2virt, ttbr_el2);

	/* Sanity check */
	check_mmu_map(JAILHOUSE_BASE, jailhouse_base_phys);

	/* Set the new vectors once we're back to a sane, virtual state */
	arm_write_sysreg(HVBAR, &hyp_vectors);

	/* Remove the identity mapping */
	destroy_id_maps();

	return 0;
}
Example #19
static int speedo_rx(struct dev *dev) {
  struct nic *sp = (struct nic *) dev->privdata;
  int entry = sp->cur_rx % RX_RING_SIZE;
  int status;
  int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;

  //kprintf("%s: In speedo_rx()\n", dev->name);

  // If we own the next entry, it's a new packet. Send it up.
  while (sp->rx_ringp[entry] != NULL &&  (status = sp->rx_ringp[entry]->status) & RxComplete) {
    int desc_count = sp->rx_ringp[entry]->count;
    int pkt_len = desc_count & 0x07ff;

    if (--rx_work_limit < 0) break;

    //kprintf("%s: speedo_rx() status %8.8x len %d\n", dev->name, status, pkt_len);

    if ((status & (RxErrTooBig | RxOK | 0x0f90)) != RxOK) {
      if (status & RxErrTooBig) {
        kprintf(KERN_WARNING "%s: Ethernet frame overran the Rx buffer, status %8.8x!\n", dev->name, status);
      } else if (!(status & RxOK)) {
        // There was a fatal error.  This *should* be impossible.
        sp->stats.rx_errors++;
        kprintf(KERN_WARNING "%s: Anomalous event in speedo_rx(), status %8.8x\n", dev->name, status);
      }
    } else {
      struct pbuf *p;

      if (sp->flags & HasChksum) pkt_len -= 2;

      // Check if the packet is long enough to just accept without
      // copying to a properly sized packet buffer

      if (pkt_len < rx_copybreak && (p = pbuf_alloc(PBUF_RAW, pkt_len, PBUF_RW)) != NULL) {
        memcpy(p->payload, sp->rx_pbuf[entry]->payload, pkt_len);
      } else {
        // Pass up the already-filled pbuf
        p = sp->rx_pbuf[entry];
        if (p == NULL) {
          kprintf(KERN_WARNING "%s: Inconsistent Rx descriptor chain\n", dev->name);
          break;
        }

        sp->rx_pbuf[entry] = NULL;
        sp->rx_ringp[entry] = NULL;

        pbuf_realloc(p, pkt_len);
      }

      // Send packet to upper layer
      if (dev_receive(sp->devno, p) < 0) pbuf_free(p);

      sp->stats.rx_packets++;
      sp->stats.rx_bytes += pkt_len;
    }

    entry = (++sp->cur_rx) % RX_RING_SIZE;
  }

  // Refill the Rx ring buffers
  for (; sp->cur_rx - sp->dirty_rx > 0; sp->dirty_rx++) {
    struct RxFD *rxf;
    entry = sp->dirty_rx % RX_RING_SIZE;
    
    if (sp->rx_pbuf[entry] == NULL) {
      struct pbuf *p;

      // Get a fresh pbuf to replace the consumed one
      p = pbuf_alloc(PBUF_RAW, PKT_BUF_SZ + sizeof(struct RxFD), PBUF_RW);
      sp->rx_pbuf[entry] = p;
      if (p == NULL) {
        sp->rx_ringp[entry] = NULL;
        sp->alloc_failures++;
        break;      // Better luck next time! 
      }
      rxf = sp->rx_ringp[entry] = (struct RxFD *) p->payload;
      pbuf_header(p, - (int) sizeof(struct RxFD));
      rxf->rx_buf_addr = virt2phys(p->payload);
    } else {
      rxf = sp->rx_ringp[entry];
    }

    rxf->status = 0xC0000001;  // '1' for driver use only
    rxf->link = 0;      // None yet
    rxf->count = PKT_BUF_SZ << 16;
    sp->last_rxf->link = virt2phys(rxf);
    sp->last_rxf->status &= ~0xC0000000;
    sp->last_rxf = rxf;
  }

  sp->last_rx_time = get_ticks();
  return 0;
}
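
The receive path above makes the usual rx_copybreak trade-off: frames below the threshold are copied into a right-sized buffer so the large ring buffer can be reused immediately, while larger frames are handed upstream as-is and the ring slot is refilled later. A minimal sketch of that decision follows; the threshold value and the deliver_frame helper are inventions of the sketch, and the real driver hands buffers to the network stack rather than freeing them.

#include <stdlib.h>
#include <string.h>

#define RX_COPYBREAK 256

/* Return the buffer to hand to the stack. *keep_ring_buf is set when the
 * original DMA buffer stays in the ring (i.e. a private copy was delivered). */
static void *deliver_frame(void *ring_buf, size_t len, int *keep_ring_buf) {
  if (len < RX_COPYBREAK) {
    void *copy = malloc(len);
    if (copy) {
      memcpy(copy, ring_buf, len);
      *keep_ring_buf = 1;           /* ring buffer untouched, reuse it */
      return copy;
    }
  }
  *keep_ring_buf = 0;               /* pass the ring buffer itself upstream */
  return ring_buf;
}

int main(void) {
  char ring_buf[2048] = "small frame";
  int keep;
  void *p = deliver_frame(ring_buf, 12, &keep);
  if (keep) free(p);                /* the sketch just discards the copy */
  return 0;
}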