Example #1
void *kmalloc_tag(int size, unsigned long tag) {
  struct bucket *b;
  int bucket;
  void *addr;

  // Handle large allocation by allocating pages
  if (size > PAGESIZE / 2) {
    // Allocate pages
    addr = alloc_pages(PAGES(size), tag ? tag : 'ALOC');

    // Set size in pfn entry
    pfdb[BTOP(virt2phys(addr))].size = PAGES(size) + PAGESHIFT;

    return addr;
  }

  // Otherwise allocate from one of the buckets
  bucket = BUCKET(size);
  b = &buckets[bucket];

  // If the bucket is empty, allocate one more page for it
  if (b->mem == 0) {
    char *p;
    int i;

    // Allocate new page
    addr = alloc_pages(1, 'HEAP');

    // Set bucket number in pfn entry
    pfdb[BTOP(virt2phys(addr))].size = bucket;

    // Split page into chunks
    p = (char *) addr;
    for (i = 0; i < PAGESIZE; i += b->size) {
      *(void **)(p + i) = b->mem;
      b->mem = p + i;
    }

    // Update count of pages used for this bucket
    b->pages++;
  }

  // Allocate chunk from bucket
  addr = b->mem;
  b->mem = *(void **) addr;

  // Return allocated chunk
  return addr;
}
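The chunk-splitting loop above threads a singly linked free list through the new page itself: each free chunk's first word points at the next free chunk, so the allocator needs no per-chunk bookkeeping. A minimal standalone sketch of the same technique in plain C11 (aligned_alloc stands in for alloc_pages; the struct layout mirrors the usage above and is an assumption, not sanos's actual definition):

#include <stdio.h>
#include <stdlib.h>

#define PAGESIZE 4096

// Hypothetical bucket: mem heads a free list threaded through the chunks
struct bucket {
  void *mem;    // first free chunk, or NULL
  int size;     // chunk size in bytes (power of two dividing PAGESIZE)
  int pages;    // pages handed to this bucket so far
};

// Split a page into chunks and push each onto the bucket's free list,
// exactly as the loop in kmalloc_tag does
static void bucket_add_page(struct bucket *b, void *page) {
  char *p = page;
  for (int i = 0; i < PAGESIZE; i += b->size) {
    *(void **)(p + i) = b->mem;
    b->mem = p + i;
  }
  b->pages++;
}

// Pop one chunk, mirroring the tail of kmalloc_tag
static void *bucket_get(struct bucket *b) {
  void *chunk = b->mem;
  if (chunk) b->mem = *(void **) chunk;
  return chunk;
}

int main(void) {
  struct bucket b = { NULL, 64, 0 };
  void *page = aligned_alloc(PAGESIZE, PAGESIZE);
  if (!page) return 1;
  bucket_add_page(&b, page);
  printf("first chunk at %p, %d page(s) in bucket\n", bucket_get(&b), b.pages);
  free(page);
  return 0;
}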
Example #2
/**
 * Perform MMIO-specific initialization for a new cell.
 * @param cell		Cell to be initialized.
 *
 * @return 0 on success, negative error code otherwise.
 *
 * @see mmio_cell_exit
 */
int mmio_cell_init(struct cell *cell)
{
	const struct jailhouse_memory *mem;
	unsigned int n;
	void *pages;

	cell->max_mmio_regions = arch_mmio_count_regions(cell);

	for_each_mem_region(mem, cell->config, n)
		if (JAILHOUSE_MEMORY_IS_SUBPAGE(mem))
			cell->max_mmio_regions++;

	pages = page_alloc(&mem_pool,
			   PAGES(cell->max_mmio_regions *
				 (sizeof(struct mmio_region_location) +
				  sizeof(struct mmio_region_handler))));
	if (!pages)
		return -ENOMEM;

	cell->mmio_locations = pages;
	cell->mmio_handlers = pages +
		cell->max_mmio_regions * sizeof(struct mmio_region_location);

	return 0;
}
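PAGES() rounds the combined byte size of the two per-region arrays up to whole pages, and cell->mmio_handlers is then placed directly behind the cell->mmio_locations array inside the same allocation. In Jailhouse the macro looks roughly like this (a sketch assuming 4 KB pages, not a verbatim copy of the header):

#define PAGE_SIZE 4096UL
// Round a byte count up to the number of whole pages needed to hold it
#define PAGES(s)  (((s) + PAGE_SIZE - 1) / PAGE_SIZE)

With hypothetical entry sizes of 16 and 8 bytes per region, a cell with 100 regions would need PAGES(100 * 24) = PAGES(2400) = 1 page.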
Example #3
/**
 * Perform MMIO-specific cleanup for a cell under destruction.
 * @param cell		Cell to be destructed.
 *
 * @see mmio_cell_init
 */
void mmio_cell_exit(struct cell *cell)
{
	page_free(&mem_pool, cell->mmio_locations,
		  PAGES(cell->max_mmio_regions *
			(sizeof(struct mmio_region_location) +
			 sizeof(struct mmio_region_handler))));
}
Example #4
void miounmap(void *addr, int size) {
    int i;
    int pages = PAGES(size);

    // Unmap each page, then return the virtual range to the vmap resource map
    for (i = 0; i < pages; i++) unmap_page((char *) addr + PTOB(i));
    rmap_free(vmap, BTOP(addr), pages);
}
Example #5
int vmfree(void *addr, unsigned long size, int type) {
    struct filemap *fm = NULL;
    int pages = PAGES(size);
    int i, rc;
    char *vaddr;

    if (size == 0) return 0;
    addr = (void *) PAGEADDR(addr);
    if (!valid_range(addr, size)) return -EINVAL;

    if (type & (MEM_DECOMMIT | MEM_RELEASE)) {
        // Unmap every page in the range, giving file-backed pages back
        // to their filemap
        vaddr = (char *) addr;
        for (i = 0; i < pages; i++) {
            if (page_directory_mapped(vaddr)) {
                pte_t flags = get_page_flags(vaddr);
                unsigned long pfn = BTOP(virt2phys(vaddr));

                if (flags & PT_FILE) {
                    // For file pages the PTE holds the frame number when
                    // present, otherwise the filemap handle itself
                    handle_t h = (flags & PT_PRESENT) ? pfdb[pfn].owner : pfn;
                    struct filemap *newfm = (struct filemap *) hlookup(h);
                    if (newfm != fm) {
                        // Crossing into a new filemap: release the previous
                        // one and lock the new one
                        if (fm) {
                            if (fm->pages == 0) {
                                rc = free_filemap(fm);
                            } else {
                                rc = unlock_filemap(fm);
                            }
                            if (rc < 0) return rc;
                        }
                        fm = newfm;
                        rc = wait_for_object(fm, INFINITE);
                        if (rc < 0) return rc;
                    }
                    fm->pages--;
                    unmap_page(vaddr);
                    if (flags & PT_PRESENT) free_pageframe(pfn);
                } else if (flags & PT_PRESENT) {
                    unmap_page(vaddr);
                    free_pageframe(pfn);
                }
            }

            vaddr += PAGESIZE;
        }
    }

    // Release the last filemap visited; otherwise, on MEM_RELEASE,
    // return the address range to vmap
    if (fm) {
        if (fm->pages == 0) {
            rc = free_filemap(fm);
        } else {
            rc = unlock_filemap(fm);
        }
        if (rc < 0) return rc;
    } else if (type & MEM_RELEASE) {
        rmap_free(vmap, BTOP(addr), pages);
    }

    return 0;
}
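vmfree mirrors the Win32 VirtualFree split: MEM_DECOMMIT drops the backing page frames but keeps the address range reserved, while MEM_RELEASE also hands the range back to vmap. A hedged usage sketch pairing it with vmalloc from Example #14 below (the 'DEMO' tag and PAGE_READWRITE protection are illustrative values):

int rc;
void *mem = vmalloc(NULL, 64 * 1024, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE, 'DEMO', &rc);
if (mem) {
    // Drop the backing frames but keep the reservation...
    vmfree(mem, 64 * 1024, MEM_DECOMMIT);
    // ...then give the address range itself back
    vmfree(mem, 64 * 1024, MEM_RELEASE);
}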
Example #6
static int valid_range(void *addr, int size) {
    int pages = PAGES(size);

    // The range must lie entirely in user space and be fully reserved in vmap
    if ((unsigned long) addr < VMEM_START) return 0;
    if (KERNELSPACE((unsigned long) addr + pages * PAGESIZE)) return 0;
    if (rmap_status(vmap, BTOP(addr), pages) != 1) return 0;
    return 1;
}
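valid_range, like every example on this page, leans on a small family of page-arithmetic macros. Judging from how they are used, the sanos definitions are roughly as follows (a sketch assuming 4 KB pages; the authoritative versions live in the sanos headers):

#define PAGESHIFT    12
#define PAGESIZE     (1 << PAGESHIFT)
#define PAGES(x)     (((unsigned long)(x) + PAGESIZE - 1) >> PAGESHIFT)  // bytes -> pages, rounded up
#define PAGEADDR(x)  ((unsigned long)(x) & ~(PAGESIZE - 1))              // round address down to its page
#define BTOP(x)      ((unsigned long)(x) >> PAGESHIFT)                   // byte address -> page number
#define PTOB(x)      ((unsigned long)(x) << PAGESHIFT)                   // page number -> byte address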
Example #7
void *vmmap(void *addr, unsigned long size, int protect, struct file *filp, off64_t offset, int *rc) {
    int pages = PAGES(size);
    unsigned long flags = pte_flags_from_protect(protect);
    struct filemap *fm;
    int i;
    char *vaddr;

    if (rc) *rc = 0;
    if (size == 0 || flags == 0xFFFFFFFF) {
        if (rc) *rc = -EINVAL;
        return NULL;
    }
    addr = (void *) PAGEADDR(addr);
    if (addr == NULL) {
        // No address requested: let the resource map pick a free range
        addr = (void *) PTOB(rmap_alloc(vmap, pages));
        if (addr == NULL) {
            if (rc) *rc = -ENOMEM;
            return NULL;
        }
    } else {
        // Caller-specified address: reserve exactly that range
        if (rmap_reserve(vmap, BTOP(addr), pages)) {
            if (rc) *rc = -ENOMEM;
            return NULL;
        }
    }

    // Create a filemap object describing the mapping
    fm = (struct filemap *) kmalloc(sizeof(struct filemap));
    if (!fm) {
        rmap_free(vmap, BTOP(addr), pages);
        if (rc) *rc = -ENOMEM;
        return NULL;
    }
    init_object(&fm->object, OBJECT_FILEMAP);
    fm->self = halloc(&fm->object);
    fm->file = halloc(&filp->iob.object);
    if (fm->self < 0 || fm->file < 0) {
        if (rc) *rc = -ENFILE;
        return NULL;
    }
    hprotect(fm->self);
    hprotect(fm->file);
    fm->offset = offset;
    fm->pages = pages;
    fm->object.signaled = 1;
    fm->addr = addr;
    fm->size = size;
    fm->protect = flags | PT_FILE;

    // Map each page as not-present with PT_FILE set and the filemap handle
    // stored in the PTE, so the fault handler can page it in on first touch
    vaddr = (char *) addr;
    flags = (flags & ~PT_USER) | PT_FILE;
    for (i = 0; i < pages; i++) {
        map_page(vaddr, fm->self, flags);
        vaddr += PAGESIZE;
    }

    return addr;
}
Example #8
static int free_filemap(struct filemap *fm) {
    int rc;

    // Release the handle on the backing file
    hunprotect(fm->file);
    rc = hfree(fm->file);
    if (rc < 0) return rc;

    // Return the mapped virtual range to the address space map
    rmap_free(vmap, BTOP(fm->addr), PAGES(fm->size));

    // Release the filemap object's own handle
    hunprotect(fm->self);
    rc = hfree(fm->self);
    if (rc < 0) return rc;

    return 0;
}
Example #9
void *miomap(unsigned long addr, int size, int protect) {
    char *vaddr;
    int i;
    unsigned long flags = pte_flags_from_protect(protect);
    int pages = PAGES(size);

    // Grab a free virtual range, then map it page by page onto the
    // physical I/O region starting at addr
    vaddr = (char *) PTOB(rmap_alloc(vmap, pages));
    if (vaddr == NULL) return NULL;

    for (i = 0; i < pages; i++) {
        map_page(vaddr + PTOB(i), BTOP(addr) + i, flags | PT_PRESENT);
    }

    return vaddr;
}
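miomap and miounmap (Example #4) form a pair: one wires a physical I/O region into free virtual address space, the other tears the mapping down and releases the range. A hedged usage sketch (the device address, window size, and PAGE_READWRITE protection are made-up illustrative values):

// Map a hypothetical device's 8 KB register window, use it, unmap it
volatile unsigned long *regs = (volatile unsigned long *) miomap(0xFEC00000UL, 8192, PAGE_READWRITE);
if (regs) {
    unsigned long status = regs[0];   // read a (hypothetical) status register
    (void) status;
    miounmap((void *) regs, 8192);
}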
Example #10
static int rtl8139_open(struct dev *dev) {
  struct nic *tp = (struct nic *) dev->privdata;
  long ioaddr = tp->iobase;
  int rx_buf_len_idx;

  enable_irq(tp->irq);
  init_sem(&tp->tx_sem, NUM_TX_DESC);

  // The Rx ring allocation size is 2^N + delta, which is worst-case for
  // the kernel binary-buddy allocation.  We allocate the Tx bounce buffers
  // at the same time to use some of the otherwise wasted space.
  // The delta of +16 is required for dribble-over because the receiver does
  // not wrap when the packet terminates just beyond the end of the ring.

  rx_buf_len_idx = RX_BUF_LEN_IDX;
  do {
    tp->rx_buf_len = 8192 << rx_buf_len_idx;
    tp->rx_ring = alloc_pages_linear(PAGES(tp->rx_buf_len + 16 + (TX_BUF_SIZE * NUM_TX_DESC)), 'NIC');
  } while (tp->rx_ring == NULL && --rx_buf_len_idx >= 0);

  if (tp->rx_ring == NULL) return -ENOMEM;
  tp->tx_bufs = tp->rx_ring + tp->rx_buf_len + 16;

  rtl8139_init_ring(dev);
  tp->full_duplex = tp->duplex_lock;
  tp->tx_flag = (TX_FIFO_THRESH << 11) & 0x003f0000;
  tp->rx_config = (RX_FIFO_THRESH << 13) | (rx_buf_len_idx << 11) | (RX_DMA_BURST << 8);

  rtl_hw_start(dev);
  //netif_start_tx_queue(dev);

  //kprintf("%s: rtl8139_open() ioaddr %#lx IRQ %d GP Pins %2.2x %s-duplex\n",
  //      dev->name, ioaddr, tp->irq, inp(ioaddr + GPPinData),
  //      tp->full_duplex ? "full" : "half");

  // Set the timer to check for link beat and perhaps switch to an
  // alternate media type.
  init_timer(&tp->timer, rtl8139_timer, dev);
  mod_timer(&tp->timer, get_ticks() + 3*HZ);

  return 0;
}
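The size passed to alloc_pages_linear is worth unpacking with concrete numbers. The constants are not shown in this listing, so typical rtl8139 driver values are assumed below:

// Assumed typical rtl8139 values (not shown in this listing):
//   RX_BUF_LEN_IDX = 2 (ring size 8192 << 2 = 32 KB)
//   NUM_TX_DESC    = 4 transmit descriptors
//   TX_BUF_SIZE    = 1536 bytes per Tx bounce buffer
int rx_buf_len = 8192 << 2;                 // 32768-byte Rx ring
int total = rx_buf_len                      // ring
          + 16                              // dribble-over slack past the ring end
          + 1536 * 4;                       // Tx bounce buffers (6144 bytes)
// total = 38928 bytes, which PAGES() rounds up to 10 pages of 4 KB (40960 bytes);
// packing the Tx buffers into the same allocation keeps them from needing
// pages of their own, which is what the comment means by "wasted space".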
Example #11
int vmsync(void *addr, unsigned long size) {
    struct filemap *fm = NULL;
    int pages = PAGES(size);
    int i, rc;
    char *vaddr;

    if (size == 0) return 0;
    addr = (void *) PAGEADDR(addr);
    if (!valid_range(addr, size)) return -EINVAL;

    // Walk the range and write back dirty, present, file-backed pages,
    // locking each filemap once per run of pages it owns
    vaddr = (char *) addr;
    for (i = 0; i < pages; i++) {
        if (page_directory_mapped(vaddr)) {
            pte_t flags = get_page_flags(vaddr);
            if ((flags & (PT_FILE | PT_PRESENT | PT_DIRTY)) == (PT_FILE | PT_PRESENT | PT_DIRTY)) {
                unsigned long pfn = BTOP(virt2phys(vaddr));
                struct filemap *newfm = (struct filemap *) hlookup(pfdb[pfn].owner);
                if (newfm != fm) {
                    if (fm) {
                        rc = unlock_filemap(fm);
                        if (rc < 0) return rc;
                    }
                    fm = newfm;
                    rc = wait_for_object(fm, INFINITE);
                    if (rc < 0) return rc;
                }

                rc = save_file_page(fm, vaddr);
                if (rc < 0) return rc;
            }
        }
        vaddr += PAGESIZE;
    }

    if (fm) {
        rc = unlock_filemap(fm);
        if (rc < 0) return rc;
    }

    return 0;
}
Example #12
int vmprotect(void *addr, unsigned long size, int protect) {
    int pages = PAGES(size);
    int i;
    char *vaddr;
    unsigned long flags;

    if (size == 0) return 0;
    addr = (void *) PAGEADDR(addr);
    if (!valid_range(addr, size)) return -EINVAL;
    flags = pte_flags_from_protect(protect);
    if (flags == 0xFFFFFFFF) return -EINVAL;

    // Apply the new protection bits to every mapped page in the range
    vaddr = (char *) addr;
    for (i = 0; i < pages; i++) {
        if (page_mapped(vaddr)) {
            set_page_flags(vaddr, (get_page_flags(vaddr) & ~PT_PROTECTMASK) | flags);
        }
        vaddr += PAGESIZE;
    }

    return 0;
}
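A hedged usage sketch for vmprotect, revoking write access once a buffer is initialized (PAGE_READONLY and PAGE_READWRITE are assumed Win32-style sanos protection constants, and 'DEMO' is an arbitrary tag):

int rc;
void *buf = vmalloc(NULL, 32 * 1024, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE, 'DEMO', &rc);
if (buf != NULL) {
    // ... fill buf ...
    vmprotect(buf, 32 * 1024, PAGE_READONLY);  // stray writes now fault
}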
Example #13
void load_kernel(int bootdrv) {
  struct master_boot_record *mbr;
  struct superblock *sb;
  struct groupdesc *group;
  struct inodedesc *inode;
  int blocksize;
  int blks_per_sect;
  int kernelsize;
  int kernelpages;
  char *kerneladdr;
  struct dos_header *doshdr;
  struct image_header *imghdr;
  char *addr;
  blkno_t blkno;
  int i;
  int j;
  pte_t *pt;
  int imgpages;
  int start;
  char *label;
  struct boot_sector *bootsect;

  //kprintf("Loading kernel");

  // Determine active boot partition if booting from harddisk
  if (bootdrv & 0x80 && (bootdrv & 0xF0) != 0xF0) {
    mbr = (struct master_boot_record *) bsect;
    if (boot_read(mbr, SECTORSIZE, 0) != SECTORSIZE) {
      panic("unable to read master boot record");
    }

    if (mbr->signature != MBR_SIGNATURE) panic("invalid boot signature");

    bootsect = (struct boot_sector *) bsect;
    label = bootsect->label;
    if (label[0] == 'S' && label[1] == 'A' && label[2] == 'N' && label[3] == 'O' && label[4] == 'S') {
      // Disk does not have a partition table
      start = 0;
      bootpart = -1;
    } else {
      // Find active partition
      bootpart = -1;
      for (i = 0; i < 4; i++) {
        if (mbr->parttab[i].bootid == 0x80) {
          bootpart = i;
          start = mbr->parttab[i].relsect;
        }
      }

      if (bootpart == -1) panic("no bootable partition on boot drive");
    }
  } else {
    start = 0;
    bootpart = 0;
  }

  // Read super block from boot device
  sb = (struct superblock *) ssect;
  if (boot_read(sb, SECTORSIZE, 1 + start) != SECTORSIZE) {
    panic("unable to read super block from boot device");
  }

  // Check signature and version
  if (sb->signature != DFS_SIGNATURE) panic("invalid DFS signature");
  if (sb->version != DFS_VERSION) panic("invalid DFS version");
  blocksize = 1 << sb->log_block_size;
  blks_per_sect = blocksize / SECTORSIZE;

  // Read first group descriptor
  group = (struct groupdesc *) gsect;
  if (boot_read(group, SECTORSIZE, sb->groupdesc_table_block * blks_per_sect + start) != SECTORSIZE) {
    panic("unable to read group descriptor from boot device");
  }

  // Read inode for kernel
  inode = (struct inodedesc *) isect;
  if (boot_read(isect, SECTORSIZE, group->inode_table_block * blks_per_sect + start) != SECTORSIZE) {
    panic("unable to read kernel inode from boot device");
  }
  inode += DFS_INODE_KRNL;

  // Calculate kernel size
  kernelsize = (int) inode->size;
  kernelpages = PAGES(kernelsize);
  //kprintf("Kernel size %d KB\n", kernelsize / 1024);

  // Allocate page table for kernel
  if (kernelpages > PTES_PER_PAGE) panic("kernel too big");
  pt = (pte_t *) alloc_heap(1);
  pdir[PDEIDX(OSBASE)] = (unsigned long) pt | PT_PRESENT | PT_WRITABLE;

  // Allocate pages for kernel
  kerneladdr = alloc_heap(kernelpages);

  // Read kernel from boot device
  if (inode->depth == 0) {
    addr = kerneladdr;
    for (i = 0; i < (int) inode->blocks; i++) {
      if (boot_read(addr, blocksize, inode->blockdir[i] * blks_per_sect + start) != blocksize) {
        panic("error reading kernel from boot device");
      }
      addr += blocksize;
    }
  } else if (inode->depth == 1) {
    addr = kerneladdr;
    blkno = 0;
    for (i = 0; i < DFS_TOPBLOCKDIR_SIZE; i++) {
      if (boot_read(blockdir, blocksize, inode->blockdir[i] * blks_per_sect + start) != blocksize) {
        panic("error reading kernel inode dir from boot device");
      }

      for (j = 0; j < (int) (blocksize / sizeof(blkno_t)); j++) {
        if (boot_read(addr, blocksize, blockdir[j] * blks_per_sect + start) != blocksize) {
          panic("error reading kernel inode dir from boot device");
        }
        
        addr += blocksize;

        blkno++;
        if (blkno == inode->blocks) break;
      }

      if (blkno == inode->blocks) break;
    }
  } else {
    panic("unsupported inode depth");
  }

  // Determine entry point for kernel
  doshdr = (struct dos_header *) kerneladdr;
  imghdr = (struct image_header *) (kerneladdr + doshdr->e_lfanew);
  krnlentry = imghdr->optional.address_of_entry_point + OSBASE;

  // Allocate pages for .data section
  imgpages = PAGES(imghdr->optional.size_of_image);
  alloc_heap(imgpages - kernelpages);

  // Relocate resource data and clear uninitialized data
  if (imghdr->header.number_of_sections == 4) {
    struct image_section_header *data = &imghdr->sections[2];
    struct image_section_header *rsrc = &imghdr->sections[3];
    memcpy(kerneladdr + rsrc->virtual_address, kerneladdr + rsrc->pointer_to_raw_data, rsrc->size_of_raw_data);
    memset(kerneladdr + data->virtual_address + data->size_of_raw_data, 0, data->virtual_size - data->size_of_raw_data);
  }

  // Map kernel into virtual address space
  for (i = 0; i < imgpages; i++) pt[i] = (unsigned long) (kerneladdr + i * PAGESIZE) | PT_PRESENT | PT_WRITABLE;
}
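The last two steps hinge on x86 two-level paging: pdir[PDEIDX(OSBASE)] installs pt as the page table for the 4 MB slot containing OSBASE, and each pt[i] then maps the virtual page OSBASE + i*PAGESIZE onto the physical page at kerneladdr + i*PAGESIZE. A sketch of the index helpers this assumes (32-bit x86 with 4 KB pages; the real definitions live in the sanos headers):

// Each page-directory entry covers 4 MB (1024 page-table entries of 4 KB each)
#define PDEIDX(va)  (((unsigned long)(va)) >> 22)            // top 10 bits: directory slot
#define PTEIDX(va)  ((((unsigned long)(va)) >> 12) & 0x3FF)  // next 10 bits: table slot
// If OSBASE were, say, 0x80000000 (an assumption), PDEIDX(OSBASE) would be 512.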
Example #14
void *vmalloc(void *addr, unsigned long size, int type, int protect, unsigned long tag, int *rc) {
    int pages = PAGES(size);
    unsigned long flags = pte_flags_from_protect(protect);
    int i;

    if (rc) *rc = 0;
    if (size == 0) {
        if (rc) *rc = -EINVAL;
        return NULL;
    }
    if ((type & MEM_COMMIT) != 0 && flags == 0xFFFFFFFF) {
        if (rc) *rc = -EINVAL;
        return NULL;
    }
    addr = (void *) PAGEADDR(addr);
    if (!addr && (type & MEM_COMMIT) != 0) type |= MEM_RESERVE;
    if (!tag) tag = 'VM';

    if (type & MEM_RESERVE) {
        if (addr == NULL) {
            if (type & MEM_ALIGN64K) {
                addr = (void *) PTOB(rmap_alloc_align(vmap, pages, 64 * 1024 / PAGESIZE));
            } else {
                addr = (void *) PTOB(rmap_alloc(vmap, pages));
            }

            if (addr == NULL) {
                if (rc) *rc = -ENOMEM;
                return NULL;
            }
        } else {
            if (rmap_reserve(vmap, BTOP(addr), pages)) {
                if (rc) *rc = -ENOMEM;
                return NULL;
            }
        }
    } else {
        if (!valid_range(addr, size)) {
            if (rc) *rc = -EFAULT;
            return NULL;
        }
    }

    if (type & MEM_COMMIT) {
        char *vaddr;
        unsigned long pfn;

        vaddr = (char *) addr;
        for (i = 0; i < pages; i++) {
            if (page_mapped(vaddr)) {
                set_page_flags(vaddr, flags | PT_PRESENT);
            } else {
                pfn = alloc_pageframe(tag);
                if (pfn == 0xFFFFFFFF) {
                    if (rc) *rc = -ENOMEM;
                    return NULL;
                }

                map_page(vaddr, pfn, flags | PT_PRESENT);
                memset(vaddr, 0, PAGESIZE);
            }
            vaddr += PAGESIZE;
        }
    }

    return addr;
}
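vmalloc follows the VirtualAlloc model: MEM_RESERVE claims address space in vmap without backing it, and a later MEM_COMMIT backs pages with zeroed frames (note the memset after map_page). A hedged two-step usage sketch (tag and protection values are illustrative):

int rc;

// Reserve 1 MB of address space up front...
void *base = vmalloc(NULL, 1024 * 1024, MEM_RESERVE, PAGE_READWRITE, 'DEMO', &rc);
if (base != NULL) {
    // ...then commit only the first 16 KB once it is actually needed
    void *low = vmalloc(base, 16 * 1024, MEM_COMMIT, PAGE_READWRITE, 'DEMO', &rc);
    // low == base on success; the committed pages arrive zero-filled
}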