Example #1
static int read_evdev(struct device *dev, struct dev_input *inp)
{
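	/*
	 * Read one evdev event and translate it into a dev_input record.
	 * Returns -1 if the device is not open, on read errors (removing
	 * the device unless errno is EAGAIN), or for unhandled event
	 * types; 0 otherwise.
	 */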
	struct input_event iev;	/* linux evdev event */
	int rdbytes;

	if(!IS_DEV_OPEN(dev))
		return -1;

	do {
		rdbytes = read(dev->fd, &iev, sizeof iev);
	} while(rdbytes == -1 && errno == EINTR);

	/* disconnect? */
	if(rdbytes == -1) {
		if(errno != EAGAIN) {
			perror("read error");
			remove_device(dev);
		}
		return -1;
	}

	if(rdbytes > 0) {
		inp->tm = iev.time;

		switch(iev.type) {
		case EV_REL:
			inp->type = INP_MOTION;
			inp->idx = iev.code - REL_X;
			inp->val = iev.value;
			/*printf("[%s] EV_REL(%d): %d\n", dev->name, inp->idx, iev.value);*/
			break;

		case EV_ABS:
			inp->type = INP_MOTION;
			inp->idx = iev.code - ABS_X;
			inp->val = map_range(dev, inp->idx, iev.value);
			/*printf("[%s] EV_ABS(%d): %d (orig: %d)\n", dev->name, inp->idx, inp->val, iev.value);*/
			break;

		case EV_KEY:
			inp->type = INP_BUTTON;
			inp->idx = iev.code - BTN_0;
			inp->val = iev.value;
			break;

		case EV_SYN:
			inp->type = INP_FLUSH;
			/*printf("[%s] EV_SYN\n", dev->name);*/
			break;

		default:
			return -1;
		}
	}
	return 0;
}
Example #2
void __init kasan_init(void)
{
	int i;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif

	memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
	load_cr3(early_level4_pgt);
	__flush_tlb_all();

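	/*
	 * Tear down the current shadow mappings, then rebuild them: zero
	 * shadow for unpopulated ranges, real shadow for the directly
	 * mapped memory and the kernel image.
	 */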
	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

	for (i = 0; i < E820_X_MAX; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		if (map_range(&pfn_mapped[i]))
			panic("kasan: unable to allocate shadow!");
	}
	kasan_populate_zero_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		kasan_mem_to_shadow((void *)__START_KERNEL_map));

	vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
			(unsigned long)kasan_mem_to_shadow(_end),
			NUMA_NO_NODE);

	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
			(void *)KASAN_SHADOW_END);

	load_cr3(init_level4_pgt);
	__flush_tlb_all();

	/*
	 * kasan_zero_page has been used as early shadow memory, thus it may
	 * contain some garbage. Now we can clear and write protect it, since
	 * after the TLB flush no one should write to it.
	 */
	memset(kasan_zero_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO);
		set_pte(&kasan_zero_pte[i], pte);
	}
	/* Flush TLBs again to be sure that write protection applied. */
	__flush_tlb_all();

	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}
Example #3
uintptr_t map_virtual_virtual(uintptr_t* _vastart, uintptr_t vaend, bool readonly) {
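    /* Map the virtual range [*_vastart, vaend) into a newly found hole in
     * the current process's address space. Returns the new address with the
     * original sub-page offset preserved, or 0 if no hole was found or the
     * mapping failed. */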
    uintptr_t vastart = *_vastart;
    uintptr_t vaoffset = vastart % 0x1000;
    vastart = ALIGN_DOWN(vastart, 0x1000);
    vaend = ALIGN_UP(vaend, 0x1000);

    proc_t* proc = get_current_process();

    mmap_area_t** _hole = find_va_hole(proc, vaend-vastart, 0x1000);
    mmap_area_t* hole = *_hole;
    if (hole == NULL) {
        return 0;
    }
    hole->mtype = kernel_allocated_heap_data;
    uintptr_t temporary = hole->vastart;
    if (!map_range(_vastart, vaend, &temporary, hole->vaend, true, readonly, false, proc->pml4)) {
        free_mmap_area(hole, _hole, proc);
        return 0;
    }
    return hole->vastart+vaoffset;
}
Example #4
void inmate_main(void)
{
	void (*entry)(int, struct boot_params *);
	struct setup_data *setup_data;
	void *kernel;

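	/*
	 * Map the kernel image, forward platform parameters from the
	 * communication region via setup_data, then jump to the kernel's
	 * 64-bit entry point.
	 */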
	kernel = (void *)(unsigned long)boot.params.kernel_alignment;

	map_range(kernel, boot.params.init_size, MAP_CACHED);

	setup_data = (struct setup_data *)boot.params.setup_data;
	setup_data->pm_timer_address = comm_region->pm_timer_address;
	setup_data->pci_mmconfig_base = comm_region->pci_mmconfig_base;
	setup_data->tsc_khz = comm_region->tsc_khz;
	setup_data->apic_khz = comm_region->apic_khz;
	setup_data->num_cpus = comm_region->num_cpus;

	smp_wait_for_all_cpus();
	memcpy(setup_data->cpu_ids, smp_cpu_ids, SMP_MAX_CPUS);

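	/* 0x200 is the fixed offset of the 64-bit entry point in a bzImage. */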
	entry = kernel + 0x200;
	entry(0, &boot.params);
}
Example #5
void __init kasan_init(void)
{
	int i;
	void *shadow_cpu_entry_begin, *shadow_cpu_entry_end;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif

	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));

	/*
	 * We use the same shadow offset for 4- and 5-level paging to
	 * facilitate boot-time switching between paging modes.
	 * As result in 5-level paging mode KASAN_SHADOW_START and
	 * KASAN_SHADOW_END are not aligned to PGD boundary.
	 *
	 * KASAN_SHADOW_START doesn't share PGD with anything else.
	 * We claim whole PGD entry to make things easier.
	 *
	 * KASAN_SHADOW_END lands in the last PGD entry and it collides with
	 * bunch of things like kernel code, modules, EFI mapping, etc.
	 * We need to take extra steps to not overwrite them.
	 */
	if (pgtable_l5_enabled()) {
		void *ptr;

		ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
		memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table));
		set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],
				__pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));
	}

	load_cr3(early_top_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);

	kasan_populate_early_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

	for (i = 0; i < E820_MAX_ENTRIES; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		map_range(&pfn_mapped[i]);
	}

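	/* Compute the page-aligned shadow range covering the CPU entry area. */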
	shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
	shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
	shadow_cpu_entry_begin = (void *)round_down(
			(unsigned long)shadow_cpu_entry_begin, PAGE_SIZE);

	shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
					CPU_ENTRY_AREA_MAP_SIZE);
	shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
	shadow_cpu_entry_end = (void *)round_up(
			(unsigned long)shadow_cpu_entry_end, PAGE_SIZE);

	kasan_populate_early_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		shadow_cpu_entry_begin);

	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
			      (unsigned long)shadow_cpu_entry_end, 0);

	kasan_populate_early_shadow(shadow_cpu_entry_end,
			kasan_mem_to_shadow((void *)__START_KERNEL_map));

	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
			      (unsigned long)kasan_mem_to_shadow(_end),
			      early_pfn_to_nid(__pa(_stext)));

	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)MODULES_END),
					(void *)KASAN_SHADOW_END);

	load_cr3(init_top_pgt);
	__flush_tlb_all();

	/*
	 * kasan_early_shadow_page has been used as early shadow memory, thus
	 * it may contain some garbage. Now we can clear and write protect it,
	 * since after the TLB flush no one should write to it.
	 */
	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte;
		pgprot_t prot;

		prot = __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC);
		pgprot_val(prot) &= __default_kernel_pte_mask;

		pte = __pte(__pa(kasan_early_shadow_page) | pgprot_val(prot));
		set_pte(&kasan_early_shadow_pte[i], pte);
	}
	/* Flush TLBs again to be sure that write protection applied. */
	__flush_tlb_all();

	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}
Example #6
void ioapic_init(void)
{
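	/* The I/O APIC registers are memory-mapped I/O, so map them uncached. */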
	map_range(IOAPIC_BASE, PAGE_SIZE, MAP_UNCACHED);
}
Example #7
void inmate_main(void)
{
	enum { ROLE_UNDEFINED, ROLE_CONTROLLER, ROLE_TARGET } role;
	unsigned long min = -1, max = 0, rtt;
	struct eth_header *rx_packet;
	unsigned long long start;
	bool first_round = true;
	unsigned int n;
	u32 eerd, val;
	u8 mac[6];
	u64 bar;
	int bdf;

	printk_uart_base = UART_BASE;

	bdf = pci_find_device(PCI_ID_ANY, PCI_ID_ANY, 0);
	if (bdf < 0) {
		printk("No device found!\n");
		return;
	}
	printk("Found %04x:%04x at %02x:%02x.%x\n",
	       pci_read_config(bdf, PCI_CFG_VENDOR_ID, 2),
	       pci_read_config(bdf, PCI_CFG_DEVICE_ID, 2),
	       bdf >> 8, (bdf >> 3) & 0x1f, bdf & 0x3);

	bar = pci_read_config(bdf, PCI_CFG_BAR, 4);
	if ((bar & 0x6) == 0x4)
		bar |= (u64)pci_read_config(bdf, PCI_CFG_BAR + 4, 4) << 32;
	mmiobar = (void *)(bar & ~0xfUL);
	map_range(mmiobar, 128 * 1024, MAP_UNCACHED);
	printk("MMIO register BAR at %p\n", mmiobar);

	pci_write_config(bdf, PCI_CFG_COMMAND,
			 PCI_CMD_MEM | PCI_CMD_MASTER, 2);

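	/* Reset the NIC, then bring the link up with auto speed detection. */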
	mmio_write32(mmiobar + E1000_REG_CTRL, E1000_CTRL_RST);
	delay_us(20000);

	val = mmio_read32(mmiobar + E1000_REG_CTRL);
	val &= ~(E1000_CTRL_LRST | E1000_CTRL_FRCSPD);
	val |= E1000_CTRL_ASDE | E1000_CTRL_SLU;
	mmio_write32(mmiobar + E1000_REG_CTRL, val);
	printk("Reset done, waiting for link...");

	while (!(mmio_read32(mmiobar + E1000_REG_STATUS) & E1000_STATUS_LU))
		cpu_relax();
	printk(" ok\n");

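	/*
	 * Use the MAC address from the receive address registers if they
	 * are valid, otherwise read it from the first three EEPROM words.
	 */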
	if (mmio_read32(mmiobar + E1000_REG_RAH) & E1000_RAH_AV) {
		*(u32 *)mac = mmio_read32(mmiobar + E1000_REG_RAL);
		*(u16 *)&mac[4] = mmio_read32(mmiobar + E1000_REG_RAH);
	} else {
		for (n = 0; n < 3; n++) {
			mmio_write32(mmiobar + E1000_REG_EERD,
				     E1000_EERD_START |
				     (n << E1000_EERD_ADDR_SHIFT));
			do {
				eerd = mmio_read32(mmiobar + E1000_REG_EERD);
				cpu_relax();
			} while (!(eerd & E1000_EERD_DONE));
			mac[n * 2] = (u8)(eerd >> E1000_EERD_DATA_SHIFT);
			mac[n * 2 + 1] =
				(u8)(eerd >> (E1000_EERD_DATA_SHIFT + 8));
		}
	}

	printk("MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);

	mmio_write32(mmiobar + E1000_REG_RAL, *(u32 *)mac);
	mmio_write32(mmiobar + E1000_REG_RAH, *(u16 *)&mac[4] | E1000_RAH_AV);

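	/* Set up the receive descriptor ring and enable the receiver. */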
	for (n = 0; n < RX_DESCRIPTORS; n++)
		rx_ring[n].addr = (unsigned long)&buffer[n * RX_BUFFER_SIZE];
	mmio_write32(mmiobar + E1000_REG_RDBAL, (unsigned long)&rx_ring);
	mmio_write32(mmiobar + E1000_REG_RDBAH, 0);
	mmio_write32(mmiobar + E1000_REG_RDLEN, sizeof(rx_ring));
	mmio_write32(mmiobar + E1000_REG_RDH, 0);
	mmio_write32(mmiobar + E1000_REG_RDT, RX_DESCRIPTORS - 1);

	val = mmio_read32(mmiobar + E1000_REG_RCTL);
	val |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_BSIZE_2048 |
		E1000_RCTL_SECRC;
	mmio_write32(mmiobar + E1000_REG_RCTL, val);

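	/* Set up the transmit descriptor ring and enable the transmitter. */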
	mmio_write32(mmiobar + E1000_REG_TDBAL, (unsigned long)&tx_ring);
	mmio_write32(mmiobar + E1000_REG_TDBAH, 0);
	mmio_write32(mmiobar + E1000_REG_TDLEN, sizeof(tx_ring));
	mmio_write32(mmiobar + E1000_REG_TDH, 0);
	mmio_write32(mmiobar + E1000_REG_TDT, 0);

	val = mmio_read32(mmiobar + E1000_REG_TCTL);
	val |= E1000_TCTL_EN | E1000_TCTL_PSP | E1000_TCTL_CT_DEF |
		E1000_TCTL_COLD_DEF;
	mmio_write32(mmiobar + E1000_REG_TCTL, val);
	mmio_write32(mmiobar + E1000_REG_TIPG,
		     E1000_TIPG_IPGT_DEF | E1000_TIPG_IPGR1_DEF |
		     E1000_TIPG_IPGR2_DEF);

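	/*
	 * Role negotiation: broadcast an announcement and wait briefly for
	 * a TARGET_ROLE reply; if none arrives, become the controller and
	 * hand the target role to the first peer that announces itself.
	 */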
	role = ROLE_UNDEFINED;

	memcpy(tx_packet.src, mac, sizeof(tx_packet.src));
	memset(tx_packet.dst, 0xff, sizeof(tx_packet.dst));
	tx_packet.type = FRAME_TYPE_ANNOUNCE;
	send_packet(&tx_packet, sizeof(tx_packet));

	start = pm_timer_read();
	while (pm_timer_read() - start < NS_PER_MSEC &&
	       role == ROLE_UNDEFINED) {
		rx_packet = packet_received();
		if (!rx_packet)
			continue;

		if (rx_packet->type == FRAME_TYPE_TARGET_ROLE) {
			role = ROLE_TARGET;
			memcpy(tx_packet.dst, rx_packet->src,
			       sizeof(tx_packet.dst));
		}
		packet_reception_done();
	}

	if (role == ROLE_UNDEFINED) {
		role = ROLE_CONTROLLER;
		printk("Waiting for peer\n");
		while (1) {
			rx_packet = packet_received();
			if (!rx_packet)
				continue;

			if (rx_packet->type == FRAME_TYPE_ANNOUNCE) {
				memcpy(tx_packet.dst, rx_packet->src,
				       sizeof(tx_packet.dst));
				packet_reception_done();

				tx_packet.type = FRAME_TYPE_TARGET_ROLE;
				send_packet(&tx_packet, sizeof(tx_packet));
				break;
			} else {
				packet_reception_done();
			}
		}
	}

	mmio_write32(mmiobar + E1000_REG_RCTL,
		     mmio_read32(mmiobar + E1000_REG_RCTL) & ~E1000_RCTL_BAM);

	if (role == ROLE_CONTROLLER) {
		printk("Running as controller\n");
		tx_packet.type = FRAME_TYPE_PING;
		while (1) {
			start = pm_timer_read();
			send_packet(&tx_packet, sizeof(tx_packet));

			do
				rx_packet = packet_received();
			while (!rx_packet ||
			       rx_packet->type != FRAME_TYPE_PONG);
			packet_reception_done();

			if (!first_round) {
				rtt = pm_timer_read() - start;
				if (rtt < min)
					min = rtt;
				if (rtt > max)
					max = rtt;
				printk("Received pong, RTT: %6ld ns, "
				       "min: %6ld ns, max: %6ld ns\n",
				       rtt, min, max);
			}
			first_round = false;
			delay_us(100000);
		}
	} else {
		printk("Running as target\n");
		tx_packet.type = FRAME_TYPE_PONG;
		while (1) {
			rx_packet = packet_received();
			if (!rx_packet || rx_packet->type != FRAME_TYPE_PING)
				continue;
			packet_reception_done();
			send_packet(&tx_packet, sizeof(tx_packet));
		}
	}
}
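Example #8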
// When populating the physical allocator, certain addresses are unavailable or
// reserved. The multiboot tables give a sorted list of regions which are
// useable memory, but some of these contain the kernel, modules, or the
// multiboot tables themselves.
static void insert_regions(struct multiboot_info *multiboot_tables) {
    struct reserved_region {
        phys_addr start;
        phys_addr end;
    };
    enum status status;
    size_t modules_index_start;
    size_t reserved_count;
    bool inserted = false;
    uint64_t total = 0;

    struct multiboot_mod_list *modules =
        (struct multiboot_mod_list *)(uintptr_t)multiboot_tables->mods_addr;
    struct multiboot_mmap_entry *mmap =
        (struct multiboot_mmap_entry *)(uintptr_t)multiboot_tables->mmap_addr;

    uintptr_t mods_list_start = align_as(multiboot_tables->mods_addr,
                                         Page_Small);
    uintptr_t mods_list_end = round_next(multiboot_tables->mods_addr +
                                         multiboot_tables->mods_count *
                                         sizeof(struct multiboot_mod_list),
                                         Page_Small);

    if (physical_region_contains(Boot_Map_Start, Boot_Map_End,
                                 mods_list_start, mods_list_end)) {
        modules_index_start = 1;
        reserved_count = multiboot_tables->mods_count + 1;
    } else {
        modules_index_start = 2;
        reserved_count = multiboot_tables->mods_count + 2;

        status = map_range(modules, mods_list_start,
                           (mods_list_end - mods_list_start) / Page_Small,
                           Memory_No_Attributes);
        assert_ok(status);
    }

    struct reserved_region reserved[reserved_count];

    if (modules_index_start == 2) {
        if (Boot_Map_Start < mods_list_start) {
            reserved[0].start = Boot_Map_Start;
            reserved[0].end = Boot_Map_End;
            reserved[1].start = mods_list_start;
            reserved[1].end = mods_list_end;
        } else {
            reserved[0].start = mods_list_start;
            reserved[0].end = mods_list_end;
            reserved[1].start = Boot_Map_Start;
            reserved[1].end = Boot_Map_End;
        }
    } else {
        reserved[0].start = Boot_Map_Start;
        reserved[0].end = Boot_Map_End;
    }

    for (size_t i = 0; i < multiboot_tables->mods_count; ++i) {
        reserved[i + modules_index_start].start = modules[i].mod_start;
        reserved[i + modules_index_start].end = round_next(modules[i].mod_end,
                                                           Page_Small);
    }

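    /* Walk the multiboot memory map and hand every available byte that is
     * not covered by a reserved region to the physical allocator. */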
    for (size_t i = 0; i < (multiboot_tables->mmap_length /
                            sizeof(struct multiboot_mmap_entry)); ++i) {
        inserted = false;
        if (mmap[i].type == MULTIBOOT_MEMORY_AVAILABLE) {
            total += mmap[i].len;
            for (size_t j = 0; j < reserved_count; ++j) {
                uintptr_t mem_start = mmap[i].addr;
                uintptr_t mem_end = mmap[i].addr + mmap[i].len;
                if (physical_region_contains(mem_start, mem_end,
                                             reserved[j].start,
                                             reserved[j].end)) {
                    inserted = true;
                    if (reserved[j].start > mem_start) {
                        physical_free_range(mem_start,
                                            reserved[j].start - mem_start);
                    }
                    if (j + 1 < reserved_count &&
                            physical_region_contains(mem_start, mem_end,
                                                     reserved[j + 1].start,
                                                     reserved[j + 1].end)) {
                        if (reserved[j].end != reserved[j + 1].start) {
                            physical_free_range(reserved[j].end,
                                                reserved[j + 1].start -
                                                reserved[j].end);
                        }
                        mmap[i].addr = reserved[j + 1].start;
                        mmap[i].len -= reserved[j + 1].start -
                                       reserved[j].start;
                    } else if (reserved[j].end < mem_end) {
                        physical_free_range(reserved[j].end,
                                            mem_end - reserved[j].end);
                    }
                }
            }
            if (!inserted) {
                physical_free_range(mmap[i].addr,
                                    mmap[i].len);
            }
        }
    }

    if (modules_index_start == 2) {
        unmap_range(modules, (mods_list_end - mods_list_start) / Page_Small,
                    false);
    }
}
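The carving logic above hinges on physical_region_contains(), which is defined elsewhere in the project. Below is a minimal sketch of the containment test it is assumed to perform, with the signature inferred from the calls above and phys_addr treated as a 64-bit physical address (both assumptions, not the project's actual code):

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t phys_addr;  /* assumption: stands in for the project's own typedef */

/* Hypothetical sketch: true when the region [rstart, rend) lies entirely
 * inside the region [start, end). */
static bool physical_region_contains(phys_addr start, phys_addr end,
                                     phys_addr rstart, phys_addr rend)
{
    return start <= rstart && rend <= end;
}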