/* Swap the CPU register state from oldthread to newthread. */
void arch_context_switch(thread_t *oldthread, thread_t *newthread) {
    LTRACEF("old %p (%s), new %p (%s)\n", oldthread, oldthread->name, newthread, newthread->name);

    /* save/prepare FPU state for the outgoing thread before the switch */
    arm64_fpu_pre_context_switch(oldthread);

#if WITH_SMP
    DSB; /* broadcast tlb operations in case the thread moves to another cpu */
#endif

    /* hand off to the asm routine that saves/restores the stack pointers */
    arm64_context_switch(&oldthread->arch.sp, newthread->arch.sp);
}
/*
 * Trampoline that every freshly created thread starts in: release the
 * thread lock held across the initial reschedule, re-enable interrupts,
 * run the thread's entry point, then retire the thread with its result.
 */
static void initial_thread_func(void) {
    thread_t *t = get_current_thread();

    LTRACEF("initial_thread_func: thread %p calling %p with arg %p\n", t, t->entry, t->arg);

    /* release the thread lock that was implicitly held across the reschedule */
    spin_unlock(&thread_lock);
    arch_enable_ints();

    int ret = t->entry(t->arg);

    LTRACEF("initial_thread_func: thread %p exiting with %d\n", t, ret);

    thread_exit(ret);
}
/* do a path parse, looking up each component */
int ext2_lookup(ext2_t *ext2, const char *_path, inodenum_t *inum) {
    LTRACEF("path '%s', inum %p\n", _path, inum);

    /* take a mutable copy of the path, since the walk tokenizes it in place */
    char path[512];
    strlcpy(path, _path, sizeof(path));
    /* NOTE(review): paths longer than 511 bytes are silently truncated here —
     * consider checking strlcpy's return value against sizeof(path) and
     * failing the lookup instead */

    /* walk from the root inode; last arg presumably a link-follow/recursion
     * flag — TODO confirm against ext2_walk */
    return ext2_walk(ext2, path, &ext2->root_inode, inum, 1);
}
/*
 * Entry trampoline for new threads: leave the critical section entered by
 * the scheduler, run the thread's entry function, then exit the thread
 * with whatever the entry function returned.
 */
static void initial_thread_func(void) {
    thread_t *thr = get_current_thread();

#if LOCAL_TRACE
    LTRACEF("thread %p calling %p with arg %p\n", thr, thr->entry, thr->arg);
    dump_thread(thr);
#endif

    /* exit the implicit critical section we're within */
    exit_critical_section();

    int ret = thr->entry(thr->arg);

    LTRACEF("thread %p exiting with %d\n", thr, ret);

    thread_exit(ret);
}
/*
 * Return true iff every entry of the page table is the invalid descriptor,
 * i.e. the table can be freed.  A table occupies one page and each entry is
 * 8 bytes, hence the (page_size_shift - 3) entry count.
 */
static bool page_table_is_clear(pte_t *page_table, uint page_size_shift) {
    int count = 1U << (page_size_shift - 3);

    for (int i = 0; i < count; i++) {
        pte_t entry = page_table[i];
        if (entry != MMU_PTE_DESCRIPTOR_INVALID) {
            /* found a live descriptor; table is still in use */
            LTRACEF("page_table at %p still in use, index %d is 0x%llx\n", page_table, i, entry);
            return false;
        }
    }

    LTRACEF("page table at %p is clear\n", page_table);
    return true;
}
/*
 * Completion callback for the test's queued RX transfer: mark the queue
 * slot free and wake whoever is blocked on the test event.
 */
static status_t rx_callback(ep_t endpoint, struct usbc_transfer *transfer) {
    LTRACEF("ep %u, transfer %p\n", endpoint, transfer);

    rxqueued = false;
    event_signal(&testevent, false);

    return NO_ERROR;
}
/*
 * Count how many TSC ticks elapse across a calibration interval of
 * duration_ms milliseconds measured against the selected reference clock
 * (HPET or PIT).  Runs three trials and returns the smallest elapsed
 * count, on the theory that the shortest trial suffered the least
 * interference (interrupts, SMIs).
 */
static uint64_t calibrate_tsc_count(uint16_t duration_ms) {
    uint64_t best_time = UINT64_MAX;

    for (int tries = 0; tries < 3; ++tries) {
        /* arm the reference clock for one calibration cycle */
        switch (calibration_clock) {
            case CLOCK_HPET:
                hpet_calibration_cycle_preamble();
                break;
            case CLOCK_PIT:
                pit_calibration_cycle_preamble(duration_ms);
                break;
            default:
                PANIC_UNIMPLEMENTED;
        }

        // Use CPUID to serialize the instruction stream
        uint32_t _ignored;
        cpuid(0, &_ignored, &_ignored, &_ignored, &_ignored);
        uint64_t start = rdtsc();
        cpuid(0, &_ignored, &_ignored, &_ignored, &_ignored);

        /* spin on the reference clock for duration_ms */
        switch (calibration_clock) {
            case CLOCK_HPET:
                hpet_calibration_cycle(duration_ms);
                break;
            case CLOCK_PIT:
                pit_calibration_cycle(duration_ms);
                break;
            default:
                PANIC_UNIMPLEMENTED;
        }

        /* serialize again around the closing timestamp read */
        cpuid(0, &_ignored, &_ignored, &_ignored, &_ignored);
        zx_ticks_t end = rdtsc();
        cpuid(0, &_ignored, &_ignored, &_ignored, &_ignored);

        zx_ticks_t tsc_ticks = end - start;
        if (tsc_ticks < best_time) {
            best_time = tsc_ticks;
        }
        /* NOTE(review): this prints the raw tick count for the whole
         * duration_ms interval, not a per-ms rate as "ticks/ms" suggests —
         * confirm whether the message or the value is intended */
        LTRACEF("Calibration trial %d found %" PRIu64 " ticks/ms\n", tries, tsc_ticks);

        /* tear down the reference clock for this trial */
        switch (calibration_clock) {
            case CLOCK_HPET:
                hpet_calibration_cycle_cleanup();
                break;
            case CLOCK_PIT:
                pit_calibration_cycle_cleanup();
                break;
            default:
                PANIC_UNIMPLEMENTED;
        }
    }

    return best_time;
}
status_t virtio_gpu_init(struct virtio_device *dev, uint32_t host_features) { LTRACEF("dev %p, host_features 0x%x\n", dev, host_features); /* allocate a new gpu device */ struct virtio_gpu_dev *gdev = malloc(sizeof(struct virtio_gpu_dev)); if (!gdev) return ERR_NO_MEMORY; mutex_init(&gdev->lock); event_init(&gdev->io_event, false, EVENT_FLAG_AUTOUNSIGNAL); event_init(&gdev->flush_event, false, EVENT_FLAG_AUTOUNSIGNAL); gdev->dev = dev; dev->priv = gdev; gdev->pmode_id = -1; gdev->next_resource_id = 1; /* allocate memory for a gpu request */ #if WITH_KERNEL_VM gdev->gpu_request = pmm_alloc_kpage(); gdev->gpu_request_phys = vaddr_to_paddr(gdev->gpu_request); #else gdev->gpu_request = malloc(sizeof(struct virtio_gpu_resp_display_info)); // XXX get size better gdev->gpu_request_phys = (paddr_t)gdev->gpu_request; #endif /* make sure the device is reset */ virtio_reset_device(dev); volatile struct virtio_gpu_config *config = (struct virtio_gpu_config *)dev->config_ptr; dump_gpu_config(config); /* ack and set the driver status bit */ virtio_status_acknowledge_driver(dev); // XXX check features bits and ack/nak them /* allocate a virtio ring */ virtio_alloc_ring(dev, 0, 16); /* set our irq handler */ dev->irq_driver_callback = &virtio_gpu_irq_driver_callback; dev->config_change_callback = &virtio_gpu_config_change_callback; /* set DRIVER_OK */ virtio_status_driver_ok(dev); /* save the main device we've found */ the_gdev = gdev; printf("found virtio gpu device\n"); return NO_ERROR; }
/* Byte-granularity write into the in-memory block device's backing store. */
static ssize_t mem_bdev_write(bdev_t *bdev, const void *buf, off_t offset, size_t len) {
    mem_bdev_t *dev = (mem_bdev_t *)bdev;

    LTRACEF("bdev %s, buf %p, offset %lld, len %zu\n", bdev->name, buf, offset, len);

    memcpy((uint8_t *)dev->ptr + offset, buf, len);

    return len;
}
/*
 * Compare two converted time values.  Differences up to 'limit' are treated
 * as acceptable rounding (traced only); anything larger is a test failure.
 */
static void test_time_conversion_check_result(uint64_t a, uint64_t b, uint64_t limit, bool is32) {
    if (a == b)
        return;

    /* magnitude of the difference, interpreted at the tested width */
    uint64_t diff = is32 ? abs_int32(a - b) : abs_int64(a - b);

    if (diff <= limit) {
        LTRACEF("ROUNDED by %llu (up to %llu allowed)\n", diff, limit);
    } else {
        TRACEF("FAIL, off by %llu\n", diff);
    }
}
/* routines called from lib/watchdog */
/*
 * Configure the Zynq system watchdog (SWDT) for a timeout close to
 * target_timeout (ms).  Optionally returns a recommended pet period
 * (half the actual timeout) through recommended_pet_period, which may
 * be NULL.
 */
status_t platform_watchdog_init(lk_time_t target_timeout, lk_time_t *recommended_pet_period) {
    LTRACEF("target_timeout %u\n", (uint32_t)target_timeout);

    /* make sure the swdt is stopped */
    SWDT->MODE = SWDT_MODE_ZKEY | SWDT_MODE_RESERVED;

    /* make sure swdt has the proper clock */
    SLCR->WDT_CLK_SEL = 0; // cpu 1x
    uint32_t swdt_clock = zynq_get_swdt_freq();

    /* assuming a prescalar of / 4096, figure out the restart value */
    uint32_t restart = ((swdt_clock / 4096) * target_timeout) / 1000;

    /* make sure the restart value is <= 24 bits */
    if (restart > 0x00ffffff)
        restart = 0x00ffffff;

    LTRACEF("restart value %u\n", restart);

    /* the bottom 12 bits of restart are set to 0xfff by hardware */
    restart |= 0xfff;

    /* pet period is / 2 the computed restart value */
    /* fix: only compute and trace the pet period when the caller asked for
     * it — the old code dereferenced recommended_pet_period unconditionally
     * in the trace, crashing when the caller passed NULL */
    if (recommended_pet_period) {
        *recommended_pet_period = ((restart * 1000) / (swdt_clock / 4096)) / 2;
        LTRACEF("recommended pet period %u\n", (uint32_t)*recommended_pet_period);
    }

    /* set up the swdt */
    /* load counter restart (top 12 bits of restart count), pclk / 4096 */
    SWDT->CONTROL = SWDT_CONTROL_CKEY | ((restart >> 12) << 2) | 3;

    /* zero it out */
    SWDT->RESTART = SWDT_RESTART_RSTKEY;

    DMB;

    return NO_ERROR;
}
static void initial_thread_func(void) { thread_t *ct = get_current_thread(); #if LOCAL_TRACE LTRACEF("thread %p calling %p with arg %p\n", ct, ct->entry, ct->arg); dump_thread(ct); #endif /* release the thread lock that was implicitly held across the reschedule */ spin_unlock(&thread_lock); arch_enable_ints(); int ret = ct->entry(ct->arg); LTRACEF("thread %p exiting with %d\n", ct, ret); thread_exit(ret); }
/* Block-granularity write into the in-memory block device's backing store. */
static ssize_t mem_bdev_write_block(struct bdev *bdev, const void *buf, bnum_t block, uint count) {
    mem_bdev_t *dev = (mem_bdev_t *)bdev;

    LTRACEF("bdev %s, buf %p, block %u, count %u\n", bdev->name, buf, block, count);

    /* copy count whole blocks starting at block's byte offset */
    memcpy((uint8_t *)dev->ptr + block * BLOCKSIZE, buf, count * BLOCKSIZE);

    return count * BLOCKSIZE;
}
static uint32_t get_io_pll_freq(void) { LTRACEF("IO_PLL_CTRL 0x%x\n", SLCR_REG(IO_PLL_CTRL)); // XXX test that the pll is actually enabled uint32_t fdiv = BITS_SHIFT(SLCR_REG(IO_PLL_CTRL), 18, 12); return EXTERNAL_CLOCK_FREQ * fdiv; }
static void arm64_mmu_unmap_pt(vaddr_t vaddr, vaddr_t vaddr_rel, size_t size, uint index_shift, uint page_size_shift, pte_t *page_table, uint asid) { pte_t *next_page_table; vaddr_t index; size_t chunk_size; vaddr_t vaddr_rem; vaddr_t block_size; vaddr_t block_mask; pte_t pte; paddr_t page_table_paddr; LTRACEF("vaddr 0x%lx, vaddr_rel 0x%lx, size 0x%lx, index shift %d, page_size_shift %d, page_table %p\n", vaddr, vaddr_rel, size, index_shift, page_size_shift, page_table); while (size) { block_size = 1UL << index_shift; block_mask = block_size - 1; vaddr_rem = vaddr_rel & block_mask; chunk_size = MIN(size, block_size - vaddr_rem); index = vaddr_rel >> index_shift; pte = page_table[index]; if (index_shift > page_size_shift && (pte & MMU_PTE_DESCRIPTOR_MASK) == MMU_PTE_L012_DESCRIPTOR_TABLE) { page_table_paddr = pte & MMU_PTE_OUTPUT_ADDR_MASK; next_page_table = paddr_to_kvaddr(page_table_paddr); arm64_mmu_unmap_pt(vaddr, vaddr_rem, chunk_size, index_shift - (page_size_shift - 3), page_size_shift, next_page_table, asid); if (chunk_size == block_size || page_table_is_clear(next_page_table, page_size_shift)) { LTRACEF("pte %p[0x%lx] = 0 (was page table)\n", page_table, index); page_table[index] = MMU_PTE_DESCRIPTOR_INVALID; __asm__ volatile("dmb ishst" ::: "memory"); free_page_table(next_page_table, page_table_paddr, page_size_shift); } } else if (pte) {
static status_t attach_backing(struct virtio_gpu_dev *gdev, uint32_t resource_id, void *ptr, size_t buf_len) { status_t err; LTRACEF("gdev %p, resource_id %u, ptr %p, buf_len %zu\n", gdev, resource_id, ptr, buf_len); DEBUG_ASSERT(gdev); DEBUG_ASSERT(ptr); /* grab a lock to keep this single message at a time */ mutex_acquire(&gdev->lock); /* construct the request */ struct { struct virtio_gpu_resource_attach_backing req; struct virtio_gpu_mem_entry mem; } req; memset(&req, 0, sizeof(req)); req.req.hdr.type = VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING; req.req.resource_id = resource_id; req.req.nr_entries = 1; paddr_t pa; pa = vaddr_to_paddr(ptr); req.mem.addr = pa; req.mem.length = buf_len; /* send the command and get a response */ struct virtio_gpu_ctrl_hdr *res; err = send_command_response(gdev, &req, sizeof(req), (void **)&res, sizeof(*res)); DEBUG_ASSERT(err == NO_ERROR); /* see if we got a valid response */ LTRACEF("response type 0x%x\n", res->type); err = (res->type == VIRTIO_GPU_RESP_OK_NODATA) ? NO_ERROR : ERR_NO_MEMORY; /* release the lock */ mutex_release(&gdev->lock); return err; }
/*
 * Install the translation table of the given address space, or disable
 * user (TTBR0) translation walks entirely when aspace is NULL.
 */
void arch_mmu_context_switch(arch_aspace_t *aspace) {
    if (LOCAL_TRACE && TRACE_CONTEXT_SWITCH)
        LTRACEF("aspace %p\n", aspace);

    uint32_t ttbcr = arm_read_ttbcr();
    uint32_t ttbr;

    if (aspace) {
        /* point TTBR0 at the aspace's table and allow walks through it */
        ttbr = MMU_TTBRx_FLAGS | (aspace->tt_phys);
        ttbcr &= ~(1<<4); // enable TTBR0
    } else {
        ttbr = 0;
        ttbcr |= (1<<4); // disable TTBR0
    }

    if (LOCAL_TRACE && TRACE_CONTEXT_SWITCH)
        LTRACEF("ttbr 0x%x, ttbcr 0x%x\n", ttbr, ttbcr);

    arm_write_ttbr0(ttbr);
    arm_write_ttbcr(ttbcr);
}
/*
 * Invoked when the device signals a config-space change; dumps the new
 * config and requests a reschedule.
 */
static enum handler_return virtio_gpu_config_change_callback(struct virtio_device *dev) {
    struct virtio_gpu_dev *gdev = (struct virtio_gpu_dev *)dev->priv;

    LTRACEF("gdev %p\n", gdev);

    volatile struct virtio_gpu_config *config = (struct virtio_gpu_config *)dev->config_ptr;
    dump_gpu_config(config);

    return INT_RESCHEDULE;
}
/*
 * HAL callback for a completed OUT data stage: ack control transfers on
 * EP0, otherwise complete the pending transfer on the endpoint (if any)
 * and invoke its completion callback.
 */
void HAL_PCD_DataOutStageCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum) {
    LTRACEF("epnum %u\n", epnum);

    if (epnum == 0) {
        usbc_ep0_ack();
        return;
    }

    usbc_transfer_t *t = usbc.ep_out[epnum].transfer;
    if (!t)
        return;

    // completing a transfer
    usbc.ep_out[epnum].transfer = NULL;

    LTRACEF("completing transfer %p\n", t);

    PCD_EPTypeDef *ep = &hpcd->OUT_ep[epnum];
    t->bufpos = ep->xfer_count;
    t->result = 0;
    t->callback(epnum, t);

    usbc.do_resched = true;
}
/* Per-arch bringup: initialize this cpu, then release the secondaries. */
void arch_init(void) {
    arch_mp_init_percpu();

#if WITH_SMP
    LTRACEF("midr_el1 0x%llx\n", ARM64_READ_SYSREG(midr_el1));

    /* TODO: get count from somewhere else, or add cpus as they boot */
    secondaries_to_init = SMP_MAX_CPUS - 1;
    lk_init_secondary_cpus(secondaries_to_init);

    LTRACEF("releasing %d secondary cpus\n", secondaries_to_init);

    /* release the secondary cpus */
    spin_unlock(&arm_boot_cpu_lock);

    /* flush the release of the lock, since the secondary cpus are running without cache on */
    arch_clean_cache_range((addr_t)&arm_boot_cpu_lock, sizeof(arm_boot_cpu_lock));
#endif
}
/* Start or stop the USB controller via the HAL. */
status_t usbc_set_active(bool active) {
    LTRACEF("active %u\n", active);

    if (active) {
        HAL_PCD_Start(&usbc.handle);
    } else {
        HAL_PCD_Stop(&usbc.handle);
    }

    return NO_ERROR;
}
/*
 * Queue a transfer for transmission on IN endpoint 'ep'.  Only one
 * transfer may be outstanding per endpoint at a time; completion is
 * delivered asynchronously via the HAL's IN-stage callback.
 */
status_t usbc_queue_tx(ep_t ep, usbc_transfer_t *transfer) {
    LTRACEF("ep %u, transfer %p (buf %p, buflen %zu)\n", ep, transfer, transfer->buf, transfer->buflen);

    /* NOTE(review): if ep_in[] holds NUM_EP entries this bound should be
     * ep < NUM_EP — '<=' would allow indexing one past the end; confirm
     * the array's declared size */
    DEBUG_ASSERT(ep <= NUM_EP);
    DEBUG_ASSERT(usbc.ep_in[ep].transfer == NULL);

    usbc.ep_in[ep].transfer = transfer;

    /* hand the buffer to the HAL; completion arrives via interrupt */
    HAL_PCD_EP_Transmit(&usbc.handle, ep, transfer->buf, transfer->buflen);

    return NO_ERROR;
}
static void send_pdcc_command(uint32_t opcode, uint32_t data) { uint32_t word; word = PDCC_VALID | ((opcode & 0x7f) << PDCC_OPCODE_SHIFT) | (data & 0x00ffffff); // XXX may block forever LTRACEF("sending 0x%x\n", word); arm_dcc_write(&word, 1, INFINITE_TIME); }
/*
 * Arm the OpenRISC tick timer to fire callback(arg) every 'interval' ms,
 * using restart (auto-reload) mode.
 */
status_t platform_set_periodic_timer(platform_timer_callback callback, void *arg, lk_time_t interval) {
    LTRACEF("cb %p, arg %p, interval %ld\n", callback, arg, interval);

    /* ticks per interval, computed in 64-bit to avoid overflow */
    uint32_t count = (uint64_t)timer_freq * interval / 1000;
    LTRACEF("count 0x%x\n", count);

    timer_cb = callback;
    timer_arg = arg;

    /* disable timer before doing changes */
    mtspr(OR1K_SPR_TICK_TTMR_ADDR, 0);

    /* reset timer counter */
    mtspr(OR1K_SPR_TICK_TTCR_ADDR, 0);

    /* enable timer with given interval in 'restart' mode */
    uint32_t mode = OR1K_SPR_TICK_TTMR_MODE_SET(count | OR1K_SPR_TICK_TTMR_IE_MASK, OR1K_SPR_TICK_TTMR_MODE_RESTART);
    mtspr(OR1K_SPR_TICK_TTMR_ADDR, mode);

    return NO_ERROR;
}
/*
 * @brief handle "logout" request
 */
static int novacom_handle_authlogout(device_handle_t dev, uint32_t chan, buffer_t *b, buffer_t *r, unsigned char cmd) {
    LTRACEF("disconnect session command\n");

    /* drop the authenticated session state and report success/failure */
    bool ok = auth_reset_state();

    buffer_putbyte(r, ok ? SSH_MSG_USERAUTH_SUCCESS : SSH_MSG_USERAUTH_FAILURE);
    buffer_putbyte(r, 0);
    buffer_putbyte(r, cmd);

    return ok ? 0 : -1;
}
/* Attach to or detach from the USB bus, enabling interrupts when attaching. */
status_t usbc_set_active(bool active) {
    LTRACEF("active %d\n", active);

    if (active) {
        /* unmask the interrupts we care about, then connect to the bus */
        USBIntEnableControl(USB0_BASE, USB_INTCTRL_CONNECT | USB_INTCTRL_RESET);
        USBIntEnableEndpoint(USB0_BASE, USB_INTEP_0);
        USBDevConnect(USB0_BASE);
    } else {
        USBDevDisconnect(USB0_BASE);
    }

    return NO_ERROR;
}
/*
 * Return an allocation to the given heap zone.  A NULL pointer is a no-op.
 * The allocation header stored just before the user pointer is validated
 * via its magic before the chunk is reinserted into the free pool.
 */
void heap_free(zone_type zone, void *ptr) {
    if (!ptr)
        return;

    LTRACEF("ptr %p\n", ptr);

    // check for the old allocation structure
    struct alloc_struct_begin *as = (struct alloc_struct_begin *)ptr - 1;

    DEBUG_ASSERT(as->magic == HEAP_MAGIC);

    LTRACEF("allocation was %zd bytes long at ptr %p\n", as->size, as->ptr);

    // looks good, create a free chunk and add it to the pool
    enter_critical_section();
    heap_insert_free_chunk(zone, heap_create_free_chunk(as->ptr, as->size));
    exit_critical_section();
}
/*
 * Initialize an architecture address-space object covering [base, base+size).
 * Kernel aspaces alias the global kernel translation table; user aspaces get
 * a freshly allocated top-level table page tracked on pt_page_list.
 *
 * Returns NO_ERROR, or ERR_NO_MEMORY if the user table page cannot be
 * allocated (in which case base/size have already been recorded).
 */
status_t arch_mmu_init_aspace(arch_aspace_t *aspace, vaddr_t base, size_t size, uint flags) {
    LTRACEF("aspace %p, base 0x%lx, size 0x%zx, flags 0x%x\n", aspace, base, size, flags);

    DEBUG_ASSERT(aspace);

    /* validate that the base + size is sane and doesn't wrap */
    DEBUG_ASSERT(size > PAGE_SIZE);
    DEBUG_ASSERT(base + size - 1 > base);

    list_initialize(&aspace->pt_page_list);

    if (flags & ARCH_ASPACE_FLAG_KERNEL) {
        /* kernel aspaces share the global kernel translation table */
        aspace->base = base;
        aspace->size = size;
        aspace->tt_virt = arm_kernel_translation_table;
        aspace->tt_phys = vaddr_to_paddr(aspace->tt_virt);
    } else {
        // XXX at the moment we can only really deal with 1GB user space, and thus
        // needing only a single page for the top level translation table
        DEBUG_ASSERT(base < GB && (base + size) <= GB);

        aspace->base = base;
        aspace->size = size;

        /* one page suffices for a <=1GB top-level table (see XXX above) */
        uint32_t *va = pmm_alloc_kpages(1, &aspace->pt_page_list);
        if (!va)
            return ERR_NO_MEMORY;

        aspace->tt_virt = va;
        aspace->tt_phys = vaddr_to_paddr(aspace->tt_virt);
    }

    LTRACEF("tt_phys 0x%lx tt_virt %p\n", aspace->tt_phys, aspace->tt_virt);

    return NO_ERROR;
}
uint32_t zynq_get_clock(enum zynq_periph periph) { DEBUG_ASSERT(periph < _PERIPH_MAX); // get the clock control register base addr_t clk_reg = periph_clk_ctrl_reg(periph); DEBUG_ASSERT(clk_reg != 0); int enable_bitpos = periph_clk_ctrl_enable_bitpos(periph); LTRACEF("clkreg 0x%x\n", *REG32(clk_reg)); // see if it's enabled if (enable_bitpos >= 0) { if ((*REG32(clk_reg) & (1 << enable_bitpos)) == 0) { // not enabled return 0; } } // get the source clock uint32_t srcclk; switch (BITS_SHIFT(*REG32(clk_reg), 5, 4)) { case 0: case 1: srcclk = get_io_pll_freq(); break; case 2: srcclk = get_arm_pll_freq(); break; case 3: srcclk = get_ddr_pll_freq(); break; } // get the divisor out of the register uint32_t divisor = BITS_SHIFT(*REG32(clk_reg), 13, 8); if (divisor == 0) return 0; uint32_t divisor2 = 1; if (periph_clk_ctrl_divisor_count(periph) == 2) { divisor2 = BITS_SHIFT(*REG32(clk_reg), 25, 20); if (divisor2 == 0) return 0; } uint32_t clk = srcclk / divisor / divisor2; return clk; }
/*
 * Arm the Cortex-M systick to fire callback(arg) every 'interval' ms,
 * deriving the reload value from the current system clock frequency.
 */
status_t platform_set_periodic_timer(platform_timer_callback callback, void *arg, lk_time_t interval) {
    LTRACEF("callback %p, arg %p, interval %u\n", callback, arg, (uint)interval);

    cb = callback;
    cb_args = arg;

    /* read the live clock tree so the reload matches the actual core clock */
    RCC_ClocksTypeDef clocks;
    RCC_GetClocksFreq(&clocks);
    cm3_systick_set_periodic(clocks.SYSCLK_Frequency, interval);

    return NO_ERROR;
}