/*
 * Drain the receive ring of a shared-memory serial port and push the
 * bytes into the tty flip buffer.
 *
 * Ring layout (established in l4ser_shm_init_port): the chunk starts with
 * a struct chunk_head, followed by a byte ring of rx_ring_size bytes.
 * Each record in the ring is a struct ring_chunk_head (carrying the
 * payload size) followed by the payload, padded up to ring_chunk_head
 * alignment.  A record with size == 0 marks the end of pending data.
 *
 * NOTE(review): the 0-size sentinel / next_offs_to_read handshake is a
 * protocol shared with the partner on the other side of the shm area;
 * the order "publish next_offs_to_read, then clear rph->size" must not
 * be changed.
 */
static void l4ser_shm_rx_chars(struct uart_port *port)
{
	struct l4ser_shm_uart_port *l4port = (struct l4ser_shm_uart_port *)port;
	struct tty_struct *tty = port->state->port.tty;
	struct chunk_head *chhead;
	struct ring_chunk_head *rph;
	unsigned long offs;

	chhead = (struct chunk_head *)l4shmc_chunk_ptr(&l4port->rx_chunk);
	offs = chhead->next_offs_to_read;

	while (1) {
		unsigned long l;

		rph = (struct ring_chunk_head *)(l4port->rx_ring_start + offs);

		/* size == 0: no more complete records pending */
		if (!rph->size)
			break;

		/* Step over the record header; payload may wrap the ring. */
		offs += sizeof(struct ring_chunk_head);
		offs %= l4port->rx_ring_size;

		/* l = contiguous part of the payload up to the ring end */
		if (offs + rph->size > l4port->rx_ring_size)
			l = l4port->rx_ring_size - offs;
		else
			l = rph->size;

		port->icount.rx += rph->size;

		tty_insert_flip_string(tty, (const unsigned char *)l4port->rx_ring_start + offs, l);
		/* Wrapped payload: remainder sits at the ring start. */
		if (l != rph->size)
			tty_insert_flip_string(tty, (const unsigned char *)l4port->rx_ring_start, rph->size - l);

		/* Advance past the payload, rounded up to header alignment. */
		offs = (offs + rph->size + sizeof(struct ring_chunk_head) - 1)
		       & ~(sizeof(struct ring_chunk_head) - 1);
		offs %= l4port->rx_ring_size;

		/* Publish the new read position, then release the record. */
		chhead->next_offs_to_read = offs;
		rph->size = 0;
	}
	tty_flip_buffer_push(tty);

	/* If the remote writer stalled on a full ring, wake it up. */
	if (chhead->writer_blocked) {
		L4XV_V(f);
		L4XV_L(f);
		l4shmc_trigger(&l4port->tx_sig);
		L4XV_U(f);
	}

	/* We consumed data, so our own tx side is no longer blocked. */
	chhead = (struct chunk_head *)l4shmc_chunk_ptr(&l4port->tx_chunk);
	chhead->writer_blocked = 0;
	return;
}
int l4x_kvm_create_vm(struct kvm *kvm) { l4_msgtag_t t; l4_utcb_t *u = l4_utcb(); int r; L4XV_V(f); kvm->arch.l4vmcap = L4_INVALID_CAP; if (l4lx_task_get_new_task(L4_INVALID_CAP, &kvm->arch.l4vmcap)) { printk("%s: could not allocate task cap\n", __func__); return -ENOENT; } L4XV_L(f); t = l4_factory_create_vm_u(l4re_env()->factory, kvm->arch.l4vmcap, u); if (unlikely((r = l4_error_u(t, u)))) { printk("%s: kvm task creation failed cap=%08lx: %d\n", __func__, kvm->arch.l4vmcap, r); l4lx_task_number_free(kvm->arch.l4vmcap); L4XV_U(f); return -ENOENT; } L4XV_U(f); printk("%s: cap = %08lx\n", __func__, kvm->arch.l4vmcap); #ifdef CONFIG_L4_DEBUG_REGISTER_NAMES L4XV_L(f); l4_debugger_set_object_name(kvm->arch.l4vmcap, "kvmVM"); L4XV_U(f); #endif return 0; }
static void l4ser_shm_tx_chars(struct uart_port *port) { struct l4ser_shm_uart_port *l4port = (struct l4ser_shm_uart_port *)port; struct circ_buf *xmit = &port->state->xmit; int c, do_trigger = 0; struct tty_struct *tty = port->state->port.tty; tty->hw_stopped = 0; tty->stopped = 0; if (port->x_char) { if (tx_buf(port, &port->x_char, 1)) { L4XV_V(f); port->icount.tx++; port->x_char = 0; L4XV_L(f); l4shmc_trigger(&l4port->tx_sig); L4XV_U(f); } return; } if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { return; } while (!uart_circ_empty(xmit)) { unsigned long r; c = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); if (!(r = tx_buf(port, &xmit->buf[xmit->tail], c))) break; xmit->tail = (xmit->tail + r) & (UART_XMIT_SIZE - 1); port->icount.tx += r; do_trigger = 1; } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); if (do_trigger) { L4XV_V(f); L4XV_L(f); l4shmc_trigger(&l4port->tx_sig); L4XV_U(f); } }
int l4vpci_irq_enable(struct pci_dev *dev) { unsigned char trigger, polarity; int irq; u8 pin = 0; unsigned flags; l4_uint32_t devfn; L4XV_V(f); if (!dev) return -EINVAL; pin = dev->pin; if (!pin) { dev_warn(&dev->dev, "No interrupt pin configured for device %s\n", pci_name(dev)); return 0; } pin--; if (!dev->bus) { dev_err(&dev->dev, "invalid (NULL) 'bus' field\n"); return -ENODEV; } L4XV_L(f); devfn = (PCI_SLOT(dev->devfn) << 16) | PCI_FUNC(dev->devfn); irq = l4vbus_pci_irq_enable(vbus, root_bridge, dev->bus->number, devfn, pin, &trigger, &polarity); if (irq < 0) { dev_warn(&dev->dev, "PCI INT %c: no GSI", 'A' + pin); /* Interrupt Line values above 0xF are forbidden */ return 0; } L4XV_U(f); switch ((!!trigger) | ((!!polarity) << 1)) { case 0: flags = IRQF_TRIGGER_HIGH; break; case 1: flags = IRQF_TRIGGER_RISING; break; case 2: flags = IRQF_TRIGGER_LOW; break; case 3: flags = IRQF_TRIGGER_FALLING; break; default: flags = 0; break; } dev->irq = irq; l4lx_irq_set_type(irq_get_irq_data(irq), flags); dev_info(&dev->dev, "PCI INT %c -> GSI %u (%s, %s) -> IRQ %d\n", 'A' + pin, irq, !trigger ? "level" : "edge", polarity ? "low" : "high", dev->irq); return 0; }
/*
 * PCI config-space write, forwarded to the L4 vbus root bridge.
 * len is in bytes; l4vbus_pci_cfg_write() takes the width in bits.
 * The seg argument is accepted for interface compatibility but unused.
 */
static int pci_conf1_write(unsigned int seg, unsigned int bus,
                           unsigned int devfn, int reg, int len, u32 value)
{
	l4_uint32_t df = (PCI_SLOT(devfn) << 16) | PCI_FUNC(devfn);
	int ret;
	L4XV_V(flags);

	L4XV_L(flags);
	ret = l4vbus_pci_cfg_write(vbus, root_bridge, bus, df,
	                           reg, value, len * 8);
	L4XV_U(flags);

	return ret;
}
/*
 * Stamp a file-descriptor-proxy result with the current KIP clock and
 * the client's request id, queue it on the server object and notify
 * the client.
 */
static void res_event(l4fdx_srv_obj srv_obj, struct l4fdx_result_t *r,
                      unsigned client_req_id)
{
	L4XV_V(flags);

	r->time = l4_kip_clock(l4lx_kinfo);
	r->payload.client_req_id = client_req_id;

	L4XV_L(flags);
	l4x_fdx_srv_add_event(srv_obj, r);
	l4x_fdx_srv_trigger(srv_obj);
	L4XV_U(flags);
}
/*
 * Release a secondary CPU from the holding pen and wait (up to 1s) for
 * it to acknowledge by resetting pen_release.
 *
 * Returns 0 unconditionally (see the commented-out stricter return at
 * the bottom).  NOTE(review): the pen_release handshake relies on the
 * exact order write_pen_release() -> l4x_cpu_release() -> poll; do not
 * reorder.
 */
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
	 * Note that "pen_release" is the hardware CPU ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	write_pen_release(cpu);

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	//l4/smp_cross_call(cpumask_of(cpu), 1);
	l4x_cpu_release(cpu);

	/* Poll for the ack, sleeping 10ms per iteration via the L4 kernel
	 * (udelay busy-waiting is avoided on the virtualized CPU). */
	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;

		//udelay(10);
		{
			L4XV_V(f);
			L4XV_L(f);
			l4_sleep(10);
			L4XV_U(f);
		}
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return 0; //pen_release != -1 ? -ENOSYS : 0;
}
/*
 * Run one guest-entry of a KVM vCPU on L4 (SVM path).
 *
 * Temporarily rewrites the host's l4_vcpu_state to point at the guest
 * task, copies the VMCB into the vCPU extended-state area, resumes via
 * l4_thread_vcpu_resume_*, and then restores everything in reverse.
 *
 * Returns 0 on a successful resume, 1 when the resume syscall itself
 * failed.  NOTE(review): the save/modify/resume/restore sequence below
 * is strictly ordered and runs entirely under the L4XV lock; the two
 * memcpy()s mirror the VMCB in and out of the vCPU state page.
 */
int l4x_kvm_svm_run(struct kvm_vcpu *kvcpu, unsigned long vmcb)
{
	l4_msgtag_t tag;
	unsigned cpu;
	unsigned long orig_state, orig_saved_state;
	l4_vcpu_state_t *vcpu;
	L4XV_V(f);

	L4XV_L(f);
	cpu = smp_processor_id();
	vcpu = l4x_vcpu_state(cpu);

	/* Save host vCPU state and switch it to guest (VM) mode. */
	orig_state = vcpu->state;
	vcpu->state = L4_VCPU_F_FPU_ENABLED;
	orig_saved_state = vcpu->saved_state;
	vcpu->saved_state = L4_VCPU_F_USER_MODE | L4_VCPU_F_FPU_ENABLED;
	vcpu->user_task = kvcpu->kvm->arch.l4vmcap;

	l4x_kvm_kvm_to_l4vcpu(kvcpu, vcpu);

	/* Copy the VMCB into the extended-state area of the vCPU page. */
	memcpy((char *)vcpu + L4_VCPU_OFFSET_EXT_STATE, (void *)vmcb,
	       L4_PAGESIZE - L4_VCPU_OFFSET_EXT_STATE);

	tag = l4_thread_vcpu_resume_start();
	tag = l4_thread_vcpu_resume_commit(L4_INVALID_CAP, tag);

	/* Back from the guest: copy results out and restore host state. */
	l4x_kvm_l4vcpu_to_kvm(vcpu, kvcpu);

	memcpy((void *)vmcb, (char *)vcpu + L4_VCPU_OFFSET_EXT_STATE,
	       L4_PAGESIZE - L4_VCPU_OFFSET_EXT_STATE);

	vcpu->user_task = current->mm->context.task;
	vcpu->state = orig_state;
	vcpu->saved_state = orig_saved_state;

	if (l4_error(tag) < 0) {
		L4XV_U(f);
		printk("%s: vm run failed with %ld\n", __func__, l4_error(tag));
		return 1;
	}

	L4XV_U(f);
	return 0;
}
/*
 * Initialize the L4 virtual PCI layer: obtain the vbus capability,
 * locate the root bridge (HID "PNP0A03"), scan bus 0 and enable the
 * interrupt of every discovered device.
 *
 * Returns 0 on success, -ENOENT without a vbus cap, -ENOMEM on
 * allocation failure, or the vbus lookup error.
 */
static int __init l4vpci_init(void)
{
	struct pci_dev *dev = NULL;
#ifdef CONFIG_ARM
	struct pci_sys_data *sd;
#else
	struct pci_sysdata *sd;
#endif
	int ret;
	L4XV_V(flags);

	vbus = l4re_get_env_cap("vbus");
	if (l4_is_invalid_cap(vbus))
		return -ENOENT;

	L4XV_L(flags);
	ret = l4vbus_get_device_by_hid(vbus, 0, &root_bridge, "PNP0A03", 0, 0);
	if (ret < 0) {
		printk(KERN_INFO "PCI: no root bridge found, no PCI\n");
		L4XV_U(flags);
		return ret;
	}
	L4XV_U(flags);

	printk(KERN_INFO "PCI: L4 root bridge is device %lx\n", root_bridge);

	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
	if (!sd)
		return -ENOMEM;

	pci_scan_bus(0, &l4vpci_ops, sd);

	printk(KERN_INFO "PCI: Using L4-IO for IRQ routing\n");

	for_each_pci_dev(dev)
		l4vpci_irq_enable(dev);

#ifdef CONFIG_X86
	pcibios_resource_survey();
#endif

	return 0;
}
/*
 * Initialize one shared-memory serial port: create/attach the shmc
 * area, set up the tx ("joe"/"bob") and rx chunks and signals, register
 * the rx signal as a Linux IRQ and fill in the uart_port fields.
 *
 * Returns 0 on success (and on repeated calls once initialized),
 * -ENOMEM on any setup failure.
 *
 * Bugfix: p->inited was set before any of the fallible steps, so a
 * failed initialization made every subsequent call return 0 (success)
 * on a half-initialized port.  The flag is now set only after the whole
 * setup has succeeded, allowing a retry after failure.
 */
static int __init l4ser_shm_init_port(int num, const char *name)
{
	int irq;
	struct chunk_head *ch;
	struct l4ser_shm_uart_port *p = &l4ser_shm_port[num];
	L4XV_V(f);

	if (p->inited)
		return 0;

	if (shmsize < PAGE_SIZE)
		shmsize = PAGE_SIZE;

	pr_info("l4ser_shm: Requesting, role %s, Shmsize %d Kbytes\n",
	        p->create ? "Creator" : "User", shmsize >> 10);

	L4XV_L(f);
	/* The creator side sets up the shm area; the user side attaches. */
	if (p->create) {
		if (l4shmc_create(name, shmsize)) {
			L4XV_U(f);
			pr_err("l4ser_shm/%s: Failed to create shm\n", p->name);
			return -ENOMEM;
		}
	}

	if (l4shmc_attach_to(name, WAIT_TIMEOUT, &p->shmcarea)) {
		L4XV_U(f);
		pr_err("l4ser_shm/%s: Failed to attach to shm\n", p->name);
		return -ENOMEM;
	}

	/* Chunk/signal naming: the creator transmits on "joe" and receives
	 * on "bob"; the user side is the mirror image. */
	if (l4shmc_add_chunk(&p->shmcarea, p->create ? "joe" : "bob",
	                     chunk_size(&p->shmcarea), &p->tx_chunk))
		goto unlock;

	if (l4shmc_add_signal(&p->shmcarea, p->create ? "joe" : "bob",
	                      &p->tx_sig))
		goto unlock;

	if (l4shmc_connect_chunk_signal(&p->tx_chunk, &p->tx_sig))
		goto unlock;

	/* Now get the receiving side */
	if (l4shmc_get_chunk_to(&p->shmcarea, p->create ? "bob" : "joe",
	                        WAIT_TIMEOUT, &p->rx_chunk))
		goto unlock;

	if (l4shmc_get_signal_to(&p->shmcarea, p->create ? "bob" : "joe",
	                         WAIT_TIMEOUT, &p->rx_sig))
		goto unlock;

	if (l4shmc_connect_chunk_signal(&p->rx_chunk, &p->rx_sig))
		goto unlock;
	L4XV_U(f);

	if ((irq = l4x_register_irq(l4shmc_signal_cap(&p->rx_sig))) < 0)
		return -ENOMEM;

	/* Reset the tx ring head; the partner initializes the rx side. */
	ch = (struct chunk_head *)l4shmc_chunk_ptr(&p->tx_chunk);
	ch->next_offs_to_write = 0;
	ch->next_offs_to_read  = 0;
	ch->writer_blocked     = 0;

	p->tx_ring_size = l4shmc_chunk_capacity(&p->tx_chunk)
	                  - sizeof(struct chunk_head);
	p->rx_ring_size = l4shmc_chunk_capacity(&p->rx_chunk)
	                  - sizeof(struct chunk_head);
	p->tx_ring_start = (char *)l4shmc_chunk_ptr(&p->tx_chunk)
	                   + sizeof(struct chunk_head);
	p->rx_ring_start = (char *)l4shmc_chunk_ptr(&p->rx_chunk)
	                   + sizeof(struct chunk_head);

	p->port.uartclk  = 3686400;
	p->port.ops      = &l4ser_shm_pops;
	p->port.fifosize = 8;
	p->port.line     = num;
	p->port.iotype   = UPIO_MEM;
	p->port.membase  = (void *)1;
	p->port.mapbase  = 1;
	p->port.flags    = UPF_BOOT_AUTOCONF;
	p->port.irq      = irq;

	/* Mark initialized only now that everything above succeeded. */
	p->inited = 1;
	return 0;

unlock:
	L4XV_U(f);
	return -ENOMEM;
}
/*
 * Set up the L4 timer: create a notification IRQ object, register it
 * with Linux, spawn the timer service thread and register the
 * clockevent device.
 *
 * Returns 0 on success or a negative errno; on failure all resources
 * acquired so far are released via the out* labels.
 *
 * Bugfix: timer_srv was assigned from l4lx_thread_get_cap(thread)
 * BEFORE l4lx_thread_is_valid(thread) was checked, publishing a cap
 * derived from an invalid thread on the failure path.  The validity
 * check now comes first.
 */
static int __init l4x_timer_init_ret(void)
{
	int r;
	l4lx_thread_t thread;
	int irq;
	L4XV_V(f);

	timer_irq_cap = l4x_cap_alloc();
	if (l4_is_invalid_cap(timer_irq_cap)) {
		printk(KERN_ERR "l4timer: Failed to alloc\n");
		return -ENOMEM;
	}

	r = L4XV_FN_i(l4_error(l4_factory_create_irq(l4re_env()->factory,
	                                             timer_irq_cap)));
	if (r) {
		printk(KERN_ERR "l4timer: Failed to create irq: %d\n", r);
		goto out1;
	}

	if ((irq = l4x_register_irq(timer_irq_cap)) < 0) {
		r = -ENOMEM;
		goto out2;
	}

	printk("l4timer: Using IRQ%d\n", irq);

	setup_irq(irq, &l4timer_irq);

	L4XV_L(f);
	/* NOTE(review): the thread cap from l4x_cap_alloc() below is not
	 * freed on failure -- confirm whether l4lx_thread_create cleans it
	 * up itself. */
	thread = l4lx_thread_create
	          (timer_thread,                /* thread function */
	           smp_processor_id(),          /* cpu */
	           NULL,                        /* stack */
	           &timer_irq_cap, sizeof(timer_irq_cap), /* data */
	           l4x_cap_alloc(),             /* cap */
	           PRIO_TIMER,                  /* prio */
	           0,                           /* vcpup */
	           "timer",                     /* name */
	           NULL);
	L4XV_U(f);

	if (!l4lx_thread_is_valid(thread)) {
		printk(KERN_ERR "l4timer: Failed to create thread\n");
		r = -ENOMEM;
		goto out3;
	}
	/* Only publish the service cap once the thread is known valid. */
	timer_srv = l4lx_thread_get_cap(thread);

	l4timer_clockevent.irq = irq;
	l4timer_clockevent.mult =
		div_sc(1000000, NSEC_PER_SEC, l4timer_clockevent.shift);
	l4timer_clockevent.max_delta_ns =
		clockevent_delta2ns(0xffffffff, &l4timer_clockevent);
	l4timer_clockevent.min_delta_ns =
		clockevent_delta2ns(0xf, &l4timer_clockevent);

	l4timer_clockevent.cpumask = cpumask_of(0);
	clockevents_register_device(&l4timer_clockevent);

	return 0;

out3:
	l4x_unregister_irq(irq);
out2:
	L4XV_FN_v(l4_task_delete_obj(L4RE_THIS_TASK_CAP, timer_irq_cap));
out1:
	l4x_cap_free(timer_irq_cap);
	return r;
}
/*
 * Unmap (flush) a page range from child address spaces.
 *
 * @mm:           address space of the task, or NULL for a global flush
 * @address:      'physical' address as known to the Linux server
 * @vaddr:        virtual address within the child task
 * @size:         flexpage size (log2 order, as used by l4_fpage)
 * @flush_rights: rights mask to revoke
 *
 * Two modes: with a valid mm->context.task the unmap is performed
 * directly in the child task using vaddr; otherwise the 'physical'
 * address is unmapped from ALL child spaces via our own task.
 */
static void l4x_flush_page(struct mm_struct *mm,
                           unsigned long address,
                           unsigned long vaddr,
                           int size,
                           unsigned long flush_rights)
{
	l4_msgtag_t tag;

	/* This mm explicitly opted out of unmapping. */
	if (mm && mm->context.l4x_unmap_mode == L4X_UNMAP_MODE_SKIP)
		return;

	/* some checks: */
	if (address > 0x80000000UL) {
		/* Above 2G: only ioremapped regions live here; translate the
		 * address back through the ioremap table. */
		unsigned long remap;
		remap = find_ioremap_entry(address);

		/* VU: it may happen, that memory is not remapped but mapped in
		 * user space, if a task mmaps /dev/mem but never accesses it.
		 * Therefore, we fail silently... */
		if (!remap)
			return;

		address = remap;
	} else if ((address & PAGE_MASK) == 0)
		/* Page 0 is special-cased to its dedicated backing page. */
		address = PAGE0_PAGE_ADDRESS;

#if 0
	/* only for debugging */
	else {
		if ((address >= (unsigned long)high_memory)
		    && (address < 0x80000000UL)) {
			printk("flushing non physical page (0x%lx)\n", address);
			enter_kdebug("flush_page: non physical page");
		}
	}
#endif

	/* do the real flush */
	if (mm && !l4_is_invalid_cap(mm->context.task)) {
		L4XV_V(f);
		if (!mm->context.task)
			l4x_printf("%s: Ups, task == 0\n", __func__);
		/* Direct flush in the child, use virtual address in the
		 * child address space */
		L4XV_L(f);
		tag = l4_task_unmap(mm->context.task,
		                    l4_fpage(vaddr & PAGE_MASK, size, flush_rights),
		                    L4_FP_ALL_SPACES);
		L4XV_U(f);
	} else {
		L4XV_V(f);
		/* Flush all pages in all childs using the 'physical'
		 * address known in the Linux server */
		L4XV_L(f);
		tag = l4_task_unmap(L4RE_THIS_TASK_CAP,
		                    l4_fpage(address & PAGE_MASK, size, flush_rights),
		                    L4_FP_OTHER_SPACES);
		L4XV_U(f);
	}

	if (l4_error(tag))
		l4x_printf("l4_task_unmap error %ld\n", l4_error(tag));
}