Example #1
DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD),
                                                                       RTR0MEMOBJTYPE_CONT, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    int rc = rtR0MemObjFreeBSDAllocHelper(pMemFreeBSD, fExecutable, _4G - 1, true, VERR_NO_CONT_MEMORY);
    if (RT_FAILURE(rc))
    {
        rtR0MemObjDelete(&pMemFreeBSD->Core);
        return rc;
    }

    pMemFreeBSD->Core.u.Cont.Phys = vtophys(pMemFreeBSD->Core.pv);
    *ppMem = &pMemFreeBSD->Core;
    return rc;
}
Example #2
static void
encap_frame(struct sbsh_softc *sc, struct mbuf *m_head)
{
	struct mbuf	*m;
	u_int32_t	cur_tbd;
	int  done;

look_for_nonzero:
	for (m = m_head; !m->m_len; m = m->m_next)
		;

	cur_tbd = sc->regs->LTDR & 0x7f;
	done = 0;
	do {
		if (m->m_len < 5 || cur_tbd == ((sc->head_tdesc - 1) & 0x7f)) {
			if ((m_head = repack(sc, m_head)) != NULL)
				goto look_for_nonzero;
			else
				return;
		}

		sc->tbd[cur_tbd].address = vtophys(mtod(m, vm_offset_t));
		sc->tbd[cur_tbd].length  = m->m_len;

		do {
			m = m->m_next;
		} while (m && !m->m_len);

		if (!m) {	/* last fragment has been reached */
			sc->tbd[cur_tbd].length |= LAST_FRAG;
			done = 1;
		}

		++cur_tbd;
		cur_tbd &= 0x7f;
	} while (!done);

	sc->xq[sc->tail_xq++] = m_head;
	sc->tail_xq &= (XQLEN - 1);

	sc->regs->LTDR = cur_tbd;
	++sc->in_stats.sent_pkts;
	IFNET_STAT_INC(&sc->arpcom.ac_if, opackets, 1);
}
Example #3
/*
 * Search interrupt stack for a valid frame.
 */
struct frame *
checkintstack(fcor)
	int fcor;
{
	char stack[NISP*NBPG];
	off_t off = vtophys(intstack);
	struct frame *fp;
	register caddr_t addr;

	if (off == -1 || lseek(fcor, off, L_SET) != off ||
	    read(fcor, stack, sizeof (stack)) != sizeof (stack))
		return ((struct frame *)0);
	addr = eintstack;
	do {
		addr -= sizeof (caddr_t);
		fp = (struct frame *)&stack[addr - intstack];
	} while (addr >= intstack + sizeof (struct frame) &&
	    !checkframe(fp));
	return (addr < intstack+sizeof (struct frame) ? (struct frame *)0 : fp);
}
Example #4
static int
virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
{
	device_t dev;
	struct vq_desc_extra *dxp;
	int i, size;

	dev = vq->vq_dev;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
		/*
		 * Indirect descriptors requested by the driver but not
		 * negotiated. Return zero to keep the initialization
		 * going: we'll run fine without.
		 */
		if (bootverbose)
			device_printf(dev, "virtqueue %d (%s) requested "
			    "indirect descriptors but not negotiated\n",
			    vq->vq_queue_index, vq->vq_name);
		return (0);
	}

	size = indirect_size * sizeof(struct vring_desc);
	vq->vq_max_indirect_size = indirect_size;
	vq->vq_indirect_mem_size = size;
	vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		dxp->indirect = contigmalloc(size, M_DEVBUF, M_WAITOK,
		    0, BUS_SPACE_MAXADDR, 16, 0);
		if (dxp->indirect == NULL) {
			device_printf(dev, "cannot allocate indirect list\n");
			return (ENOMEM);
		}

		dxp->indirect_paddr = vtophys(dxp->indirect);
		virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	return (0);
}
Example #5
/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
	struct pcb *pcb2;
	struct trapframe *tf;

	if ((flags & RFPROC) == 0)
		return;

	pcb2 = (struct pcb *)(td2->td_kstack +
	    td2->td_kstack_pages * PAGE_SIZE) - 1;

	td2->td_pcb = pcb2;
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

	td2->td_pcb->pcb_l1addr =
	    vtophys(vmspace_pmap(td2->td_proc->p_vmspace)->pm_l1);

	tf = (struct trapframe *)STACKALIGN((struct trapframe *)pcb2 - 1);
	bcopy(td1->td_frame, tf, sizeof(*tf));

	/* Clear syscall error flag */
	tf->tf_t[0] = 0;

	/* Arguments for child */
	tf->tf_a[0] = 0;
	tf->tf_a[1] = 0;
	tf->tf_sstatus |= (SSTATUS_SPIE); /* Enable interrupts. */
	tf->tf_sstatus |= (SSTATUS_SUM); /* Supervisor can access userspace. */
	tf->tf_sstatus &= ~(SSTATUS_SPP); /* User mode. */

	td2->td_frame = tf;

	/* Set the return value registers for fork() */
	td2->td_pcb->pcb_s[0] = (uintptr_t)fork_return;
	td2->td_pcb->pcb_s[1] = (uintptr_t)td2;
	td2->td_pcb->pcb_ra = (uintptr_t)fork_trampoline;
	td2->td_pcb->pcb_sp = (uintptr_t)td2->td_frame;

	/* Setup to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_sstatus_ie = (SSTATUS_SIE);
}
Example #6
static int
opalflash_read(struct opalflash_softc *sc, off_t off,
    caddr_t data, off_t count)
{
	struct opal_msg msg;
	int rv, size, token;

	/* Ensure the request is aligned to a full sector size. */
	if (off % sc->sc_disk->d_sectorsize != 0 ||
	    count % sc->sc_disk->d_sectorsize != 0)
		return (EIO);

	token = opal_alloc_async_token();

	/*
	 * Read one page at a time.  It's not guaranteed that the buffer is
	 * physically contiguous.
	 */
	rv = 0;
	while (count > 0) {
		size = MIN(count, PAGE_SIZE);
		size = MIN(size, PAGE_SIZE - ((u_long)data & PAGE_MASK));
		rv = opal_call(OPAL_FLASH_READ, sc->sc_opal_id, off,
		    vtophys(data), size, token);
		if (rv == OPAL_ASYNC_COMPLETION) {
			rv = opal_wait_completion(&msg, sizeof(msg), token);
			if (rv == OPAL_SUCCESS)
				rv = msg.params[1];
		}
		if (rv != OPAL_SUCCESS)
			break;
		count -= size;
		off += size;
		data += size;
	}
	opal_free_async_token(token);
	if (rv == OPAL_SUCCESS)
		rv = 0;
	else
		rv = EIO;

	return (rv);
}
Example #7
/*
 * Functions to map and unmap memory, non-cached, into KVA that the kernel
 * won't otherwise try to allocate.  The goal is to provide uncached memory
 * to busdma in order to honor BUS_DMA_COHERENT.
 * We can allocate at most ARM_NOCACHE_KVA_SIZE bytes.
 * The allocator is rather simplistic: each page is represented by a bit in
 * a bitfield, 0 meaning the page is not allocated, 1 meaning it is.
 * As soon as it finds enough contiguous pages to satisfy the request,
 * it returns the address.
 */
void *
arm_remap_nocache(void *addr, vm_size_t size)
{
	int i, j;

	size = round_page(size);
	for (i = 0; i < ARM_NOCACHE_KVA_SIZE / PAGE_SIZE; i++) {
		if (!(arm_nocache_allocated[i / BITS_PER_INT] & (1 << (i %
		    BITS_PER_INT)))) {
			for (j = i; j < i + (size / (PAGE_SIZE)); j++)
				if (arm_nocache_allocated[j / BITS_PER_INT] &
				    (1 << (j % BITS_PER_INT)))
					break;
			if (j == i + (size / (PAGE_SIZE)))
				break;
		}
	}
	if (i < ARM_NOCACHE_KVA_SIZE / PAGE_SIZE) {
		vm_offset_t tomap = arm_nocache_startaddr + i * PAGE_SIZE;
		void *ret = (void *)tomap;
		vm_paddr_t physaddr = vtophys((vm_offset_t)addr);
		vm_offset_t vaddr = (vm_offset_t) addr;
		
		vaddr = vaddr & ~PAGE_MASK;
		for (; tomap < (vm_offset_t)ret + size; tomap += PAGE_SIZE,
		    vaddr += PAGE_SIZE, physaddr += PAGE_SIZE, i++) {
			cpu_idcache_wbinv_range(vaddr, PAGE_SIZE);
#ifdef ARM_L2_PIPT
			cpu_l2cache_wbinv_range(physaddr, PAGE_SIZE);
#else
			cpu_l2cache_wbinv_range(vaddr, PAGE_SIZE);
#endif
			pmap_kenter_nocache(tomap, physaddr);
			cpu_tlb_flushID_SE(vaddr);
			arm_nocache_allocated[i / BITS_PER_INT] |= 1 << (i %
			    BITS_PER_INT);
		}
		return (ret);
	}

	return (NULL);
}
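The bit-per-page bookkeeping above is easier to follow in isolation. Here is a minimal, self-contained sketch of the same first-fit scan over a page bitmap; NOCACHE_PAGES, nocache_map and find_free_run are illustrative names, not part of the FreeBSD ARM code.

#include <limits.h>
#include <stdio.h>

#define NOCACHE_PAGES	256
#define BITS_PER_WORD	(sizeof(unsigned int) * CHAR_BIT)

static unsigned int nocache_map[NOCACHE_PAGES / BITS_PER_WORD];

/*
 * Find a run of 'npages' clear bits, mark it allocated and return its
 * starting page index, or -1 if no such run exists.
 */
static int
find_free_run(int npages)
{
	int i, j;

	for (i = 0; i + npages <= NOCACHE_PAGES; i++) {
		for (j = i; j < i + npages; j++)
			if (nocache_map[j / BITS_PER_WORD] &
			    (1U << (j % BITS_PER_WORD)))
				break;
		if (j == i + npages) {
			for (j = i; j < i + npages; j++)
				nocache_map[j / BITS_PER_WORD] |=
				    1U << (j % BITS_PER_WORD);
			return (i);
		}
	}
	return (-1);
}

int
main(void)
{
	printf("first 4-page run: %d\n", find_free_run(4));
	printf("next 4-page run:  %d\n", find_free_run(4));
	return (0);
}

arm_remap_nocache() performs the same search, but defers setting the bits until it actually enters the per-page mapping loop.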
Example #8
/*
 * allow user processes to MMAP some memory sections
 * instead of going through read/write
 */
int
memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int prot, vm_memattr_t *memattr)
{
	int i;

	if (dev2unit(dev) == CDEV_MINOR_MEM)
		*paddr = offset;
	else if (dev2unit(dev) == CDEV_MINOR_KMEM)
		*paddr = vtophys(offset);
	else
		return (EFAULT);

	for (i = 0; i < mem_range_softc.mr_ndesc; i++) {
		if (!(mem_range_softc.mr_desc[i].mr_flags & MDF_ACTIVE))
			continue;

		if (offset >= mem_range_softc.mr_desc[i].mr_base &&
		    offset < mem_range_softc.mr_desc[i].mr_base +
		    mem_range_softc.mr_desc[i].mr_len) {
			switch (mem_range_softc.mr_desc[i].mr_flags &
			    MDF_ATTRMASK) {
			case MDF_WRITEBACK:
				*memattr = VM_MEMATTR_WRITE_BACK;
				break;
			case MDF_WRITECOMBINE:
				*memattr = VM_MEMATTR_WRITE_COMBINING;
				break;
			case MDF_UNCACHEABLE:
				*memattr = VM_MEMATTR_UNCACHEABLE;
				break;
			case MDF_WRITETHROUGH:
				*memattr = VM_MEMATTR_WRITE_THROUGH;
				break;
			}

			break;
		}
	}

	return (0);
}
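For context, this d_mmap handler runs when a process mmap()s /dev/mem or /dev/kmem; each faulted page arrives here as an offset. A minimal userspace sketch follows (the 0x10000 offset is an arbitrary example, and reading /dev/mem normally requires root):

#include <sys/mman.h>

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	unsigned long word;
	void *p;
	int fd;

	fd = open("/dev/mem", O_RDONLY);
	if (fd == -1) {
		perror("open");
		return (1);
	}
	/* Each faulted page of this mapping reaches memmmap() above. */
	p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0x10000);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return (1);
	}
	word = *(volatile unsigned long *)p;
	printf("physical 0x10000 starts with 0x%lx\n", word);
	munmap(p, 4096);
	close(fd);
	return (0);
}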
Example #9
void *
oss_contig_malloc (unsigned long buffsize, unsigned long memlimit,
		   oss_native_word * phaddr)
{
  char *tmpbuf;
  *phaddr = 0;

  tmpbuf =
    (char *) contigmalloc (buffsize, M_DEVBUF, M_WAITOK, 0ul, memlimit,
			   PAGE_SIZE, 0ul);
  if (tmpbuf == NULL)
    {
      cmn_err (CE_CONT, "OSS: Unable to allocate %lu bytes for a DMA buffer\n",
	       buffsize);
      cmn_err (CE_CONT, "run soundoff and run soundon again.\n");
      return NULL;
    }
  *phaddr = vtophys (tmpbuf);
  return tmpbuf;
}
Example #10
/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
static int
nexus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/*
	 * XXX:
	 * (dmat->dt_alignment < dmat->dt_maxsize) is just a quick hack; the
	 * exact alignment guarantees of malloc need to be nailed down, and
	 * the code below should be rewritten to take that into account.
	 *
	 * In the meantime, we'll warn the user if malloc gets it wrong.
	 */
	if (dmat->dt_maxsize <= PAGE_SIZE &&
	    dmat->dt_alignment < dmat->dt_maxsize)
		*vaddr = malloc(dmat->dt_maxsize, M_DEVBUF, mflags);
	else {
		/*
		 * XXX use contigmalloc until it is merged into this
		 * facility and handles multi-seg allocations.  Nobody
		 * is doing multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->dt_maxsize, M_DEVBUF, mflags,
		    0ul, dmat->dt_lowaddr,
		    dmat->dt_alignment ? dmat->dt_alignment : 1UL,
		    dmat->dt_boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	if (vtophys(*vaddr) % dmat->dt_alignment)
		printf("%s: failed to align memory properly.\n", __func__);
	return (0);
}
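The free path has to repeat the same size/alignment test so the memory is returned to whichever allocator actually provided it. A sketch of the matching nexus_dmamem_free(), written here from that assumption rather than copied from the source:

static void
nexus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{

	/* Mirror the decision made in nexus_dmamem_alloc(). */
	if (dmat->dt_maxsize <= PAGE_SIZE &&
	    dmat->dt_alignment < dmat->dt_maxsize)
		free(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, dmat->dt_maxsize, M_DEVBUF);
}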
Example #11
void *
x86bios_alloc(uint32_t *offset, size_t size, int flags)
{
	void *vaddr;
	int i;

	if (offset == NULL || size == 0)
		return (NULL);
	vaddr = contigmalloc(size, M_DEVBUF, flags, 0, X86BIOS_MEM_SIZE,
	    PAGE_SIZE, 0);
	if (vaddr != NULL) {
		*offset = vtophys(vaddr);
		mtx_lock(&x86bios_lock);
		for (i = 0; i < atop(round_page(size)); i++)
			vm86_addpage(&x86bios_vmc, atop(*offset) + i,
			    (vm_offset_t)vaddr + ptoa(i));
		mtx_unlock(&x86bios_lock);
	}

	return (vaddr);
}
Example #12
/*******************************************************\
* allow user processes to MMAP some memory sections	*
* instead of going through read/write			*
\*******************************************************/
static int
memmmap(dev_t dev, vm_offset_t offset, vm_paddr_t *paddr, int prot)
{
	switch (minor(dev))
	{

	/* minor device 0 is physical memory */
	case 0:
		*paddr = offset;
		break;

	/* minor device 1 is kernel memory */
	case 1:
		*paddr = vtophys(offset);
		break;

	default:
		return (-1);
	}
	return (0);
}
Example #13
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
    gfp_t flag)
{
	vm_paddr_t high;
	size_t align;
	void *mem;

#if 0 /* XXX swildner */
	if (dev->dma_mask)
		high = *dev->dma_mask;
	else
#endif
		high = BUS_SPACE_MAXADDR_32BIT;
	align = PAGE_SIZE << get_order(size);
	mem = (void *)kmem_alloc_contig(size, 0, high, align);
	if (mem)
		*dma_handle = vtophys(mem);
	else
		*dma_handle = 0;
	return (mem);
}
Example #14
static int
memuksmap(cdev_t dev, vm_page_t fake)
{
	vm_ooffset_t result;
	int error;

	switch (minor(dev)) {
	case 0:
		/*
		 * minor device 0 is physical memory
		 */
		fake->phys_addr = ptoa(fake->pindex);
		error = 0;
		break;
	case 1:
		/*
		 * minor device 1 is kernel memory
		 */
		fake->phys_addr = vtophys(ptoa(fake->pindex));
		error = 0;
		break;
	case 5:
	case 6:
		/*
		 * minor device 5 is /dev/upmap (see sys/upmap.h)
		 * minor device 6 is /dev/kpmap (see sys/upmap.h)
		 */
		result = 0;
		error = user_kernel_mapping(minor(dev),
					    ptoa(fake->pindex), &result);
		fake->phys_addr = result;
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}
Example #15
static int
memmmap(struct dev_mmap_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	vm_ooffset_t result;
	int error;

	switch (minor(dev)) {
	case 0:
		/* 
		 * minor device 0 is physical memory 
		 */
		ap->a_result = atop(ap->a_offset);
		error = 0;
		break;
	case 1:
		/*
		 * minor device 1 is kernel memory 
		 */
		ap->a_result = atop(vtophys(ap->a_offset));
		error = 0;
		break;
	case 5:
	case 6:
		/*
		 * minor device 5 is /dev/upmap (see sys/upmap.h)
		 * minor device 6 is /dev/kpmap (see sys/upmap.h)
		 */
		result = 0;
		error = user_kernel_mapping(minor(dev), ap->a_offset, &result);
		ap->a_result = atop(result);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}
Example #16
static int
ntb_set_mw(struct ntb_netdev *nt, int num_mw, unsigned int size)
{
	struct ntb_transport_mw *mw = &nt->mw[num_mw];

	/* Alloc memory for receiving data.  Must be 4k aligned */
	mw->size = size;

	mw->virt_addr = contigmalloc(mw->size, M_NTB_IF, M_ZERO, 0,
	    BUS_SPACE_MAXADDR, mw->size, 0);
	if (mw->virt_addr == NULL) {
		printf("ntb: Unable to allocate MW buffer of size %d\n",
		    (int)mw->size);
		return (ENOMEM);
	}
	/* TODO: replace with bus_space_* functions */
	mw->dma_addr = vtophys(mw->virt_addr);

	/* Notify HW the memory location of the receive buffer */
	ntb_set_mw_addr(nt->ntb, num_mw, mw->dma_addr);

	return (0);
}
Example #17
void
uma_small_free(void *mem, int size, u_int8_t flags)
{
	pd_entry_t *pd;
	pt_entry_t *pt;

	if (flags & UMA_SLAB_KMEM)
		kmem_free(kmem_map, (vm_offset_t)mem, size);
	else {
		struct arm_small_page *sp;

		if ((vm_offset_t)mem >= KERNBASE) {
			mtx_lock(&smallalloc_mtx);
			sp = TAILQ_FIRST(&free_pgdesc);
			KASSERT(sp != NULL, ("No more free page descriptor ?"));
			TAILQ_REMOVE(&free_pgdesc, sp, pg_list);
			sp->addr = mem;
			pmap_get_pde_pte(kernel_pmap, (vm_offset_t)mem, &pd,
			    &pt);
			if ((*pd & pte_l1_s_cache_mask) == 
			    pte_l1_s_cache_mode_pt &&
			    pte_l1_s_cache_mode_pt != pte_l1_s_cache_mode)
				TAILQ_INSERT_HEAD(&pages_wt, sp, pg_list);
			else
				TAILQ_INSERT_HEAD(&pages_normal, sp, pg_list);
			mtx_unlock(&smallalloc_mtx);
		} else {
			vm_page_t m;
			vm_paddr_t pa = vtophys((vm_offset_t)mem);

			m = PHYS_TO_VM_PAGE(pa);
			m->wire_count--;
			vm_page_free(m);
			atomic_subtract_int(&cnt.v_wire_count, 1);
		}
	}
}
Example #18
/****************************************************************************
 * print_option_record
 *
 * Display "option" record from CMOS option table.
 ****************************************************************************/
static void print_option_record(const struct cmos_entries *cmos_entry)
{
	static const size_t S_BUFSIZE = 80;
	char s[S_BUFSIZE];

	switch (cmos_entry->config) {
	case 'e':
		strcpy(s, "ENUM");
		break;

	case 'h':
		strcpy(s, "HEX");
		break;

	case 'r':
		strcpy(s, "RESERVED");
		break;

	default:
		snprintf(s, S_BUFSIZE, "UNKNOWN: value is 0x%x (decimal: %d)",
			 cmos_entry->config, cmos_entry->config);
		break;
	}

	printf("    OPTION record at physical address 0x%lx:\n"
	       "        tag:       0x%x (decimal: %d)\n"
	       "        size:      0x%x (decimal: %d)\n"
	       "        bit:       0x%x (decimal: %d)\n"
	       "        length:    0x%x (decimal: %d)\n"
	       "        config:    %s\n"
	       "        config_id: 0x%x (decimal: %d)\n"
	       "        name:      %s\n",
	       vtophys(cmos_entry), cmos_entry->tag, cmos_entry->tag,
	       cmos_entry->size, cmos_entry->size, cmos_entry->bit,
	       cmos_entry->bit, cmos_entry->length, cmos_entry->length, s,
	       cmos_entry->config_id, cmos_entry->config_id, cmos_entry->name);
}
Example #19
/*
 * Allocate and fill in the hypercall page.
 */
int
xen_hvm_init_hypercall_stubs(enum xen_hvm_init_type init_type)
{
	uint32_t regs[4];

	/* Legacy PVH will get here without the cpuid leaf being set. */
	if (cpuid_base == 0)
		cpuid_base = xen_hvm_cpuid_base();
	if (cpuid_base == 0)
		return (ENXIO);

	if (xen_domain() && init_type == XEN_HVM_INIT_LATE) {
		/*
		 * If the domain type is already set we can assume that the
		 * hypercall page has been populated too, so just print the
		 * version (and apply any quirks) and exit.
		 */
		hypervisor_version();
		return (0);
	}

	if (init_type == XEN_HVM_INIT_LATE)
		hypervisor_version();

	/*
	 * Find the hypercall pages.
	 */
	do_cpuid(cpuid_base + 2, regs);
	if (regs[0] != 1)
		return (EINVAL);

	wrmsr(regs[1], (init_type == XEN_HVM_INIT_EARLY)
	    ? ((vm_paddr_t)&hypercall_page - KERNBASE)
	    : vtophys(&hypercall_page));

	return (0);
}
Example #20
/*
 * Finish a fork operation, with lwp lp2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct lwp *lp1, struct lwp *lp2, int flags)
{
	struct pcb *pcb2;

	if ((flags & RFPROC) == 0) {
		if ((flags & RFMEM) == 0) {
			/* unshare user LDT */
			struct pcb *pcb1 = lp1->lwp_thread->td_pcb;
			struct pcb_ldt *pcb_ldt = pcb1->pcb_ldt;
			if (pcb_ldt && pcb_ldt->ldt_refcnt > 1) {
				pcb_ldt = user_ldt_alloc(pcb1,pcb_ldt->ldt_len);
				user_ldt_free(pcb1);
				pcb1->pcb_ldt = pcb_ldt;
				set_user_ldt(pcb1);
			}
		}
		return;
	}

#if NNPX > 0
	/* Ensure that lp1's pcb is up to date. */
	if (mdcpu->gd_npxthread == lp1->lwp_thread)
		npxsave(lp1->lwp_thread->td_savefpu);
#endif
	
	/*
	 * Copy lp1's PCB.  This really only applies to the
	 * debug registers and FP state, but it's faster to just copy the
	 * whole thing.  Because we only save the PCB at switchout time,
	 * the register state may not be current.
	 */
	pcb2 = lp2->lwp_thread->td_pcb;
	*pcb2 = *lp1->lwp_thread->td_pcb;

	/*
	 * Create a new fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies the user mode register values.  The
	 * 16 byte offset saves space for vm86, and must match 
	 * common_tss.esp0 (kernel stack pointer on entry from user mode)
	 *
	 * pcb_esp must allocate an additional call-return pointer below
	 * the trap frame which will be restored by cpu_restore from
	 * PCB_EIP, and the thread's td_sp pointer must allocate an
	 * additional two words below the pcb_esp call-return pointer to
	 * hold the LWKT restore function pointer and eflags.
	 *
	 * The LWKT restore function pointer must be set to cpu_restore,
	 * which is our standard heavy weight process switch-in function.
	 * YYY eventually we should shortcut fork_return and fork_trampoline
	 * to use the LWKT restore function directly so we can get rid of
	 * all the extra crap we are setting up.
	 */
	lp2->lwp_md.md_regs = (struct trapframe *)((char *)pcb2 - 16) - 1;
	bcopy(lp1->lwp_md.md_regs, lp2->lwp_md.md_regs, sizeof(*lp2->lwp_md.md_regs));

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(lp2->lwp_proc->p_vmspace)->pm_pdir);
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;	/* fork_trampoline argument */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)lp2->lwp_md.md_regs - sizeof(void *);
	pcb2->pcb_ebx = (int)lp2;		/* fork_trampoline argument */
	pcb2->pcb_eip = (int)fork_trampoline;
	lp2->lwp_thread->td_sp = (char *)(pcb2->pcb_esp - sizeof(void *));
	*(u_int32_t *)lp2->lwp_thread->td_sp = PSL_USER;
	lp2->lwp_thread->td_sp -= sizeof(void *);
	*(void **)lp2->lwp_thread->td_sp = (void *)cpu_heavy_restore;

	/*
	 * pcb2->pcb_ldt:	duplicated below, if necessary.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above (always 0 here).
	 * pcb2->pcb_onfault:	cloned above (always NULL here).
	 * pcb2->pcb_onfault_sp:cloned above (don't care)
	 */

	/*
	 * XXX don't copy the i/o pages.  this should probably be fixed.
	 */
	pcb2->pcb_ext = NULL;

        /* Copy the LDT, if necessary. */
        if (pcb2->pcb_ldt != NULL) {
		if (flags & RFMEM) {
			pcb2->pcb_ldt->ldt_refcnt++;
		} else {
			pcb2->pcb_ldt = user_ldt_alloc(pcb2,
				pcb2->pcb_ldt->ldt_len);
		}
        }
	bcopy(&lp1->lwp_thread->td_tls, &lp2->lwp_thread->td_tls,
	      sizeof(lp2->lwp_thread->td_tls));
	/*
	 * Now, cpu_switch() can schedule the new lwp.
	 * pcb_esp is loaded pointing to the cpu_switch() stack frame
	 * containing the return address when exiting cpu_switch.
	 * This will normally be to fork_trampoline(), which will have
	 * %ebx loaded with the new lwp's pointer.  fork_trampoline()
	 * will set up a stack to call fork_return(lp, frame); to complete
	 * the return to user-mode.
	 */
}
Example #21
File: mv.c Project: MarginC/kame
/*
 * XXX
 *
 * The binary raid.obj requires this function!
 */
ULONG_PTR HPTLIBAPI fOsPhysicalAddress(void *addr)
{
	return (ULONG_PTR)(vtophys(addr));
}
Example #22
vm_paddr_t
virtqueue_paddr(struct virtqueue *vq)
{
	return (vtophys(vq->vq_ring_mem));
}
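A transport typically consumes virtqueue_paddr() when it programs the ring location into the device. The sketch below shows the idea for the legacy virtio PCI interface, which takes a 4 KiB page frame number; vtpci_set_ring_addr(), struct vtpci_softc and vtpci_write_config_4() are assumed helper names, not code from this excerpt.

static void
vtpci_set_ring_addr(struct vtpci_softc *sc, struct virtqueue *vq)
{
	uint32_t pfn;

	/* The legacy interface expects a page frame number: paddr >> 12. */
	pfn = virtqueue_paddr(vq) >> 12;
	vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN, pfn);
}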
Example #23
int rhine_init(rhine *r)
{
	bigtime_t time;
	int err = -1;
	addr_t temp;
	int i;

	dprintf("rhine_init: r %p\n", r);

	r->region = vm_map_physical_memory(vm_get_kernel_aspace_id(), "rhine_region", (void **)&r->virt_base,
		REGION_ADDR_ANY_ADDRESS, r->phys_size, LOCK_KERNEL|LOCK_RW, r->phys_base);
	if(r->region < 0) {
		dprintf("rhine_init: error creating memory mapped region\n");
		err = -1;
		goto err;
	}
	dprintf("rhine mapped at address 0x%lx\n", r->virt_base);

	/* create regions for tx and rx descriptors */
	r->rxdesc_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rhine_rxdesc", (void **)&r->rxdesc,
		REGION_ADDR_ANY_ADDRESS, RXDESC_COUNT * sizeof(struct rhine_rx_desc), REGION_WIRING_WIRED_CONTIG, LOCK_KERNEL|LOCK_RW);
	r->rxdesc_phys = vtophys(r->rxdesc);
	dprintf("rhine: rx descriptors at %p, phys 0x%x\n", r->rxdesc, r->rxdesc_phys);
	r->txdesc_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rhine_txdesc", (void **)&r->txdesc,
		REGION_ADDR_ANY_ADDRESS, TXDESC_COUNT * sizeof(struct rhine_tx_desc), REGION_WIRING_WIRED_CONTIG, LOCK_KERNEL|LOCK_RW);
	r->txdesc_phys = vtophys(r->txdesc);
	dprintf("rhine: tx descriptors at %p, phys 0x%x\n", r->txdesc, r->txdesc_phys);
	r->reg_spinlock = 0;

	/* stick all rx and tx buffers in a circular buffer */
	for (i=0; i < RXDESC_COUNT; i++) {
		RXDESC(r, i).status = 0;
		RXDESC(r, i).framelen = 0;
		RXDESC(r, i).buflen = 0;
		RXDESC(r, i).ptr = 0;
		if (i == RXDESC_COUNT-1)
			RXDESC(r, i).next = RXDESC_PHYS(r, 0);
		else
			RXDESC(r, i).next = RXDESC_PHYS(r, i + 1);
	}
	// XXX do same for tx


	r->rx_head = r->rx_tail = 0;

	/* reset the chip */
	time = system_time();
	RHINE_WRITE_16(r, RHINE_CR0, 0x8000); // reset the chip
	do {
		thread_snooze(10000); // 10ms
		if(system_time() - time > 1000000) {
			break;
		}
	} while(RHINE_READ_16(r, RHINE_CR0) & 0x8000);

	if (RHINE_READ_16(r, RHINE_CR0) & 0x8000) {
		dprintf("chip didn't reset, trying alternate method\n");
		RHINE_SETBITS_8(r, RHINE_MISC_CR1, 0x40);
		thread_snooze(10000);
	}

	/* read in the mac address */
	RHINE_WRITE_8(r, RHINE_EECSR, RHINE_READ_8(r, RHINE_EECSR) | (1<<5));
	r->mac_addr[0] = RHINE_READ_8(r, RHINE_PAR0); 
	r->mac_addr[1] = RHINE_READ_8(r, RHINE_PAR1);
   	r->mac_addr[2] = RHINE_READ_8(r, RHINE_PAR2);
	r->mac_addr[3] = RHINE_READ_8(r, RHINE_PAR3);
   	r->mac_addr[4] = RHINE_READ_8(r, RHINE_PAR4);
   	r->mac_addr[5] = RHINE_READ_8(r, RHINE_PAR5);
  	dprintf("rhine: mac addr %x:%x:%x:%x:%x:%x\n",
  		r->mac_addr[0], r->mac_addr[1], r->mac_addr[2],
  		r->mac_addr[3], r->mac_addr[4], r->mac_addr[5]);

	/* set up the rx state */
	/* 64 byte fifo threshold, all physical/broadcast/multicast/small/error packets accepted */
	RHINE_WRITE_8(r, RHINE_RCR, (0<<5) | (1<<4) | (1<<3) | (1<<2) | (1<<1) | (1<<0));
	RHINE_WRITE_32(r, RHINE_RDA0, RXDESC_PHYS(r, r->rx_head));

	/* set up tx state */
	/* 64 byte fifo, default backup, default loopback mode */
	RHINE_WRITE_8(r, RHINE_TCR, 0);

	/* mask all interrupts */
	RHINE_WRITE_16(r, RHINE_IMR0, 0);

	/* clear all pending interrupts */
	RHINE_WRITE_16(r, RHINE_ISR0, 0xffff);
	
	/* set up the interrupt handler */
	int_set_io_interrupt_handler(r->irq, &rhine_int, r, "rhine");

	{
		static uint8 buf[2048];
		RXDESC(r, r->rx_tail).ptr = vtophys(buf);
		RXDESC(r, r->rx_tail).buflen = sizeof(buf);
		RXDESC(r, r->rx_tail).status = 0;
		RXDESC(r, r->rx_tail).framelen = RHINE_RX_OWNER;
		r->rx_tail++;

		RHINE_WRITE_16(r, RHINE_CR0, (1<<1) | (1<<3) | (1<<6));
	}

	/* unmask all interrupts */
	RHINE_WRITE_16(r, RHINE_IMR0, 0xffff);	

#if 0
	// try to reset the device
 	time = system_time();
	RTL_WRITE_8(r, RT_CHIPCMD, RT_CMD_RESET);
	do {
		thread_snooze(10000); // 10ms
		if(system_time() - time > 1000000) {
			err = -1;
			goto err1;
		}
	} while((RTL_READ_8(r, RT_CHIPCMD) & RT_CMD_RESET));

	// create a rx and tx buf
	r->rxbuf_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rhine_rxbuf", (void **)&r->rxbuf,
		REGION_ADDR_ANY_ADDRESS, 64*1024 + 16, REGION_WIRING_WIRED_CONTIG, LOCK_KERNEL|LOCK_RW);
	r->txbuf_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rhine_txbuf", (void **)&r->txbuf,
		REGION_ADDR_ANY_ADDRESS, 8*1024, REGION_WIRING_WIRED, LOCK_KERNEL|LOCK_RW);

	// set up the transmission buf and sem
	r->tx_sem = sem_create(4, "rhine_txsem");
	mutex_init(&r->lock, "rhine");
	r->txbn = 0;
	r->last_txbn = 0;
	r->rx_sem = sem_create(0, "rhine_rxsem");
	r->reg_spinlock = 0;

	// set up the interrupt handler
	int_set_io_interrupt_handler(r->irq, &rhine_int, r, "rhine");

	// read the mac address
	r->mac_addr[0] = RTL_READ_8(r, RT_IDR0);
	r->mac_addr[1] = RTL_READ_8(r, RT_IDR0 + 1);
	r->mac_addr[2] = RTL_READ_8(r, RT_IDR0 + 2);
	r->mac_addr[3] = RTL_READ_8(r, RT_IDR0 + 3);
  	r->mac_addr[4] = RTL_READ_8(r, RT_IDR0 + 4);
  	r->mac_addr[5] = RTL_READ_8(r, RT_IDR0 + 5);

  	dprintf("rhine: mac addr %x:%x:%x:%x:%x:%x\n",
  		r->mac_addr[0], r->mac_addr[1], r->mac_addr[2],
  		r->mac_addr[3], r->mac_addr[4], r->mac_addr[5]);

	// enable writing to the config registers
	RTL_WRITE_8(r, RT_CFG9346, 0xc0);

	// reset config 1
	RTL_WRITE_8(r, RT_CONFIG1, 0);

	// Enable receive and transmit functions
	RTL_WRITE_8(r, RT_CHIPCMD, RT_CMD_RX_ENABLE | RT_CMD_TX_ENABLE);

	// Set Rx FIFO threashold to 256, Rx size to 64k+16, 256 byte DMA burst
	RTL_WRITE_32(r, RT_RXCONFIG, 0x00009c00);

	// Set Tx 256 byte DMA burst
	RTL_WRITE_32(r, RT_TXCONFIG, 0x03000400);

	// Turn off lan-wake and set the driver-loaded bit
	RTL_WRITE_8(r, RT_CONFIG1, (RTL_READ_8(r, RT_CONFIG1) & ~0x30) | 0x20);

	// Enable FIFO auto-clear
	RTL_WRITE_8(r, RT_CONFIG4, RTL_READ_8(r, RT_CONFIG4) | 0x80);

	// go back to normal mode
	RTL_WRITE_8(r, RT_CFG9346, 0);

	// Setup RX buffers
	*(int *)r->rxbuf = 0;
	vm_get_page_mapping(vm_get_kernel_aspace_id(), r->rxbuf, &temp);
	dprintf("rx buffer will be at 0x%lx\n", temp);
	RTL_WRITE_32(r, RT_RXBUF, temp);

	// Setup TX buffers
	dprintf("tx buffer (virtual) is at 0x%lx\n", r->txbuf);
	*(int *)r->txbuf = 0;
	vm_get_page_mapping(vm_get_kernel_aspace_id(), r->txbuf, &temp);
	RTL_WRITE_32(r, RT_TXADDR0, temp);
	RTL_WRITE_32(r, RT_TXADDR1, temp + 2*1024);
	dprintf("first half of txbuf at 0x%lx\n", temp);
	*(int *)(r->txbuf + 4*1024) = 0;
	vm_get_page_mapping(vm_get_kernel_aspace_id(), r->txbuf + 4*1024, &temp);
	RTL_WRITE_32(r, RT_TXADDR2, temp);
	RTL_WRITE_32(r, RT_TXADDR3, temp + 2*1024);
	dprintf("second half of txbuf at 0x%lx\n", temp);

/*
	RTL_WRITE_32(r, RT_TXSTATUS0, RTL_READ_32(r, RT_TXSTATUS0) | 0xfffff000);
	RTL_WRITE_32(r, RT_TXSTATUS1, RTL_READ_32(r, RT_TXSTATUS1) | 0xfffff000);
	RTL_WRITE_32(r, RT_TXSTATUS2, RTL_READ_32(r, RT_TXSTATUS2) | 0xfffff000);
	RTL_WRITE_32(r, RT_TXSTATUS3, RTL_READ_32(r, RT_TXSTATUS3) | 0xfffff000);
*/
	// Reset RXMISSED counter
	RTL_WRITE_32(r, RT_RXMISSED, 0);

	// Enable receiving broadcast and physical match packets
//	RTL_WRITE_32(r, RT_RXCONFIG, RTL_READ_32(r, RT_RXCONFIG) | 0x0000000a);
	RTL_WRITE_32(r, RT_RXCONFIG, RTL_READ_32(r, RT_RXCONFIG) | 0x0000000f);

	// Filter out all multicast packets
	RTL_WRITE_32(r, RT_MAR0, 0);
	RTL_WRITE_32(r, RT_MAR0 + 4, 0);

	// Disable all multi-interrupts
	RTL_WRITE_16(r, RT_MULTIINTR, 0);

	RTL_WRITE_16(r, RT_INTRMASK, MYRT_INTS);
//	RTL_WRITE_16(r, RT_INTRMASK, 0x807f);

	// Enable RX/TX once more
	RTL_WRITE_8(r, RT_CHIPCMD, RT_CMD_RX_ENABLE | RT_CMD_TX_ENABLE);

	RTL_WRITE_8(r, RT_CFG9346, 0);
#endif

	return 0;

err1:
	vm_delete_region(vm_get_kernel_aspace_id(), r->region);
err:
	return err;
}
Example #24
static int
aau_bzero(void *dst, int len, int flags)
{
	struct i80321_aau_softc *sc = aau_softc;
	i80321_aaudesc_t *desc;
	int ret;
	int csr;
	int descnb = 0;
	int tmplen = len;
	int to_nextpagedst;
	int min_hop;
	vm_paddr_t pa, tmppa;

	if (!sc)
		return (-1);
	mtx_lock_spin(&sc->mtx);
	if (sc->flags & BUSY) {
		mtx_unlock_spin(&sc->mtx);
		return (-1);
	}
	sc->flags |= BUSY;
	mtx_unlock_spin(&sc->mtx);
	desc = sc->aauring[0].desc;
	if (flags & IS_PHYSICAL) {
		desc->local_addr = (vm_paddr_t)dst;
		desc->next_desc = 0;
		desc->count = len;
		desc->descr_ctrl = 2 << 1 | 1 << 31; /* Fill, enable dest write */
		bus_dmamap_sync(sc->dmatag, sc->aauring[0].map,
		    BUS_DMASYNC_PREWRITE);
	} else {
		test_virt_addr(dst, len);
		if ((vm_offset_t)dst & (31))
			cpu_dcache_wb_range((vm_offset_t)dst & ~31, 32);
		if (((vm_offset_t)dst + len) & 31)
			cpu_dcache_wb_range(((vm_offset_t)dst + len) & ~31,
			    32);
		cpu_dcache_inv_range((vm_offset_t)dst, len);
		while (tmplen > 0) {
			pa = vtophys(dst);
			to_nextpagedst = ((vm_offset_t)dst & ~PAGE_MASK) +
			    PAGE_SIZE - (vm_offset_t)dst;
			while (to_nextpagedst < tmplen) {
				tmppa = vtophys((vm_offset_t)dst +
				    to_nextpagedst);
				if (tmppa != pa + to_nextpagedst)
					break;
				to_nextpagedst += PAGE_SIZE;
			}
			min_hop = to_nextpagedst;
			if (min_hop < 64) {
				tmplen -= min_hop;
				bzero(dst, min_hop);
				cpu_dcache_wbinv_range((vm_offset_t)dst,
				    min_hop);

				dst = (void *)((vm_offset_t)dst + min_hop);
				if (tmplen <= 0 && descnb > 0) {
					sc->aauring[descnb - 1].desc->next_desc
					    = 0;
					bus_dmamap_sync(sc->dmatag,
					    sc->aauring[descnb - 1].map,
					    BUS_DMASYNC_PREWRITE);
				}
				continue;
			}
			desc->local_addr = pa;
			desc->count = tmplen > min_hop ? min_hop : tmplen;
			desc->descr_ctrl = 2 << 1 | 1 << 31; /* Fill, enable dest write */
			if (min_hop < tmplen) {
				tmplen -= min_hop;
				dst = (void *)((vm_offset_t)dst + min_hop);
			} else
				tmplen = 0;
			if (descnb + 1 >= AAU_RING_SIZE) {
				mtx_lock_spin(&sc->mtx);
				sc->flags &= ~BUSY;
				mtx_unlock_spin(&sc->mtx);
				return (-1);
			}
			if (tmplen > 0) {
				desc->next_desc = sc->aauring[descnb + 1].
				    phys_addr;
				bus_dmamap_sync(sc->dmatag,
				    sc->aauring[descnb].map,
				    BUS_DMASYNC_PREWRITE);
				desc = sc->aauring[descnb + 1].desc;
				descnb++;
			} else {
				desc->next_desc = 0;
				bus_dmamap_sync(sc->dmatag,
				    sc->aauring[descnb].map,
				    BUS_DMASYNC_PREWRITE);
			}
									
		}

	}
	AAU_REG_WRITE(sc, 0x0c /* Descriptor addr */,
	    sc->aauring[0].phys_addr);
	AAU_REG_WRITE(sc, 0 /* Control register */, 1 << 0/* Start transfer */);
	/* Wait until it's done. */
	while ((csr = AAU_REG_READ(sc, 0x4)) & (1 << 10))
		continue;
	if (csr & (1 << 5)) /* error */
		ret = -1;
	else
		ret = 0;
	/* Clear the interrupt. */
	AAU_REG_WRITE(sc, 0x4, csr);
	/* Stop the AAU. */
	AAU_REG_WRITE(sc, 0, 0);
	mtx_lock_spin(&sc->mtx);
	sc->flags &= ~BUSY;
	mtx_unlock_spin(&sc->mtx);
	return (ret);
}
Example #25
static int rtl8169_init(rtl8169 *r)
{
    //bigtime_t time;
    int err = -1;
    //addr_t temp;
    //int i;

    hal_mutex_init(&r->lock,DEBUG_MSG_PREFIX);


    SHOW_FLOW(2, "rtl8169_init: r %p\n", r);

    /*
     r->region = vm_map_physical_memory(vm_get_kernel_aspace_id(), "rtl8169_region", (void **)&r->virt_base, REGION_ADDR_ANY_ADDRESS, r->phys_size, LOCK_KERNEL|LOCK_RW, r->phys_base);
    if(r->region < 0) {
        SHOW_ERROR0(1, "rtl8169_init: error creating memory mapped region\n");
        err = -1;
        goto err;
    }*/

    size_t n_pages = BYTES_TO_PAGES(r->phys_size);

    hal_alloc_vaddress( (void **)&r->virt_base, n_pages); // alloc address of a page, but not memory
    hal_pages_control_etc( r->phys_base, (void *)r->virt_base, n_pages, page_map_io, page_rw, 0 );

    SHOW_INFO(2, "rtl8169 mapped at address 0x%lx\n", r->virt_base);

#if 0
    /* create regions for tx and rx descriptors */
    r->rxdesc_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_rxdesc", (void **)&r->rxdesc,
                                                  REGION_ADDR_ANY_ADDRESS, NUM_RX_DESCRIPTORS * DESCRIPTOR_LEN, REGION_WIRING_WIRED_CONTIG, LOCK_KERNEL|LOCK_RW);
    r->rxdesc_phys = vtophys(r->rxdesc);
    SHOW_INFO(2, "rtl8169: rx descriptors at %p, phys 0x%x\n", r->rxdesc, r->rxdesc_phys);
    r->txdesc_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_txdesc", (void **)&r->txdesc,
                                                  REGION_ADDR_ANY_ADDRESS, NUM_TX_DESCRIPTORS * DESCRIPTOR_LEN, REGION_WIRING_WIRED_CONTIG, LOCK_KERNEL|LOCK_RW);
    r->txdesc_phys = vtophys(r->txdesc);
    SHOW_INFO(2, "rtl8169: tx descriptors at %p, phys 0x%x\n", r->txdesc, r->txdesc_phys);
    r->reg_spinlock = 0;

    /* create a large tx and rx buffer for the descriptors to point to */
    r->rxbuf_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_rxbuf", (void **)&r->rxbuf,
                                                 REGION_ADDR_ANY_ADDRESS, NUM_RX_DESCRIPTORS * BUFSIZE_PER_FRAME, REGION_WIRING_WIRED, LOCK_KERNEL|LOCK_RW);
    r->txbuf_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_txbuf", (void **)&r->txbuf,
                                                 REGION_ADDR_ANY_ADDRESS, NUM_TX_DESCRIPTORS * BUFSIZE_PER_FRAME, REGION_WIRING_WIRED, LOCK_KERNEL|LOCK_RW);
#endif

    hal_pv_alloc( &r->rxdesc_phys, (void**)&r->rxdesc, NUM_RX_DESCRIPTORS * DESCRIPTOR_LEN );
    hal_pv_alloc( &r->txdesc_phys, (void**)&r->txdesc, NUM_TX_DESCRIPTORS * DESCRIPTOR_LEN );

    SHOW_INFO(2, "rx descriptors at %p, phys 0x%x\n", r->rxdesc, r->rxdesc_phys);
    SHOW_INFO(2, "tx descriptors at %p, phys 0x%x\n", r->txdesc, r->txdesc_phys);

    hal_pv_alloc( &r->rxbuf_phys, (void**)&r->rxbuf, NUM_RX_DESCRIPTORS * BUFSIZE_PER_FRAME );
    hal_pv_alloc( &r->txbuf_phys, (void**)&r->txbuf, NUM_TX_DESCRIPTORS * BUFSIZE_PER_FRAME );

    /* create a receive sem */
    hal_sem_init( &r->rx_sem, "rtl8169 rx_sem");

    /* transmit sem */
    hal_sem_init(  &r->tx_sem, "rtl8169 tx_sem");

    /* reset the chip */
    int repeats = 100;
    RTL_WRITE_8(r, REG_CR, (1<<4)); // reset the chip, disable tx/rx
    do {
        hal_sleep_msec(10); // 10ms
        if(repeats -- <= 0 )
            break;
    } while(RTL_READ_8(r, REG_CR) & (1<<4));

    /* read in the mac address */
    r->mac_addr[0] = RTL_READ_8(r, REG_IDR0);
    r->mac_addr[1] = RTL_READ_8(r, REG_IDR1);
    r->mac_addr[2] = RTL_READ_8(r, REG_IDR2);
    r->mac_addr[3] = RTL_READ_8(r, REG_IDR3);
    r->mac_addr[4] = RTL_READ_8(r, REG_IDR4);
    r->mac_addr[5] = RTL_READ_8(r, REG_IDR5);
    SHOW_INFO(2, "rtl8169: mac addr %x:%x:%x:%x:%x:%x\n",
              r->mac_addr[0], r->mac_addr[1], r->mac_addr[2],
              r->mac_addr[3], r->mac_addr[4], r->mac_addr[5]);

    /* some voodoo from BSD driver */
    RTL_WRITE_16(r, REG_CCR, RTL_READ_16(r, REG_CCR));
    RTL_SETBITS_16(r, REG_CCR, 0x3);

    /* mask all interrupts */
    RTL_WRITE_16(r, REG_IMR, 0);

    /* set up the tx/rx descriptors */
    rtl8169_setup_descriptors(r);

    /* enable tx/rx */
    RTL_SETBITS_8(r, REG_CR, (1<<3)|(1<<2));

    /* set up the rx state */
    /* 1024 byte dma threshold, 1024 dma max burst, CRC calc 8 byte+, accept all packets */
    RTL_WRITE_32(r, REG_RCR, (1<<16) | (6<<13) | (6<<8) | (0xf << 0));
    RTL_SETBITS_16(r, REG_CCR, (1<<5)); // rx checksum enable
    RTL_WRITE_16(r, REG_RMS, 1518); // rx mtu

    /* set up the tx state */
    RTL_WRITE_32(r, REG_TCR, (RTL_READ_32(r, REG_TCR) & ~0x1ff) | (6<<8)); // 1024 max burst dma
    RTL_WRITE_8(r, REG_MTPS, 0x3f); // max tx packet size (must be careful to not actually transmit more than mtu)

    /* set up the interrupt handler */
    //int_set_io_interrupt_handler(r->irq, &rtl8169_int, r, "rtl8169");
    if(hal_irq_alloc( r->irq, &rtl8169_int, r, HAL_IRQ_SHAREABLE ))
    {
        SHOW_ERROR( 0, "unable to allocate irq %d", r->irq );
        goto err1;
    }

    /* clear all pending interrupts */
    RTL_WRITE_16(r, REG_ISR, 0xffff);

    /* unmask interesting interrupts */
    RTL_WRITE_16(r, REG_IMR, IMR_SYSERR | IMR_LINKCHG | IMR_TER | IMR_TOK | IMR_RER | IMR_ROK | IMR_RXOVL);

    return 0;

err1:
    // TODO free what?
    //vm_delete_region(vm_get_kernel_aspace_id(), r->region);
//err:
    return err;
}
Example #26
int rtl8169_init(rtl8169 *r)
{
	bigtime_t time;
	int err = -1;
	addr_t temp;
	int i;

	SHOW_FLOW(2, "rtl8169_init: r %p\n", r);

	r->region = vm_map_physical_memory(vm_get_kernel_aspace_id(), "rtl8169_region", (void **)&r->virt_base,
		REGION_ADDR_ANY_ADDRESS, r->phys_size, LOCK_KERNEL|LOCK_RW, r->phys_base);
	if(r->region < 0) {
		SHOW_ERROR0(1, "rtl8169_init: error creating memory mapped region\n");
		err = -1;
		goto err;
	}
	SHOW_INFO(2, "rtl8169 mapped at address 0x%lx\n", r->virt_base);

	/* create regions for tx and rx descriptors */
	r->rxdesc_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_rxdesc", (void **)&r->rxdesc,
		REGION_ADDR_ANY_ADDRESS, NUM_RX_DESCRIPTORS * DESCRIPTOR_LEN, REGION_WIRING_WIRED_CONTIG, LOCK_KERNEL|LOCK_RW);
	r->rxdesc_phys = vtophys(r->rxdesc);
	SHOW_INFO(2, "rtl8169: rx descriptors at %p, phys 0x%x\n", r->rxdesc, r->rxdesc_phys);
	r->txdesc_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_txdesc", (void **)&r->txdesc,
		REGION_ADDR_ANY_ADDRESS, NUM_TX_DESCRIPTORS * DESCRIPTOR_LEN, REGION_WIRING_WIRED_CONTIG, LOCK_KERNEL|LOCK_RW);
	r->txdesc_phys = vtophys(r->txdesc);
	SHOW_INFO(2, "rtl8169: tx descriptors at %p, phys 0x%x\n", r->txdesc, r->txdesc_phys);
	r->reg_spinlock = 0;

	/* create a large tx and rx buffer for the descriptors to point to */
	r->rxbuf_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_rxbuf", (void **)&r->rxbuf,
			REGION_ADDR_ANY_ADDRESS, NUM_RX_DESCRIPTORS * BUFSIZE_PER_FRAME, REGION_WIRING_WIRED, LOCK_KERNEL|LOCK_RW);
	r->txbuf_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_txbuf", (void **)&r->txbuf,
			REGION_ADDR_ANY_ADDRESS, NUM_TX_DESCRIPTORS * BUFSIZE_PER_FRAME, REGION_WIRING_WIRED, LOCK_KERNEL|LOCK_RW);

	/* create a receive sem */
	r->rx_sem = sem_create(0, "rtl8169 rx_sem");

	/* transmit sem */
	r->tx_sem = sem_create(1, "rtl8169 tx_sem");

	/* reset the chip */
	time = system_time();
	RTL_WRITE_8(r, REG_CR, (1<<4)); // reset the chip, disable tx/rx
	do {
		thread_snooze(10000); // 10ms
		if(system_time() - time > 1000000) {
			break;
		}
	} while(RTL_READ_8(r, REG_CR) & (1<<4));

	/* read in the mac address */
	r->mac_addr[0] = RTL_READ_8(r, REG_IDR0); 
	r->mac_addr[1] = RTL_READ_8(r, REG_IDR1);
   	r->mac_addr[2] = RTL_READ_8(r, REG_IDR2);
	r->mac_addr[3] = RTL_READ_8(r, REG_IDR3);
   	r->mac_addr[4] = RTL_READ_8(r, REG_IDR4);
   	r->mac_addr[5] = RTL_READ_8(r, REG_IDR5);
  	SHOW_INFO(2, "rtl8169: mac addr %x:%x:%x:%x:%x:%x\n",
  		r->mac_addr[0], r->mac_addr[1], r->mac_addr[2],
  		r->mac_addr[3], r->mac_addr[4], r->mac_addr[5]);

	/* some voodoo from BSD driver */
	RTL_WRITE_16(r, REG_CCR, RTL_READ_16(r, REG_CCR));
	RTL_SETBITS_16(r, REG_CCR, 0x3);

	/* mask all interrupts */
	RTL_WRITE_16(r, REG_IMR, 0);

	/* set up the tx/rx descriptors */
	rtl8169_setup_descriptors(r);

	/* enable tx/rx */
	RTL_SETBITS_8(r, REG_CR, (1<<3)|(1<<2));

	/* set up the rx state */
	/* 1024 byte dma threshold, 1024 dma max burst, CRC calc 8 byte+, accept all packets */
	RTL_WRITE_32(r, REG_RCR, (1<<16) | (6<<13) | (6<<8) | (0xf << 0)); 
	RTL_SETBITS_16(r, REG_CCR, (1<<5)); // rx checksum enable
	RTL_WRITE_16(r, REG_RMS, 1518); // rx mtu

	/* set up the tx state */
	RTL_WRITE_32(r, REG_TCR, (RTL_READ_32(r, REG_TCR) & ~0x1ff) | (6<<8)); // 1024 max burst dma
	RTL_WRITE_8(r, REG_MTPS, 0x3f); // max tx packet size (must be careful to not actually transmit more than mtu)

	/* set up the interrupt handler */
	int_set_io_interrupt_handler(r->irq, &rtl8169_int, r, "rtl8169");

	/* clear all pending interrupts */
	RTL_WRITE_16(r, REG_ISR, 0xffff);
	
	/* unmask interesting interrupts */
	RTL_WRITE_16(r, REG_IMR, IMR_SYSERR | IMR_LINKCHG | IMR_TER | IMR_TOK | IMR_RER | IMR_ROK | IMR_RXOVL);

	return 0;

err1:
	vm_delete_region(vm_get_kernel_aspace_id(), r->region);
err:
	return err;
}
Example #27
static int
load_fw(struct tegra_xhci_softc *sc)
{
	const struct firmware *fw;
	const struct tegra_xusb_fw_hdr *fw_hdr;
	vm_paddr_t fw_paddr, fw_base;
	vm_offset_t fw_vaddr;
	vm_size_t fw_size;
	uint32_t code_tags, code_size;
	struct clocktime fw_clock;
	struct timespec	fw_timespec;
	int i;

	/* Reset ARU */
	FPCI_WR4(sc, XUSB_CFG_ARU_RST, ARU_RST_RESET);
	DELAY(3000);

	/* Check if FALCON already runs */
	if (CSB_RD4(sc, XUSB_CSB_MEMPOOL_ILOAD_BASE_LO) != 0) {
		device_printf(sc->dev,
		    "XUSB CPU is already loaded, CPUCTL: 0x%08X\n",
			 CSB_RD4(sc, XUSB_FALCON_CPUCTL));
		return (0);
	}

	fw = firmware_get(sc->fw_name);
	if (fw == NULL) {
		device_printf(sc->dev, "Cannot read xusb firmware\n");
		return (ENOENT);
	}

	/* Allocate uncached memory and copy the firmware into it. */
	fw_hdr = (const struct tegra_xusb_fw_hdr *)fw->data;
	fw_size = fw_hdr->fwimg_len;

	fw_vaddr = kmem_alloc_contig(kernel_arena, fw_size,
	    M_WAITOK, 0, -1UL, PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);
	fw_paddr = vtophys(fw_vaddr);
	fw_hdr = (const struct tegra_xusb_fw_hdr *)fw_vaddr;
	memcpy((void *)fw_vaddr, fw->data, fw_size);

	firmware_put(fw, FIRMWARE_UNLOAD);
	sc->fw_vaddr = fw_vaddr;
	sc->fw_size = fw_size;

	/* Setup firmware physical address and size. */
	fw_base = fw_paddr + sizeof(*fw_hdr);
	CSB_WR4(sc, XUSB_CSB_MEMPOOL_ILOAD_ATTR, fw_size);
	CSB_WR4(sc, XUSB_CSB_MEMPOOL_ILOAD_BASE_LO, fw_base & 0xFFFFFFFF);
	CSB_WR4(sc, XUSB_CSB_MEMPOOL_ILOAD_BASE_HI, (uint64_t)fw_base >> 32);
	CSB_WR4(sc, XUSB_CSB_MEMPOOL_APMAP, APMAP_BOOTPATH);

	/* Invalidate full L2IMEM context. */
	CSB_WR4(sc, XUSB_CSB_MEMPOOL_L2IMEMOP_TRIG,
	    L2IMEMOP_INVALIDATE_ALL);

	/* Program load of L2IMEM by boot code. */
	code_tags = howmany(fw_hdr->boot_codetag, XUSB_CSB_IMEM_BLOCK_SIZE);
	code_size = howmany(fw_hdr->boot_codesize, XUSB_CSB_IMEM_BLOCK_SIZE);
	CSB_WR4(sc, XUSB_CSB_MEMPOOL_L2IMEMOP_SIZE,
	    L2IMEMOP_SIZE_OFFSET(code_tags) |
	    L2IMEMOP_SIZE_SIZE(code_size));

	/* Execute L2IMEM boot code fetch. */
	CSB_WR4(sc, XUSB_CSB_MEMPOOL_L2IMEMOP_TRIG,
	    L2IMEMOP_LOAD_LOCKED_RESULT);

	/* Program FALCON auto-fill range and block count */
	CSB_WR4(sc, XUSB_FALCON_IMFILLCTL, code_size);
	CSB_WR4(sc, XUSB_FALCON_IMFILLRNG1,
	    IMFILLRNG1_TAG_LO(code_tags) |
	    IMFILLRNG1_TAG_HI(code_tags + code_size));

	CSB_WR4(sc, XUSB_FALCON_DMACTL, 0);
	/* Wait for CPU */
	for (i = 500; i > 0; i--) {
		if (CSB_RD4(sc, XUSB_CSB_MEMPOOL_L2IMEMOP_RESULT) &
		     L2IMEMOP_RESULT_VLD)
			break;
		DELAY(100);
	}
	if (i <= 0) {
		device_printf(sc->dev, "Timedout while wating for DMA, "
		    "state: 0x%08X\n",
		    CSB_RD4(sc, XUSB_CSB_MEMPOOL_L2IMEMOP_RESULT));
		return (ETIMEDOUT);
	}

	/* Boot FALCON cpu */
	CSB_WR4(sc, XUSB_FALCON_BOOTVEC, fw_hdr->boot_codetag);
	CSB_WR4(sc, XUSB_FALCON_CPUCTL, CPUCTL_STARTCPU);

	/* Wait for CPU */
	for (i = 50; i > 0; i--) {
		if (CSB_RD4(sc, XUSB_FALCON_CPUCTL) == CPUCTL_STOPPED)
			break;
		DELAY(100);
	}
	if (i <= 0) {
		device_printf(sc->dev, "Timedout while wating for FALCON cpu, "
		    "state: 0x%08X\n", CSB_RD4(sc, XUSB_FALCON_CPUCTL));
		return (ETIMEDOUT);
	}

	fw_timespec.tv_sec = fw_hdr->fwimg_created_time;
	fw_timespec.tv_nsec = 0;
	clock_ts_to_ct(&fw_timespec, &fw_clock);
	device_printf(sc->dev,
	    " Falcon firmware version: %02X.%02X.%04X,"
	    " (%d/%d/%d %d:%02d:%02d UTC)\n",
	    (fw_hdr->version_id >> 24) & 0xFF,(fw_hdr->version_id >> 15) & 0xFF,
	    fw_hdr->version_id & 0xFFFF,
	    fw_clock.day, fw_clock.mon, fw_clock.year,
	    fw_clock.hour, fw_clock.min, fw_clock.sec);

	return (0);
}
Example #28
static void
rtas_setup(void *junk)
{
	ihandle_t rtasi;
	cell_t rtas_size = 0, rtas_ptr;
	char path[31];
	int result;

	rtas = OF_finddevice("/rtas");
	if (rtas == -1) {
		rtas = 0;
		return;
	}
	OF_package_to_path(rtas, path, sizeof(path));
	rtasi = OF_open(path);
	if (rtasi == 0) {
		rtas = 0;
		printf("Error initializing RTAS: could not open node\n");
		return;
	}

	mtx_init(&rtas_mtx, "RTAS", MTX_DEF, 0);

	/* RTAS must be called with everything turned off in MSR */
	rtasmsr = mfmsr();
	rtasmsr &= ~(PSL_IR | PSL_DR | PSL_EE | PSL_SE);
	#ifdef __powerpc64__
	rtasmsr &= ~PSL_SF;
	#endif

	/*
	 * Allocate rtas_size + one page of contiguous, wired physical memory
	 * that can fit into a 32-bit address space and be accessed from real mode.
	 * This is used both to bounce arguments and for RTAS private data.
	 *
	 * It must be 4KB-aligned and not cross a 256 MB boundary.
	 */

	OF_getprop(rtas, "rtas-size", &rtas_size, sizeof(rtas_size));
	rtas_size = round_page(rtas_size);
	rtas_bounce_virt = contigmalloc(rtas_size + PAGE_SIZE, M_RTAS, 0, 0,
	    ulmin(platform_real_maxaddr(), BUS_SPACE_MAXADDR_32BIT),
	    4096, 256*1024*1024);

	rtas_private_data = vtophys(rtas_bounce_virt);
	rtas_bounce_virt += rtas_size;	/* Actual bounce area */
	rtas_bounce_phys = vtophys(rtas_bounce_virt);
	rtas_bounce_size = PAGE_SIZE;

	/*
	 * Instantiate RTAS. We always use the 32-bit version.
	 */

	result = OF_call_method("instantiate-rtas", rtasi, 1, 1,
	    (cell_t)rtas_private_data, &rtas_ptr);
	OF_close(rtasi);

	if (result != 0) {
		rtas = 0;
		rtas_ptr = 0;
		printf("Error initializing RTAS (%d)\n", result);
		return;
	}

	rtas_entry = (uintptr_t)(rtas_ptr);
}
Example #29
unsigned long
n8_GetLargeAllocation(N8_MemoryType_t bankIndex,
                      unsigned long size, unsigned char debug)
{

    NspInstance_t	*nip  = &NSPDeviceTable_g[0];	/* can only attach once */
    struct nsp_softc *sc = (struct nsp_softc *)nip->dev;

    bus_dma_segment_t seg;
    int rseg;
    void *kva = NULL;

#if 0
    /* Replacement for: */
    m = contigmalloc(size, M_DEVBUF, M_WAITOK,
                     0, 		/* lower acceptable phys addr	*/
                     0xffffffff,	/* upper acceptable phys addr	*/
                     PAGE_SIZE,		/* alignment			*/
                     0);		/* boundary			*/
#endif
    if (bus_dmamem_alloc(sc->dma_tag, size, PAGE_SIZE, 0,
                         &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
        printf("%s: can't alloc DMA buffer\n", sc->device.dv_xname);
        return 0;
    }
    if (bus_dmamem_map(sc->dma_tag, &seg, rseg, size, &kva,
                       BUS_DMA_NOWAIT)) {
        printf("%s: can't map DMA buffers (%lu bytes)\n", sc->device.dv_xname,
               size);
        bus_dmamem_free(sc->dma_tag, &seg, rseg);
        return 0;
    }
    if (bus_dmamap_create(sc->dma_tag, size, 1,
                          size, 0, BUS_DMA_NOWAIT, &DmaMap_g[bankIndex])) {
        printf("%s: can't create DMA map\n", sc->device.dv_xname);
        bus_dmamem_unmap(sc->dma_tag, kva, size);
        bus_dmamem_free(sc->dma_tag, &seg, rseg);
        return 0;
    }
    if (bus_dmamap_load(sc->dma_tag, DmaMap_g[bankIndex], kva, size,
                        NULL, BUS_DMA_NOWAIT)) {
        printf("%s: can't load DMA map\n", sc->device.dv_xname);
        bus_dmamap_destroy(sc->dma_tag, DmaMap_g[bankIndex]);
        bus_dmamem_unmap(sc->dma_tag, kva, size);
        bus_dmamem_free(sc->dma_tag, &seg, rseg);
        return 0;
    }
    if (kva) {
        /* bzero(kva, size) */
        BasePointer_g[bankIndex]    = kva;
        MemSize_g[bankIndex]        = size;
        Seg_g[bankIndex]            = seg;
        Rseg_g[bankIndex]           = rseg;
        MemBaseAddress_g[bankIndex] = vtophys((u_int)kva);
        MemTopAddress_g[bankIndex]  = MemBaseAddress_g[bankIndex] + size;
    }

    if (debug)
    {
        printf("n8_GetLargeAllocation: %p (0x%08lx) allocated for bankIndex %d\n",
               BasePointer_g[bankIndex], MemBaseAddress_g[bankIndex], bankIndex);
    }

    return MemBaseAddress_g[bankIndex];
}
Example #30
/*
 * If mode = 0, count how many descriptors are needed.
 * If mode = 1, map the DMA pages for the device, and assemble and map the
 * descriptors as well.
 * Descriptors are run in reverse order by the hardware because we are not
 * allowed to update the 'next' field without syncing calls when the
 * descriptor is already mapped.
 */
static void
via_map_blit_for_device(const drm_via_dmablit_t *xfer,
		   drm_via_sg_info_t *vsg, int mode)
{
	unsigned cur_descriptor_page = 0;
	unsigned num_descriptors_this_page = 0;
	unsigned char *mem_addr = xfer->mem_addr;
	unsigned char *cur_mem;
	unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
	uint32_t fb_addr = xfer->fb_addr;
	uint32_t cur_fb;
	unsigned long line_len;
	unsigned remaining_len;
	int num_desc = 0;
	int cur_line;
	dma_addr_t next = 0 | VIA_DMA_DPR_EC;
	drm_via_descriptor_t *desc_ptr = NULL;

	if (mode == 1)
		desc_ptr = vsg->desc_pages[cur_descriptor_page];

	for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {

		line_len = xfer->line_length;
		cur_fb = fb_addr;
		cur_mem = mem_addr;

		while (line_len > 0) {

			remaining_len = min(PAGE_SIZE - VIA_PGOFF(cur_mem),
			    line_len);
			line_len -= remaining_len;

			if (mode == 1) {
				desc_ptr->mem_addr =
				    VM_PAGE_TO_PHYS(
				    vsg->pages[VIA_PFN(cur_mem) -
				    VIA_PFN(first_addr)]) + VIA_PGOFF(cur_mem);
				desc_ptr->dev_addr = cur_fb;

				desc_ptr->size = remaining_len;
				desc_ptr->next = (uint32_t) next;

				next = vtophys(desc_ptr);

				desc_ptr++;
				if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
					num_descriptors_this_page = 0;
					desc_ptr = vsg->desc_pages[++cur_descriptor_page];
				}
			}

			num_desc++;
			cur_mem += remaining_len;
			cur_fb += remaining_len;
		}

		mem_addr += xfer->mem_stride;
		fb_addr += xfer->fb_stride;
	}

	if (mode == 1) {
		vsg->chain_start = next;
		vsg->state = dr_via_device_mapped;
	}
	vsg->num_desc = num_desc;
}
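The two passes the comment describes are driven from the caller: mode 0 sizes the descriptor array, the pages are allocated, then mode 1 fills and chains them. A rough sketch of that call pattern; via_dmablit_map() and via_alloc_desc_pages() are placeholder names, only via_map_blit_for_device() comes from the excerpt:

static int
via_dmablit_map(const drm_via_dmablit_t *xfer, drm_via_sg_info_t *vsg)
{

	/* Pass 1: mode 0 only counts how many descriptors are needed. */
	via_map_blit_for_device(xfer, vsg, 0);

	/* Allocate enough descriptor pages to hold vsg->num_desc entries. */
	vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
	vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
	    vsg->descriptors_per_page;
	if (via_alloc_desc_pages(vsg) != 0)
		return (ENOMEM);

	/* Pass 2: mode 1 fills the descriptors and chains them in reverse. */
	via_map_blit_for_device(xfer, vsg, 1);
	return (0);
}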