static void
bcm2835_cpufreq_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
{
	bus_addr_t *addr;

	if (err)
		return;

	addr = (bus_addr_t *)arg;
	*addr = PHYS_TO_VCBUS(segs[0].ds_addr);
}
static void
vchiq_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
{
	bus_addr_t *addr;

	if (err)
		return;

	addr = (bus_addr_t *)arg;
	*addr = PHYS_TO_VCBUS(segs[0].ds_addr);
}
/*
 * Start DMA transaction
 *   ch - channel number
 *   src, dst - source and destination address in
 *       ARM physical memory address space
 *   len - number of bytes to be transferred
 *
 * Returns 0 on success, -1 otherwise
 */
int
bcm_dma_start(int ch, vm_paddr_t src, vm_paddr_t dst, int len)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	struct bcm_dma_cb *cb;

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return (-1);

	if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED))
		return (-1);

	cb = sc->sc_dma_ch[ch].cb;
	if (BCM2835_ARM_IS_IO(src))
		cb->src = IO_TO_VCBUS(src);
	else
		cb->src = PHYS_TO_VCBUS(src);
	if (BCM2835_ARM_IS_IO(dst))
		cb->dst = IO_TO_VCBUS(dst);
	else
		cb->dst = PHYS_TO_VCBUS(dst);
	cb->len = len;

	bus_dmamap_sync(sc->sc_dma_tag, sc->sc_dma_ch[ch].dma_map,
	    BUS_DMASYNC_PREWRITE);

	bus_write_4(sc->sc_mem, BCM_DMA_CBADDR(ch),
	    sc->sc_dma_ch[ch].vc_cb);
	bus_write_4(sc->sc_mem, BCM_DMA_CS(ch), CS_ACTIVE);

#ifdef DEBUG
	bcm_dma_cb_dump(sc->sc_dma_ch[ch].cb);
	bcm_dma_reg_dump(ch);
#endif

	return (0);
}
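/*
 * Caller-side sketch (not part of the driver above): drive one page-sized
 * transfer through bcm_dma_start().  Only bcm_dma_start() comes from the
 * code above; bcm_dma_allocate(), bcm_dma_free() and BCM_DMA_CH_ANY are
 * assumed helper names used here purely for illustration.
 */
static int
copy_page_with_dma(vm_paddr_t src_phys, vm_paddr_t dst_phys)
{
	int ch;

	ch = bcm_dma_allocate(BCM_DMA_CH_ANY);	/* assumed channel allocator */
	if (ch < 0)
		return (ENXIO);

	/*
	 * Addresses are ARM physical; bcm_dma_start() translates them to
	 * VideoCore bus addresses (PHYS_TO_VCBUS/IO_TO_VCBUS) itself.  The
	 * transfer completes asynchronously; the driver reports completion
	 * through its per-channel interrupt handling, not here.
	 */
	if (bcm_dma_start(ch, src_phys, dst_phys, PAGE_SIZE) != 0) {
		bcm_dma_free(ch);		/* assumed helper */
		return (EIO);
	}

	return (0);
}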
static int
create_pagelist(char __user *buf, size_t count, unsigned short type,
	struct proc *p, PAGELIST_T **ppagelist)
{
	PAGELIST_T *pagelist;
	vm_page_t *pages;
	unsigned long *addrs;
	unsigned int num_pages, offset, i;
	int pagelist_size;
	char *addr, *base_addr, *next_addr;
	int run, addridx, actual_pages;

	offset = (unsigned int)buf & (PAGE_SIZE - 1);
	num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;

	*ppagelist = NULL;

	/* Allocate enough storage to hold the page pointers and the page
	** list
	*/
	pagelist_size = sizeof(PAGELIST_T) +
		(num_pages * sizeof(unsigned long)) +
		(num_pages * sizeof(vm_page_t));

	pagelist = malloc(pagelist_size, M_VCPAGELIST, M_WAITOK | M_ZERO);

	vchiq_log_trace(vchiq_arm_log_level,
		"create_pagelist - %x", (unsigned int)pagelist);
	if (!pagelist)
		return -ENOMEM;

	addrs = pagelist->addrs;
	pages = (vm_page_t *)(addrs + num_pages);

	actual_pages = vm_fault_quick_hold_pages(&p->p_vmspace->vm_map,
	    (vm_offset_t)buf, count,
	    (type == PAGELIST_READ ? VM_PROT_WRITE : 0) | VM_PROT_READ,
	    pages, num_pages);

	if (actual_pages != num_pages) {
		vm_page_unhold_pages(pages, actual_pages);
		free(pagelist, M_VCPAGELIST);
		return (-ENOMEM);
	}

	pagelist->length = count;
	pagelist->type = type;
	pagelist->offset = offset;

	/* Group the pages into runs of contiguous pages */

	base_addr = (void *)PHYS_TO_VCBUS(VM_PAGE_TO_PHYS(pages[0]));
	next_addr = base_addr + PAGE_SIZE;
	addridx = 0;
	run = 0;

	for (i = 1; i < num_pages; i++) {
		addr = (void *)PHYS_TO_VCBUS(VM_PAGE_TO_PHYS(pages[i]));
		if ((addr == next_addr) && (run < (PAGE_SIZE - 1))) {
			next_addr += PAGE_SIZE;
			run++;
		} else {
			addrs[addridx] = (unsigned long)base_addr + run;
			addridx++;
			base_addr = addr;
			next_addr = addr + PAGE_SIZE;
			run = 0;
		}
	}

	addrs[addridx] = (unsigned long)base_addr + run;
	addridx++;

	/* Partial cache lines (fragments) require special measures */
	if ((type == PAGELIST_READ) &&
		((pagelist->offset & (CACHE_LINE_SIZE - 1)) ||
		((pagelist->offset + pagelist->length) &
		(CACHE_LINE_SIZE - 1)))) {
		FRAGMENTS_T *fragments;

		if (down_interruptible(&g_free_fragments_sema) != 0) {
			free(pagelist, M_VCPAGELIST);
			return -EINTR;
		}

		WARN_ON(g_free_fragments == NULL);

		down(&g_free_fragments_mutex);
		fragments = (FRAGMENTS_T *)g_free_fragments;
		WARN_ON(fragments == NULL);
		g_free_fragments = *(FRAGMENTS_T **)g_free_fragments;
		up(&g_free_fragments_mutex);
		pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
			(fragments - g_fragments_base);
	}

	cpu_dcache_wbinv_range((vm_offset_t)pagelist, pagelist_size);

	*ppagelist = pagelist;

	return 0;
}
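/*
 * Illustrative decode of one pagelist entry as built by the grouping loop
 * above: base_addr is a page-aligned VideoCore bus address, so the count of
 * extra contiguous pages ("run") ends up packed into its low bits.  This
 * helper is only a sketch of that encoding as it appears in the loop, not a
 * function from the driver, and the consumer-side interpretation is an
 * assumption.
 */
static inline void
decode_pagelist_entry(unsigned long entry, unsigned long *base_bus_addr,
    unsigned int *npages)
{
	*base_bus_addr = entry & ~(PAGE_SIZE - 1);	/* page-aligned base */
	*npages = (entry & (PAGE_SIZE - 1)) + 1;	/* run + 1 pages */
}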
static int
create_pagelist(char __user *buf, size_t count, unsigned short type,
	struct proc *p, BULKINFO_T *bi)
{
	PAGELIST_T *pagelist;
	vm_page_t *pages;
	unsigned long *addrs;
	unsigned int num_pages, i;
	vm_offset_t offset;
	int pagelist_size;
	char *addr, *base_addr, *next_addr;
	int run, addridx, actual_pages;
	int err;
	vm_paddr_t pagelist_phys;

	offset = (vm_offset_t)buf & (PAGE_SIZE - 1);
	num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;

	bi->pagelist = NULL;
	bi->buf = buf;
	bi->size = count;

	/* Allocate enough storage to hold the page pointers and the page
	** list
	*/
	pagelist_size = sizeof(PAGELIST_T) +
		(num_pages * sizeof(unsigned long)) +
		(num_pages * sizeof(pages[0]));

	err = bus_dma_tag_create(
	    NULL,
	    PAGE_SIZE, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    pagelist_size, 1,		/* maxsize, nsegments */
	    pagelist_size, 0,		/* maxsegsize, flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &bi->pagelist_dma_tag);

	err = bus_dmamem_alloc(bi->pagelist_dma_tag, (void **)&pagelist,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK, &bi->pagelist_dma_map);
	if (err) {
		vchiq_log_error(vchiq_core_log_level,
		    "Unable to allocate pagelist memory");
		err = -ENOMEM;
		goto failed_alloc;
	}

	err = bus_dmamap_load(bi->pagelist_dma_tag, bi->pagelist_dma_map,
	    pagelist, pagelist_size, vchiq_dmamap_cb,
	    &pagelist_phys, 0);
	if (err) {
		vchiq_log_error(vchiq_core_log_level,
		    "cannot load DMA map for pagelist memory");
		err = -ENOMEM;
		goto failed_load;
	}

	vchiq_log_trace(vchiq_arm_log_level,
		"create_pagelist - %x", (unsigned int)pagelist);
	if (!pagelist)
		return -ENOMEM;

	addrs = pagelist->addrs;
	pages = (vm_page_t *)(addrs + num_pages);

	actual_pages = vm_fault_quick_hold_pages(&p->p_vmspace->vm_map,
	    (vm_offset_t)buf, count,
	    (type == PAGELIST_READ ? VM_PROT_WRITE : 0) | VM_PROT_READ,
	    pages, num_pages);

	if (actual_pages != num_pages) {
		vm_page_unhold_pages(pages, actual_pages);
		free(pagelist, M_VCPAGELIST);
		return (-ENOMEM);
	}

	pagelist->length = count;
	pagelist->type = type;
	pagelist->offset = offset;

	/* Group the pages into runs of contiguous pages */

	base_addr = (void *)PHYS_TO_VCBUS(VM_PAGE_TO_PHYS(pages[0]));
	next_addr = base_addr + PAGE_SIZE;
	addridx = 0;
	run = 0;

	for (i = 1; i < num_pages; i++) {
		addr = (void *)PHYS_TO_VCBUS(VM_PAGE_TO_PHYS(pages[i]));
		if ((addr == next_addr) && (run < (PAGE_SIZE - 1))) {
			next_addr += PAGE_SIZE;
			run++;
		} else {
			addrs[addridx] = (unsigned long)base_addr + run;
			addridx++;
			base_addr = addr;
			next_addr = addr + PAGE_SIZE;
			run = 0;
		}
	}

	addrs[addridx] = (unsigned long)base_addr + run;
	addridx++;

	/* Partial cache lines (fragments) require special measures */
	if ((type == PAGELIST_READ) &&
		((pagelist->offset & (CACHE_LINE_SIZE - 1)) ||
		((pagelist->offset + pagelist->length) &
		(CACHE_LINE_SIZE - 1)))) {
		FRAGMENTS_T *fragments;

		if (down_interruptible(&g_free_fragments_sema) != 0) {
			free(pagelist, M_VCPAGELIST);
			return -EINTR;
		}

		WARN_ON(g_free_fragments == NULL);

		down(&g_free_fragments_mutex);
		fragments = (FRAGMENTS_T *)g_free_fragments;
		WARN_ON(fragments == NULL);
		g_free_fragments = *(FRAGMENTS_T **)g_free_fragments;
		up(&g_free_fragments_mutex);
		pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
			(fragments - g_fragments_base);
	}

	/* XXX: optimize? INV operation for read, WBINV for write? */
	cpu_dcache_wbinv_range((vm_offset_t)buf, count);

	bi->pagelist = pagelist;

	return 0;

failed_load:
	bus_dmamap_unload(bi->pagelist_dma_tag, bi->pagelist_dma_map);
failed_alloc:
	bus_dmamap_destroy(bi->pagelist_dma_tag, bi->pagelist_dma_map);
	bus_dma_tag_destroy(bi->pagelist_dma_tag);

	return err;
}
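/*
 * Hedged sketch of the matching teardown for a BULKINFO_T prepared by the
 * create_pagelist() variant above: unhold the user pages and release the
 * bus_dma resources in the reverse order of the function's error path.
 * The function name and the num_pages recomputation are assumptions made
 * for illustration; the driver's real completion path additionally copies
 * fragment data back for partially cache-line-aligned reads and returns
 * the fragment to the free list, which is omitted here.
 */
static void
destroy_pagelist_sketch(BULKINFO_T *bi)
{
	PAGELIST_T *pagelist = bi->pagelist;
	unsigned int num_pages;
	vm_page_t *pages;

	if (pagelist == NULL)
		return;

	/* Recompute the layout used by create_pagelist(): addrs[] is
	 * followed by the held vm_page_t pointers. */
	num_pages = (bi->size + pagelist->offset + PAGE_SIZE - 1) / PAGE_SIZE;
	pages = (vm_page_t *)(pagelist->addrs + num_pages);

	vm_page_unhold_pages(pages, num_pages);

	bus_dmamap_unload(bi->pagelist_dma_tag, bi->pagelist_dma_map);
	bus_dmamem_free(bi->pagelist_dma_tag, pagelist, bi->pagelist_dma_map);
	bus_dma_tag_destroy(bi->pagelist_dma_tag);
	bi->pagelist = NULL;
}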