/*
 * Release a pagelist previously built by create_pagelist() once the bulk
 * transfer has completed.
 *
 * 'actual' is the number of bytes actually transferred, or negative on
 * failure.  For PAGELIST_READ transfers that needed fragment buffers, the
 * partial head/tail cache lines are copied from the fragment buffers back
 * into the user pages before the fragment slot is returned to the free list.
 */
static void
free_pagelist(PAGELIST_T *pagelist, int actual)
{
	vm_page_t *pages;
	unsigned int num_pages, i;
	void *page_address;

	vchiq_log_trace(vchiq_arm_log_level,
	    "free_pagelist - %x, %d", (unsigned int)pagelist, actual);

	num_pages =
	    (pagelist->length + pagelist->offset + PAGE_SIZE - 1) / PAGE_SIZE;
	/* The vm_page_t array was laid out immediately after addrs[]. */
	pages = (vm_page_t *)(pagelist->addrs + num_pages);

	/* Deal with any partial cache lines (fragments) */
	if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
		FRAGMENTS_T *fragments = g_fragments_base +
		    (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS);
		int head_bytes, tail_bytes;

		head_bytes = (CACHE_LINE_SIZE - pagelist->offset) &
		    (CACHE_LINE_SIZE - 1);
		tail_bytes = (pagelist->offset + actual) &
		    (CACHE_LINE_SIZE - 1);

		if (actual >= 0) {
			/* XXXBSD: might be inefficient */
			page_address = pmap_mapdev(VM_PAGE_TO_PHYS(pages[0]),
			    PAGE_SIZE * num_pages);
		} else
			page_address = NULL;

		if ((actual >= 0) && (head_bytes != 0)) {
			if (head_bytes > actual)
				head_bytes = actual;
			memcpy((char *)page_address + pagelist->offset,
			    fragments->headbuf, head_bytes);
		}

		if ((actual >= 0) && (head_bytes < actual) &&
		    (tail_bytes != 0)) {
			memcpy((char *)page_address +
			    PAGE_SIZE * (num_pages - 1) +
			    ((pagelist->offset + actual) &
			    (PAGE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1)),
			    fragments->tailbuf, tail_bytes);
		}

		/*
		 * BUG FIX: the mapping above was created with pmap_mapdev(),
		 * so it must be released with pmap_unmapdev(), which takes a
		 * byte length and frees the KVA.  The previous
		 * pmap_qremove() call leaked the KVA allocation and also
		 * passed a byte count where pmap_qremove() expects a page
		 * count.
		 */
		if (page_address)
			pmap_unmapdev((vm_offset_t)page_address,
			    PAGE_SIZE * num_pages);

		/* Return the fragment slot to the free list. */
		down(&g_free_fragments_mutex);
		*(FRAGMENTS_T **)fragments = g_free_fragments;
		g_free_fragments = fragments;
		up(&g_free_fragments_mutex);
		up(&g_free_fragments_sema);
	}

	for (i = 0; i < num_pages; i++) {
		/* Anything other than a pure write may have dirtied pages. */
		if (pagelist->type != PAGELIST_WRITE)
			vm_page_dirty(pages[i]);
	}

	vm_page_unhold_pages(pages, num_pages);

	free(pagelist, M_VCPAGELIST);
}
/*
 * Build a PAGELIST_T describing the user buffer [buf, buf+count) so the
 * VideoCore can DMA directly to/from it.
 *
 * The user pages are wired with vm_fault_quick_hold_pages() and their bus
 * addresses are packed into pagelist->addrs[] as runs of contiguous pages
 * (run length stored in the low bits of the address).  PAGELIST_READ
 * transfers whose head or tail is not cache-line aligned are retagged
 * PAGELIST_READ_WITH_FRAGMENTS and claim a fragment slot for the partial
 * lines.
 *
 * Returns 0 on success and stores the list in *ppagelist; returns a
 * negative errno on failure (caller owns nothing in that case).
 */
static int
create_pagelist(char __user *buf, size_t count, unsigned short type,
    struct proc *p, PAGELIST_T **ppagelist)
{
	PAGELIST_T *pagelist;
	vm_page_t *pages;
	unsigned long *addrs;
	unsigned int num_pages, offset, i;
	int pagelist_size;
	char *addr, *base_addr, *next_addr;
	int run, addridx, actual_pages;

	offset = (unsigned int)buf & (PAGE_SIZE - 1);
	num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;

	*ppagelist = NULL;

	/* Allocate enough storage to hold the page pointers and the page
	** list */
	pagelist_size = sizeof(PAGELIST_T) +
	    (num_pages * sizeof(unsigned long)) +
	    (num_pages * sizeof(vm_page_t));
	pagelist = malloc(pagelist_size, M_VCPAGELIST, M_WAITOK | M_ZERO);

	vchiq_log_trace(vchiq_arm_log_level, "create_pagelist - %x",
	    (unsigned int)pagelist);
	/* M_WAITOK cannot fail, but keep the check for defence in depth. */
	if (!pagelist)
		return -ENOMEM;

	addrs = pagelist->addrs;
	/* The vm_page_t array lives immediately after addrs[]. */
	pages = (vm_page_t *)(addrs + num_pages);

	actual_pages = vm_fault_quick_hold_pages(&p->p_vmspace->vm_map,
	    (vm_offset_t)buf, count,
	    (type == PAGELIST_READ ? VM_PROT_WRITE : 0) | VM_PROT_READ,
	    pages, num_pages);

	if (actual_pages != num_pages) {
		/*
		 * BUG FIX: vm_fault_quick_hold_pages() returns -1 when the
		 * range cannot be wired; only unhold pages that were
		 * actually held.
		 */
		if (actual_pages > 0)
			vm_page_unhold_pages(pages, actual_pages);
		free(pagelist, M_VCPAGELIST);
		return (-ENOMEM);
	}

	pagelist->length = count;
	pagelist->type = type;
	pagelist->offset = offset;

	/* Group the pages into runs of contiguous pages */
	base_addr = (void *)PHYS_TO_VCBUS(VM_PAGE_TO_PHYS(pages[0]));
	next_addr = base_addr + PAGE_SIZE;
	addridx = 0;
	run = 0;

	for (i = 1; i < num_pages; i++) {
		addr = (void *)PHYS_TO_VCBUS(VM_PAGE_TO_PHYS(pages[i]));
		/* Run length is encoded in the page-offset bits, so a run
		 * may not exceed PAGE_SIZE - 1 additional pages. */
		if ((addr == next_addr) && (run < (PAGE_SIZE - 1))) {
			next_addr += PAGE_SIZE;
			run++;
		} else {
			addrs[addridx] = (unsigned long)base_addr + run;
			addridx++;
			base_addr = addr;
			next_addr = addr + PAGE_SIZE;
			run = 0;
		}
	}

	addrs[addridx] = (unsigned long)base_addr + run;
	addridx++;

	/* Partial cache lines (fragments) require special measures */
	if ((type == PAGELIST_READ) &&
	    ((pagelist->offset & (CACHE_LINE_SIZE - 1)) ||
	    ((pagelist->offset + pagelist->length) &
	    (CACHE_LINE_SIZE - 1)))) {
		FRAGMENTS_T *fragments;

		if (down_interruptible(&g_free_fragments_sema) != 0) {
			/*
			 * BUG FIX: the user pages were already wired above;
			 * the old code leaked the wiring on this path.
			 */
			vm_page_unhold_pages(pages, num_pages);
			free(pagelist, M_VCPAGELIST);
			return -EINTR;
		}

		WARN_ON(g_free_fragments == NULL);

		down(&g_free_fragments_mutex);
		fragments = (FRAGMENTS_T *)g_free_fragments;
		WARN_ON(fragments == NULL);
		g_free_fragments = *(FRAGMENTS_T **)g_free_fragments;
		up(&g_free_fragments_mutex);
		pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
		    (fragments - g_fragments_base);
	}

	/* Write back and invalidate so the VC sees a coherent pagelist. */
	cpu_dcache_wbinv_range((vm_offset_t)pagelist, pagelist_size);

	*ppagelist = pagelist;

	return 0;
}
/*
 * Release a pagelist built by create_pagelist() once the bulk transfer has
 * completed (Linux variant).
 *
 * 'actual' is the number of bytes actually transferred, or negative on
 * failure.  For PAGELIST_READ_WITH_FRAGMENTS transfers the partial head and
 * tail cache lines are copied from the fragment buffers back into the user
 * pages, then the fragment slot is pushed back onto the free list.
 */
static void free_pagelist(PAGELIST_T *pagelist, int actual)
{
	unsigned long *need_release;
	struct page **pages;
	unsigned int num_pages, i;

	vchiq_log_trace(vchiq_arm_log_level,
		"free_pagelist - %x, %d", (unsigned int)pagelist, actual);

	num_pages =
		(pagelist->length + pagelist->offset + PAGE_SIZE - 1) /
		PAGE_SIZE;
	/* Layout from create_pagelist: addrs[num_pages], then the
	 * need_release flag word, then the struct page * array. */
	need_release = (unsigned long *)(pagelist->addrs + num_pages);
	pages = (struct page **)(pagelist->addrs + num_pages + 1);

	/* Deal with any partial cache lines (fragments) */
	if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
		/* The fragment index was encoded into the type field. */
		FRAGMENTS_T *fragments = g_fragments_base +
			(pagelist->type - PAGELIST_READ_WITH_FRAGMENTS);
		int head_bytes, tail_bytes;

		head_bytes = (CACHE_LINE_SIZE - pagelist->offset) &
			(CACHE_LINE_SIZE - 1);
		tail_bytes = (pagelist->offset + actual) &
			(CACHE_LINE_SIZE - 1);

		/* Copy the unaligned head back into the first page.  Skipped
		 * when the transfer failed (actual < 0). */
		if ((actual >= 0) && (head_bytes != 0)) {
			if (head_bytes > actual)
				head_bytes = actual;
			memcpy((char *)page_address(pages[0]) +
				pagelist->offset,
				fragments->headbuf, head_bytes);
		}

		/* Copy the unaligned tail back into the last page, at the
		 * start of the cache line containing the final byte. */
		if ((actual >= 0) && (head_bytes < actual) &&
			(tail_bytes != 0)) {
			memcpy((char *)page_address(pages[num_pages - 1]) +
				((pagelist->offset + actual) &
				(PAGE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1)),
				fragments->tailbuf, tail_bytes);
		}

		/* Return the fragment slot to the free list. */
		down(&g_free_fragments_mutex);
		*(FRAGMENTS_T **) fragments = g_free_fragments;
		g_free_fragments = fragments;
		up(&g_free_fragments_mutex);
		up(&g_free_fragments_sema);
	}

	/* Only pages pinned via get_user_pages() are released here;
	 * vmalloc-backed pages set *need_release to 0. */
	if (*need_release) {
		for (i = 0; i < num_pages; i++) {
			/* Reads from the VC may have dirtied the pages. */
			if (pagelist->type != PAGELIST_WRITE)
				set_page_dirty(pages[i]);
			page_cache_release(pages[i]);
		}
	}

	kfree(pagelist);
}
/*
 * Build a PAGELIST_T describing [buf, buf+count) so the VideoCore can DMA
 * directly to/from it (Linux variant).
 *
 * vmalloc addresses are translated page-by-page with vmalloc_to_page() and
 * need no release; ordinary user addresses are pinned with
 * get_user_pages().  Bus addresses are packed into pagelist->addrs[] as
 * runs of contiguous pages (run length in the low bits).  Unaligned
 * PAGELIST_READ transfers claim a fragment slot and are retagged
 * PAGELIST_READ_WITH_FRAGMENTS.
 *
 * Returns 0 on success (list stored in *ppagelist), negative errno on
 * failure; on failure no pages remain pinned and nothing is allocated.
 */
static int
create_pagelist(char __user *buf, size_t count, unsigned short type,
	struct task_struct *task, PAGELIST_T **ppagelist)
{
	PAGELIST_T *pagelist;
	struct page **pages;
	struct page *page;
	unsigned long *addrs;
	unsigned int num_pages, offset, i;
	char *addr, *base_addr, *next_addr;
	int run, addridx, actual_pages;
	unsigned long *need_release;

	offset = (unsigned int)buf & (PAGE_SIZE - 1);
	num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;

	*ppagelist = NULL;

	/* Allocate enough storage to hold the page pointers and the page
	** list: addrs[num_pages], the need_release flag word, then the
	** struct page * array (see free_pagelist for the matching layout). */
	pagelist = kmalloc(sizeof(PAGELIST_T) +
		(num_pages * sizeof(unsigned long)) +
		sizeof(unsigned long) +
		(num_pages * sizeof(pages[0])),
		GFP_KERNEL);

	vchiq_log_trace(vchiq_arm_log_level,
		"create_pagelist - %x", (unsigned int)pagelist);
	if (!pagelist)
		return -ENOMEM;

	addrs = pagelist->addrs;
	need_release = (unsigned long *)(addrs + num_pages);
	pages = (struct page **)(addrs + num_pages + 1);

	if (is_vmalloc_addr(buf)) {
		for (actual_pages = 0; actual_pages < num_pages;
			actual_pages++) {
			pages[actual_pages] = vmalloc_to_page(buf +
				(actual_pages * PAGE_SIZE));
		}
		*need_release = 0; /* do not try and release vmalloc pages */
	} else {
		down_read(&task->mm->mmap_sem);
		actual_pages = get_user_pages(task, task->mm,
			(unsigned long)buf & ~(PAGE_SIZE - 1), num_pages,
			(type == PAGELIST_READ) /*Write */,
			0 /*Force */, pages, NULL /*vmas */);
		up_read(&task->mm->mmap_sem);

		if (actual_pages != num_pages) {
			vchiq_log_info(vchiq_arm_log_level,
				"create_pagelist - only %d/%d pages locked",
				actual_pages, num_pages);

			/* This is probably due to the process being killed */
			while (actual_pages > 0) {
				actual_pages--;
				page_cache_release(pages[actual_pages]);
			}
			kfree(pagelist);
			/*
			 * BUG FIX: the old code returned actual_pages after
			 * "if (actual_pages == 0) actual_pages = -ENOMEM;",
			 * but the release loop above always leaves
			 * actual_pages at 0, so the test was dead logic.
			 * Return the error explicitly.
			 */
			return -ENOMEM;
		}
		*need_release = 1; /* release user pages */
	}

	pagelist->length = count;
	pagelist->type = type;
	pagelist->offset = offset;

	/* Group the pages into runs of contiguous pages */
	base_addr = VCHIQ_ARM_ADDRESS(page_address(pages[0]));
	next_addr = base_addr + PAGE_SIZE;
	addridx = 0;
	run = 0;

	for (i = 1; i < num_pages; i++) {
		addr = VCHIQ_ARM_ADDRESS(page_address(pages[i]));
		/* Run length lives in the low (page-offset) bits, so a run
		 * is capped at PAGE_SIZE - 1 additional pages. */
		if ((addr == next_addr) && (run < (PAGE_SIZE - 1))) {
			next_addr += PAGE_SIZE;
			run++;
		} else {
			addrs[addridx] = (unsigned long)base_addr + run;
			addridx++;
			base_addr = addr;
			next_addr = addr + PAGE_SIZE;
			run = 0;
		}
	}

	addrs[addridx] = (unsigned long)base_addr + run;
	addridx++;

	/* Partial cache lines (fragments) require special measures */
	if ((type == PAGELIST_READ) &&
		((pagelist->offset & (CACHE_LINE_SIZE - 1)) ||
		((pagelist->offset + pagelist->length) &
		(CACHE_LINE_SIZE - 1)))) {
		FRAGMENTS_T *fragments;

		if (down_interruptible(&g_free_fragments_sema) != 0) {
			/*
			 * BUG FIX: pages pinned by get_user_pages() above
			 * were previously leaked on this error path; release
			 * them before freeing the pagelist.
			 */
			if (*need_release) {
				for (i = 0; i < num_pages; i++)
					page_cache_release(pages[i]);
			}
			kfree(pagelist);
			return -EINTR;
		}

		WARN_ON(g_free_fragments == NULL);

		down(&g_free_fragments_mutex);
		fragments = (FRAGMENTS_T *) g_free_fragments;
		WARN_ON(fragments == NULL);
		g_free_fragments = *(FRAGMENTS_T **) g_free_fragments;
		up(&g_free_fragments_mutex);
		pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
			(fragments - g_fragments_base);
	}

	/* Flush the pagelist itself so the VC reads coherent data. */
	for (page = virt_to_page(pagelist);
		page <= virt_to_page(addrs + num_pages - 1); page++) {
		flush_dcache_page(page);
	}

	*ppagelist = pagelist;

	return 0;
}
/*
 * Release a BULKINFO_T (and its embedded DMA-allocated pagelist) built by
 * create_pagelist() once the bulk transfer has completed.
 *
 * 'actual' is the number of bytes actually transferred, or negative on
 * failure.  For PAGELIST_READ_WITH_FRAGMENTS transfers the partial head and
 * tail cache lines are copied from the fragment buffers back into the
 * caller's buffer, then the fragment slot is returned to the free list.
 */
static void free_pagelist(BULKINFO_T *bi, int actual)
{
	vm_page_t*pages;
	unsigned int num_pages, i;
	void *page_address;
	PAGELIST_T *pagelist;

	pagelist = bi->pagelist;

	vchiq_log_trace(vchiq_arm_log_level,
		"free_pagelist - %x, %d", (unsigned int)pagelist, actual);

	num_pages =
		(pagelist->length + pagelist->offset + PAGE_SIZE - 1) /
		PAGE_SIZE;
	/* The vm_page_t array was laid out immediately after addrs[]. */
	pages = (vm_page_t*)(pagelist->addrs + num_pages);

	/* Deal with any partial cache lines (fragments) */
	if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
		/* The fragment index was encoded into the type field. */
		FRAGMENTS_T *fragments = g_fragments_base +
			(pagelist->type - PAGELIST_READ_WITH_FRAGMENTS);
		int head_bytes, tail_bytes;

		head_bytes = (CACHE_LINE_SIZE - pagelist->offset) &
			(CACHE_LINE_SIZE - 1);
		tail_bytes = (pagelist->offset + actual) &
			(CACHE_LINE_SIZE - 1);

		/* bi->buf already points at the user's first byte, so the
		 * head fragment is copied to the start of the buffer. */
		if ((actual >= 0) && (head_bytes != 0)) {
			if (head_bytes > actual)
				head_bytes = actual;
			memcpy((char *)bi->buf,
				fragments->headbuf, head_bytes);
		}

		if ((actual >= 0) && (head_bytes < actual) &&
			(tail_bytes != 0)) {
			memcpy((char *)bi->buf + actual - tail_bytes,
				fragments->tailbuf, tail_bytes);
		}

		/* Return the fragment slot to the free list. */
		down(&g_free_fragments_mutex);
		*(FRAGMENTS_T **) fragments = g_free_fragments;
		g_free_fragments = fragments;
		up(&g_free_fragments_mutex);
		up(&g_free_fragments_sema);
	}

	for (i = 0; i < num_pages; i++) {
		/* Anything other than a pure write may have dirtied pages. */
		if (pagelist->type != PAGELIST_WRITE)
			vm_page_dirty(pages[i]);
	}

	vm_page_unhold_pages(pages, num_pages);

	/* Tear down the DMA resources in reverse order of creation.
	 * NOTE(review): bus_dmamem_free() normally also releases the map
	 * returned by bus_dmamem_alloc(); the explicit bus_dmamap_destroy()
	 * that follows looks redundant - confirm against bus_dma(9). */
	bus_dmamap_unload(bi->pagelist_dma_tag, bi->pagelist_dma_map);
	bus_dmamem_free(bi->pagelist_dma_tag, bi->pagelist,
		bi->pagelist_dma_map);
	bus_dmamap_destroy(bi->pagelist_dma_tag, bi->pagelist_dma_map);
	bus_dma_tag_destroy(bi->pagelist_dma_tag);

	free(bi, M_VCPAGELIST);
}
/*
 * Build a DMA-coherent PAGELIST_T describing [buf, buf+count) and record it
 * in *bi (BULKINFO variant).
 *
 * The pagelist itself is allocated from a dedicated bus_dma tag so the VC
 * can read it coherently.  The user pages are wired with
 * vm_fault_quick_hold_pages() and packed into pagelist->addrs[] as runs of
 * contiguous pages (run length in the low bits).  Unaligned PAGELIST_READ
 * transfers claim a fragment slot and are retagged
 * PAGELIST_READ_WITH_FRAGMENTS.
 *
 * Returns 0 on success, negative errno on failure; on failure all DMA
 * resources are torn down and no pages remain wired.
 */
static int
create_pagelist(char __user *buf, size_t count, unsigned short type,
    struct proc *p, BULKINFO_T *bi)
{
	PAGELIST_T *pagelist;
	vm_page_t *pages;
	unsigned long *addrs;
	unsigned int num_pages, i;
	vm_offset_t offset;
	int pagelist_size;
	char *addr, *base_addr, *next_addr;
	int run, addridx, actual_pages;
	int err;
	vm_paddr_t pagelist_phys;

	offset = (vm_offset_t)buf & (PAGE_SIZE - 1);
	num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;

	bi->pagelist = NULL;
	bi->buf = buf;
	bi->size = count;

	/* Allocate enough storage to hold the page pointers and the page
	** list */
	pagelist_size = sizeof(PAGELIST_T) +
	    (num_pages * sizeof(unsigned long)) +
	    (num_pages * sizeof(pages[0]));

	err = bus_dma_tag_create(
	    NULL,
	    PAGE_SIZE, 0,	     /* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,  /* lowaddr */
	    BUS_SPACE_MAXADDR,	      /* highaddr */
	    NULL, NULL,		      /* filter, filterarg */
	    pagelist_size, 1,	      /* maxsize, nsegments */
	    pagelist_size, 0,	      /* maxsegsize, flags */
	    NULL, NULL,		      /* lockfunc, lockarg */
	    &bi->pagelist_dma_tag);
	/*
	 * BUG FIX: this return value was previously ignored; on failure the
	 * code went on to use an invalid tag.
	 */
	if (err) {
		vchiq_log_error(vchiq_core_log_level,
		    "Unable to allocate pagelist memory");
		return -ENOMEM;
	}

	err = bus_dmamem_alloc(bi->pagelist_dma_tag, (void **)&pagelist,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK, &bi->pagelist_dma_map);
	if (err) {
		vchiq_log_error(vchiq_core_log_level,
		    "Unable to allocate pagelist memory");
		err = -ENOMEM;
		goto failed_alloc;
	}

	err = bus_dmamap_load(bi->pagelist_dma_tag, bi->pagelist_dma_map,
	    pagelist, pagelist_size, vchiq_dmamap_cb, &pagelist_phys, 0);
	if (err) {
		vchiq_log_error(vchiq_core_log_level,
		    "cannot load DMA map for pagelist memory");
		err = -ENOMEM;
		goto failed_load;
	}

	vchiq_log_trace(vchiq_arm_log_level, "create_pagelist - %x",
	    (unsigned int)pagelist);

	addrs = pagelist->addrs;
	/* The vm_page_t array lives immediately after addrs[]. */
	pages = (vm_page_t *)(addrs + num_pages);

	actual_pages = vm_fault_quick_hold_pages(&p->p_vmspace->vm_map,
	    (vm_offset_t)buf, count,
	    (type == PAGELIST_READ ? VM_PROT_WRITE : 0) | VM_PROT_READ,
	    pages, num_pages);

	if (actual_pages != num_pages) {
		/*
		 * BUG FIX: vm_fault_quick_hold_pages() returns -1 on
		 * failure, so only unhold pages that were actually held.
		 * Also, pagelist came from bus_dmamem_alloc(), not
		 * malloc(); the old free(pagelist, M_VCPAGELIST) used the
		 * wrong allocator and leaked the DMA resources.
		 */
		if (actual_pages > 0)
			vm_page_unhold_pages(pages, actual_pages);
		err = -ENOMEM;
		goto failed_hold;
	}

	pagelist->length = count;
	pagelist->type = type;
	pagelist->offset = offset;

	/* Group the pages into runs of contiguous pages */
	base_addr = (void *)PHYS_TO_VCBUS(VM_PAGE_TO_PHYS(pages[0]));
	next_addr = base_addr + PAGE_SIZE;
	addridx = 0;
	run = 0;

	for (i = 1; i < num_pages; i++) {
		addr = (void *)PHYS_TO_VCBUS(VM_PAGE_TO_PHYS(pages[i]));
		/* Run length is encoded in the page-offset bits, capping a
		 * run at PAGE_SIZE - 1 additional pages. */
		if ((addr == next_addr) && (run < (PAGE_SIZE - 1))) {
			next_addr += PAGE_SIZE;
			run++;
		} else {
			addrs[addridx] = (unsigned long)base_addr + run;
			addridx++;
			base_addr = addr;
			next_addr = addr + PAGE_SIZE;
			run = 0;
		}
	}

	addrs[addridx] = (unsigned long)base_addr + run;
	addridx++;

	/* Partial cache lines (fragments) require special measures */
	if ((type == PAGELIST_READ) &&
	    ((pagelist->offset & (CACHE_LINE_SIZE - 1)) ||
	    ((pagelist->offset + pagelist->length) &
	    (CACHE_LINE_SIZE - 1)))) {
		FRAGMENTS_T *fragments;

		if (down_interruptible(&g_free_fragments_sema) != 0) {
			/*
			 * BUG FIX: release the wired pages and the DMA
			 * resources (not free()) on this error path.
			 */
			vm_page_unhold_pages(pages, num_pages);
			err = -EINTR;
			goto failed_hold;
		}

		WARN_ON(g_free_fragments == NULL);

		down(&g_free_fragments_mutex);
		fragments = (FRAGMENTS_T *)g_free_fragments;
		WARN_ON(fragments == NULL);
		g_free_fragments = *(FRAGMENTS_T **)g_free_fragments;
		up(&g_free_fragments_mutex);
		pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
		    (fragments - g_fragments_base);
	}

	/* XXX: optimize? INV operation for read WBINV for write? */
	cpu_dcache_wbinv_range((vm_offset_t)buf, count);

	bi->pagelist = pagelist;

	return 0;

failed_hold:
	bus_dmamap_unload(bi->pagelist_dma_tag, bi->pagelist_dma_map);
failed_load:
	/* BUG FIX: the DMA memory was previously leaked on load failure. */
	bus_dmamem_free(bi->pagelist_dma_tag, pagelist,
	    bi->pagelist_dma_map);
	bus_dmamap_destroy(bi->pagelist_dma_tag, bi->pagelist_dma_map);
failed_alloc:
	/* BUG FIX: do not destroy a map that was never created; only the
	 * tag exists if bus_dmamem_alloc() failed. */
	bus_dma_tag_destroy(bi->pagelist_dma_tag);

	return err;
}