static int mic_psmi_alloc_buffer(mic_ctx_t *mic_ctx)
{
    int i, j, ret;
    void *va;
    dma_addr_t dma_hndl;
    struct mic_psmi_ctx *psmi_ctx = &mic_ctx->bi_psmi;

    /* allocate psmi page tables */
    psmi_ctx->nr_dma_pages =
        ALIGN(psmi_ctx->dma_mem_size, MIC_PSMI_PAGE_SIZE) /
        MIC_PSMI_PAGE_SIZE;
    if ((psmi_ctx->va_tbl =
            kmalloc(psmi_ctx->nr_dma_pages *
                sizeof(struct mic_psmi_pte), GFP_KERNEL)) == NULL) {
        printk("mic: psmi va table alloc failed\n");
        return -ENOMEM;
    }
    psmi_ctx->dma_tbl_size =
        (psmi_ctx->nr_dma_pages + 2) * sizeof(struct mic_psmi_pte);
    if ((psmi_ctx->dma_tbl =
            kmalloc(psmi_ctx->dma_tbl_size, GFP_KERNEL)) == NULL) {
        printk("mic: psmi dma table alloc failed\n");
        ret = -ENOMEM;
        goto free_va_tbl;
    }
    psmi_ctx->dma_tbl_hndl =
        pci_map_single(mic_ctx->bi_pdev, psmi_ctx->dma_tbl,
                       psmi_ctx->dma_tbl_size, PCI_DMA_BIDIRECTIONAL);
    if (pci_dma_mapping_error(mic_ctx->bi_pdev,
                              psmi_ctx->dma_tbl_hndl)) {
        printk("mic: psmi dma table mapping failed\n");
        ret = -ENOMEM;
        goto free_dma_tbl;
    }

    /* allocate psmi pages */
    for (i = 0; i < psmi_ctx->nr_dma_pages; i++) {
        if ((va = (void *)__get_free_pages(
                GFP_KERNEL | __GFP_HIGHMEM,
                MIC_PSMI_PAGE_ORDER)) == NULL) {
            printk("mic: psmi page alloc failed: %d\n", i);
            ret = -ENOMEM;
            goto free_ptes;
        }
        memset(va, 0, MIC_PSMI_PAGE_SIZE);
        dma_hndl = pci_map_single(mic_ctx->bi_pdev, va,
                MIC_PSMI_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(mic_ctx->bi_pdev, dma_hndl)) {
            printk("mic: psmi page mapping failed: %d\n", i);
            free_pages((unsigned long)va, MIC_PSMI_PAGE_ORDER);
            ret = -ENOMEM;
            goto free_ptes;
        }
        psmi_ctx->dma_tbl[i + 1].pa = dma_hndl;
        psmi_ctx->va_tbl[i].pa = (uint64_t)va;
    }
    psmi_ctx->dma_tbl[0].pa = MIC_PSMI_SIGNATURE;
    psmi_ctx->dma_tbl[psmi_ctx->nr_dma_pages + 1].pa = MIC_PSMI_SIGNATURE;
    printk("mic: psmi #%d, %ld bytes, dma_tbl va=0x%lx hndl=0x%lx\n",
           mic_ctx->bi_id + 1, (unsigned long)psmi_ctx->dma_mem_size,
           (unsigned long)psmi_ctx->dma_tbl,
           (unsigned long)psmi_ctx->dma_tbl_hndl);
    return 0;
free_ptes:
    for (j = 1; j < i; j++)
        mic_psmi_free_pte(mic_ctx, j);
    pci_unmap_single(mic_ctx->bi_pdev, psmi_ctx->dma_tbl_hndl,
                     psmi_ctx->dma_tbl_size, PCI_DMA_BIDIRECTIONAL);
free_dma_tbl:
    kfree(psmi_ctx->dma_tbl);
    psmi_ctx->dma_tbl = NULL;
free_va_tbl:
    kfree(psmi_ctx->va_tbl);
    psmi_ctx->va_tbl = NULL;
    return ret;
}
/* Allocate pages for the real mode code and the protected mode code
   for linux as well as a memory map buffer.  */
static grub_err_t
allocate_pages (grub_size_t prot_size, grub_size_t *align,
		grub_size_t min_align, int relocatable,
		grub_uint64_t preferred_address)
{
  grub_err_t err;

  if (prot_size == 0)
    prot_size = 1;

  prot_size = page_align (prot_size);

  /* Initialize the memory pointers with NULL for convenience.  */
  free_pages ();

  relocator = grub_relocator_new ();
  if (!relocator)
    {
      err = grub_errno;
      goto fail;
    }

  /* FIXME: Should request low memory from the heap when this feature is
     implemented.  */
  {
    grub_relocator_chunk_t ch;

    if (relocatable)
      {
	err = grub_relocator_alloc_chunk_align (relocator, &ch,
						preferred_address,
						preferred_address,
						prot_size, 1,
						GRUB_RELOCATOR_PREFERENCE_LOW,
						1);
	for (; err && *align + 1 > min_align; (*align)--)
	  {
	    grub_errno = GRUB_ERR_NONE;
	    err = grub_relocator_alloc_chunk_align (relocator, &ch,
						    0x1000000,
						    0xffffffff & ~prot_size,
						    prot_size, 1 << *align,
						    GRUB_RELOCATOR_PREFERENCE_LOW,
						    1);
	  }
	if (err)
	  goto fail;
      }
    else
      err = grub_relocator_alloc_chunk_addr (relocator, &ch,
					     preferred_address, prot_size);
    if (err)
      goto fail;
    prot_mode_mem = get_virtual_current_address (ch);
    prot_mode_target = get_physical_target_address (ch);
  }

  grub_dprintf ("linux", "prot_mode_mem = %p, prot_mode_target = %lx, prot_size = %x\n",
		prot_mode_mem, (unsigned long) prot_mode_target,
		(unsigned) prot_size);
  return GRUB_ERR_NONE;

 fail:
  free_pages ();
  return err;
}
static inline void free_thread_info(struct thread_info *ti)
{
    free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
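/*
 * For context, a minimal sketch of the allocation-side counterpart this
 * free routine pairs with. The exact gfp flags vary by architecture, so
 * treat this as an illustrative assumption, not the kernel's definition;
 * the name is hypothetical.
 */
static inline struct thread_info *alloc_thread_info_sketch(void)
{
    /* THREAD_SIZE_ORDER pages, matching the free_pages() order above */
    return (struct thread_info *)__get_free_pages(GFP_KERNEL,
                                                  THREAD_SIZE_ORDER);
}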
static struct xpc_gru_mq_uv *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
                     irq_handler_t irq_handler)
{
    enum xp_retval xp_ret;
    int ret;
    int nid;
    int nasid;
    int pg_order;
    struct page *page;
    struct xpc_gru_mq_uv *mq;
    struct uv_IO_APIC_route_entry *mmr_value;

    mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
    if (mq == NULL) {
        dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
                "a xpc_gru_mq_uv structure\n");
        ret = -ENOMEM;
        goto out_0;
    }

    mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
                              GFP_KERNEL);
    if (mq->gru_mq_desc == NULL) {
        dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
                "a gru_message_queue_desc structure\n");
        ret = -ENOMEM;
        goto out_1;
    }

    pg_order = get_order(mq_size);
    mq->order = pg_order + PAGE_SHIFT;
    mq_size = 1UL << mq->order;

    mq->mmr_blade = uv_cpu_to_blade_id(cpu);

    nid = cpu_to_node(cpu);
    page = alloc_pages_exact_node(nid,
                                  GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
                                  pg_order);
    if (page == NULL) {
        dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
                "bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
        ret = -ENOMEM;
        goto out_2;
    }
    mq->address = page_address(page);

    /* enable generation of irq when GRU mq operation occurs to this mq */
    ret = xpc_gru_mq_watchlist_alloc_uv(mq);
    if (ret != 0)
        goto out_3;

    ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
    if (ret != 0)
        goto out_4;

    ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
    if (ret != 0) {
        dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
                mq->irq, -ret);
        goto out_5;
    }

    nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));
    mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
    ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
                                   nasid, mmr_value->vector,
                                   mmr_value->dest);
    if (ret != 0) {
        dev_err(xpc_part, "gru_create_message_queue() returned "
                "error=%d\n", ret);
        ret = -EINVAL;
        goto out_6;
    }

    /* allow other partitions to access this GRU mq */
    xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
    if (xp_ret != xpSuccess) {
        ret = -EACCES;
        goto out_6;
    }

    return mq;

    /* something went wrong */
out_6:
    free_irq(mq->irq, NULL);
out_5:
    xpc_release_gru_mq_irq_uv(mq);
out_4:
    xpc_gru_mq_watchlist_free_uv(mq);
out_3:
    free_pages((unsigned long)mq->address, pg_order);
out_2:
    kfree(mq->gru_mq_desc);
out_1:
    kfree(mq);
out_0:
    return ERR_PTR(ret);
}
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
    dma_addr_t dma_handle)
{
    plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
    free_pages((unsigned long) vaddr, get_order(size));
}
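/*
 * A rough sketch of the matching allocation path, to show why the free
 * side can recover the page order from size alone. The plat_map_dma_mem()
 * call is the platform hook paired with plat_unmap_dma_mem() above; the
 * function name and flag handling here are assumptions for illustration.
 */
static void *dma_alloc_noncoherent_sketch(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t gfp)
{
    /* get_order(size) here mirrors get_order(size) in the free path */
    void *ret = (void *)__get_free_pages(gfp, get_order(size));

    if (ret != NULL) {
        memset(ret, 0, size);
        *dma_handle = plat_map_dma_mem(dev, ret, size);
    }
    return ret;
}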
// put_kstack - free the memory space of process kernel stack
static void
put_kstack(struct proc_struct *proc) {
    free_pages(kva2page((void *)(proc->kstack)), KSTACKPAGE);
}
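// For symmetry, a minimal sketch of the allocation-side counterpart
// (ucore's setup_kstack has roughly this shape; treat the details as
// assumptions, and the name as illustrative):
static int
setup_kstack_sketch(struct proc_struct *proc) {
    struct Page *page = alloc_pages(KSTACKPAGE);
    if (page != NULL) {
        // record the kernel virtual address of the new stack
        proc->kstack = (uintptr_t)page2kva(page);
        return 0;
    }
    return -E_NO_MEM;
}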
int main(int argc, char **argv)
{
    char *msg;

    msg = parse_opts(argc, argv, NULL, NULL);
    if (msg != NULL) {
        tst_brkm(TBROK, NULL, "OPTION PARSING ERROR - %s", msg);
    }

    setup();

#if HAVE_NUMA_MOVE_PAGES
    unsigned int i;
    int lc;
    unsigned int from_node;
    unsigned int to_node;
    int ret;

    ret = get_allowed_nodes(NH_MEMS, 2, &from_node, &to_node);
    if (ret < 0)
        tst_brkm(TBROK | TERRNO, cleanup, "get_allowed_nodes: %d", ret);

    /* check for looping state if -i option is given */
    for (lc = 0; TEST_LOOPING(lc); lc++) {
        void *pages[TEST_PAGES] = { 0 };
        int nodes[TEST_PAGES];
        int status[TEST_PAGES];
        unsigned long onepage = get_page_size();

        /* reset Tst_count in case we are looping */
        Tst_count = 0;

        ret = alloc_pages_on_node(pages, TOUCHED_PAGES, from_node);
        if (ret == -1)
            continue;

        /* Allocate page and do not touch it. */
        pages[UNTOUCHED_PAGE] = numa_alloc_onnode(onepage, from_node);
        if (pages[UNTOUCHED_PAGE] == NULL) {
            tst_resm(TBROK, "failed allocating page on node %d",
                     from_node);
            goto err_free_pages;
        }

        for (i = 0; i < TEST_PAGES; i++)
            nodes[i] = to_node;

        ret = numa_move_pages(0, TEST_PAGES, pages, nodes,
                              status, MPOL_MF_MOVE);
        TEST_ERRNO = errno;
        if (ret == -1) {
            tst_resm(TFAIL | TERRNO,
                     "move_pages unexpectedly failed");
            goto err_free_pages;
        }

        if (status[UNTOUCHED_PAGE] == -ENOENT)
            tst_resm(TPASS, "status[%d] set to expected -ENOENT",
                     UNTOUCHED_PAGE);
        else
            tst_resm(TFAIL, "status[%d] is %d", UNTOUCHED_PAGE,
                     status[UNTOUCHED_PAGE]);

err_free_pages:
        /*
         * This is capable of freeing both the touched and
         * untouched pages.
         */
        free_pages(pages, TEST_PAGES);
    }
#else
    tst_resm(TCONF, "move_pages support not found.");
#endif

    cleanup();
    tst_exit();
}
int main(int argc, char *argv[])
{
    pm_kernel_t *ker;
    pm_process_t *proc;
    pid_t *pids;
    size_t num_procs;
    size_t i;
    pm_map_t **maps;
    size_t num_maps;
    char cmdline[256]; // this must be within the range of int
    int error;
    int rc = EXIT_SUCCESS;
    uint8_t pr_flags = 0;
    struct ksm_pages kp;

    memset(&kp, 0, sizeof(kp));

    opterr = 0;
    do {
        int c = getopt(argc, argv, "hvsa");
        if (c == -1)
            break;

        switch (c) {
        case 'a':
            pr_flags |= PR_ALL;
            break;
        case 's':
            pr_flags |= PR_SORTED;
            break;
        case 'v':
            pr_flags |= PR_VERBOSE;
            break;
        case 'h':
            usage(argv[0]);
            exit(EXIT_SUCCESS);
        case '?':
            fprintf(stderr, "unknown option: %c\n", optopt);
            usage(argv[0]);
            exit(EXIT_FAILURE);
        }
    } while (1);

    error = pm_kernel_create(&ker);
    if (error) {
        fprintf(stderr, "Error creating kernel interface -- "
                        "does this kernel have pagemap?\n");
        exit(EXIT_FAILURE);
    }

    if (pr_flags & PR_ALL) {
        error = pm_kernel_pids(ker, &pids, &num_procs);
        if (error) {
            fprintf(stderr, "Error listing processes.\n");
            exit(EXIT_FAILURE);
        }
    } else {
        if (optind != argc - 1) {
            usage(argv[0]);
            exit(EXIT_FAILURE);
        }

        pids = malloc(sizeof(*pids));
        if (pids == NULL) {
            fprintf(stderr, "Error allocating pid memory\n");
            exit(EXIT_FAILURE);
        }

        *pids = strtoul(argv[optind], NULL, 10);
        if (*pids == 0) {
            fprintf(stderr, "Invalid PID\n");
            rc = EXIT_FAILURE;
            goto exit;
        }
        num_procs = 1;

        if (getprocname(*pids, cmdline, sizeof(cmdline)) < 0) {
            cmdline[0] = '\0';
        }
        printf("%s (%u):\n", cmdline, *pids);
    }

    printf("Warning: this tool only compares the KSM CRCs of pages, "
           "there is a chance of collisions\n");

    for (i = 0; i < num_procs; i++) {
        error = pm_process_create(ker, pids[i], &proc);
        if (error) {
            fprintf(stderr, "warning: could not create process interface for %d\n",
                    pids[i]);
            rc = EXIT_FAILURE;
            goto exit;
        }

        error = pm_process_maps(proc, &maps, &num_maps);
        if (error) {
            pm_process_destroy(proc);
            fprintf(stderr, "warning: could not read process map for %d\n",
                    pids[i]);
            rc = EXIT_FAILURE;
            goto exit;
        }

        if (read_pages(&kp, maps, num_maps, pr_flags) < 0) {
            free(maps);
            pm_process_destroy(proc);
            rc = EXIT_FAILURE;
            goto exit;
        }

        free(maps);
        pm_process_destroy(proc);
    }

    if (pr_flags & PR_SORTED) {
        qsort(kp.pages, kp.len, sizeof(*kp.pages), cmp_pages);
    }
    print_pages(&kp, pr_flags);

exit:
    free_pages(&kp, pr_flags);
    free(pids);
    return rc;
}
static void ipath_dma_free_coherent(struct ib_device *dev, size_t size,
                                    void *cpu_addr, u64 dma_handle)
{
    free_pages((unsigned long) cpu_addr, get_order(size));
}
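/*
 * Worth noting for all the get_order()-based free paths in this file:
 * get_order() rounds the byte count up to the next power-of-two number of
 * pages, so the alloc and free sides agree on the order even for odd
 * sizes. A small worked illustration (not part of the driver):
 *
 *   get_order(PAGE_SIZE)     == 0   ->  1 page
 *   get_order(PAGE_SIZE + 1) == 1   ->  2 pages
 *   get_order(3 * PAGE_SIZE) == 2   ->  4 pages
 */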
/*
 * img_update_realloc: This function allocates the contiguous pages to
 * accommodate the requested size of data. The memory address and size
 * values are stored globally and on every call to this function the new
 * size is checked to see if more data is required than the existing size.
 * If true the previous memory is freed and new allocation is done to
 * accommodate the new size. If the incoming size is less than the
 * already allocated size, then that memory is reused. This function is
 * called with lock held and returns with lock held.
 */
static int img_update_realloc(unsigned long size)
{
    unsigned char *image_update_buffer = NULL;
    unsigned long rc;
    unsigned long img_buf_phys_addr;
    int ordernum;
    int dma_alloc = 0;

    /*
     * check if a buffer of sufficient size has
     * already been allocated
     */
    if (rbu_data.image_update_buffer_size >= size) {
        /*
         * check for corruption
         */
        if ((size != 0) && (rbu_data.image_update_buffer == NULL)) {
            printk(KERN_ERR "dell_rbu:%s: corruption "
                "check failed\n", __func__);
            return -EINVAL;
        }
        /*
         * we have a valid pre-allocated buffer with
         * sufficient size
         */
        return 0;
    }

    /*
     * free any previously allocated buffer
     */
    img_update_free();

    spin_unlock(&rbu_data.lock);

    ordernum = get_order(size);
    image_update_buffer =
        (unsigned char *) __get_free_pages(GFP_KERNEL, ordernum);

    img_buf_phys_addr =
        (unsigned long) virt_to_phys(image_update_buffer);

    if (img_buf_phys_addr > BIOS_SCAN_LIMIT) {
        free_pages((unsigned long) image_update_buffer, ordernum);
        ordernum = -1;
        image_update_buffer = dma_alloc_coherent(NULL, size,
            &dell_rbu_dmaaddr, GFP_KERNEL);
        dma_alloc = 1;
    }

    spin_lock(&rbu_data.lock);

    if (image_update_buffer != NULL) {
        rbu_data.image_update_buffer = image_update_buffer;
        rbu_data.image_update_buffer_size = size;
        rbu_data.bios_image_size = rbu_data.image_update_buffer_size;
        rbu_data.image_update_ordernum = ordernum;
        rbu_data.dma_alloc = dma_alloc;
        rc = 0;
    } else {
        pr_debug("Not enough memory for image update: "
            "size = %ld\n", size);
        rc = -ENOMEM;
    }

    return rc;
}
static void slob_free_pages(void *b, int order)
{
    if (current->reclaim_state)
        current->reclaim_state->reclaimed_slab += 1 << order;
    free_pages((unsigned long)b, order);
}
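/*
 * For reference, a sketch of the allocation counterpart (shaped like
 * slob's slob_new_pages(); the NUMA branch is elided and the name is
 * illustrative, so treat this as an assumption, not slob's exact code):
 */
static void *slob_new_pages_sketch(gfp_t gfp, int order)
{
    struct page *page = alloc_pages(gfp, order);

    if (!page)
        return NULL;
    /* slob works on linear addresses, so hand back the mapped address */
    return page_address(page);
}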
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
    free_pages((unsigned long) vaddr, get_order(size));
}
static int dovefb_ovly_ioctl(struct fb_info *fi, unsigned int cmd,
        unsigned long arg)
{
    void __user *argp = (void __user *)arg;
    struct dovefb_layer_info *dfli = fi->par;
    u32 x;
    int vmode = 0;
    int gfx_on = 1;
    int vid_on = 1;
    int interpolation = 0;

    switch (cmd) {
    case DOVEFB_IOCTL_WAIT_VSYNC:
        wait_for_vsync(dfli);
        break;
    case DOVEFB_IOCTL_GET_VIEWPORT_INFO:
        return copy_to_user(argp, &dfli->surface.viewPortInfo,
                sizeof(struct _sViewPortInfo)) ? -EFAULT : 0;
    case DOVEFB_IOCTL_SET_VIEWPORT_INFO:
        mutex_lock(&dfli->access_ok);
        if (copy_from_user(&gViewPortInfo, argp,
                sizeof(gViewPortInfo))) {
            mutex_unlock(&dfli->access_ok);
            return -EFAULT;
        }
        if (check_surface(fi, -1, &gViewPortInfo, 0, 0))
            dovefb_ovly_set_par(fi);
        mutex_unlock(&dfli->access_ok);
        break;
    case DOVEFB_IOCTL_SET_VIDEO_MODE:
        /*
         * Get data from user space.
         */
        if (copy_from_user(&vmode, argp, sizeof(vmode)))
            return -EFAULT;
        if (check_surface(fi, vmode, 0, 0, 0))
            dovefb_ovly_set_par(fi);
        break;
    case DOVEFB_IOCTL_GET_VIDEO_MODE:
        return copy_to_user(argp, &dfli->surface.videoMode,
                sizeof(u32)) ? -EFAULT : 0;
    case DOVEFB_IOCTL_CREATE_VID_BUFFER:
    {
        struct _sOvlySurface OvlySurface;

        mutex_lock(&dfli->access_ok);
        if (copy_from_user(&OvlySurface, argp,
                sizeof(struct _sOvlySurface))) {
            mutex_unlock(&dfli->access_ok);
            return -EFAULT;
        }

        /* Request a video buffer. */
        dovefb_ovly_create_surface(&OvlySurface);

        if (copy_to_user(argp, &OvlySurface,
                sizeof(struct _sOvlySurface))) {
            mutex_unlock(&dfli->access_ok);
            return -EFAULT;
        }

        mutex_unlock(&dfli->access_ok);
        break;
    }
    case DOVEFB_IOCTL_FLIP_VID_BUFFER:
    {
        struct _sOvlySurface *surface = NULL;
        u8 *start_addr, *input_data, *dst_addr;
        u32 length;

        surface = kmalloc(sizeof(struct _sOvlySurface), GFP_KERNEL);
        /* kmalloc() can fail; bail out before dereferencing */
        if (!surface)
            return -ENOMEM;

        /* Get user-mode data. */
        if (copy_from_user(surface, argp,
                sizeof(struct _sOvlySurface))) {
            kfree(surface);
            return -EFAULT;
        }
        mutex_lock(&dfli->access_ok);
        length = surface->videoBufferAddr.length;
        dst_addr = dfli->surface.videoBufferAddr.startAddr;
        start_addr = surface->videoBufferAddr.startAddr;
        input_data = surface->videoBufferAddr.inputData;

        /*
         * Has DMA addr?
         */
        if (start_addr && (!input_data)) {
            if (0 != addFreeBuf(freeBufList, (u8 *)surface)) {
                pr_debug("Error: addFreeBuf()\n");
                mutex_unlock(&dfli->access_ok);
                kfree(surface);
                return -EFAULT;
            } else {
                /* pr_debug("addFreeBuf(0x%08x) ok.\n",
                   start_addr); */
            }
        } else {
            if (check_surface(fi, surface->videoMode,
                    &surface->viewPortInfo,
                    &surface->viewPortOffset,
                    &surface->videoBufferAddr))
                dovefb_ovly_set_par(fi);

            /* copy buffer */
            if (input_data) {
                wait_for_vsync(dfli);
                /* if support hw DMA, replace this. */
                if (copy_from_user(dfli->fb_start,
                        input_data, length)) {
                    mutex_unlock(&dfli->access_ok);
                    kfree(surface);
                    return -EFAULT;
                }
                mutex_unlock(&dfli->access_ok);
                kfree(surface);
                return 0;
            }
            kfree(surface);
#if 0
            /*
             * Fix me: Currently not implemented yet.
             * Application allocates a physically contiguous
             * buffer and passes it into the driver. Here we
             * update fb's info to the new buffer and free
             * the old buffer.
             */
            if (start_addr) {
                if (dfli->mem_status)
                    free_pages(
                        (unsigned long)dfli->fb_start,
                        get_order(dfli->fb_size));
                else
                    dma_free_writecombine(dfli->dev,
                        dfli->fb_size,
                        dfli->fb_start,
                        dfli->fb_start_dma);

                dfli->fb_start = __va(start_addr);
                dfli->fb_size = length;
                dfli->fb_start_dma =
                    (dma_addr_t)__pa(dfli->fb_start);
                dfli->mem_status = 1;
                fi->fix.smem_start = dfli->fb_start_dma;
                fi->fix.smem_len = dfli->fb_size;
                fi->screen_base = dfli->fb_start;
                fi->screen_size = dfli->fb_size;
            }
#endif
        }
        mutex_unlock(&dfli->access_ok);
        return 0;
    }
    case DOVEFB_IOCTL_GET_FREELIST:
    {
        mutex_lock(&dfli->access_ok);

        if (copy_to_user(argp, filterBufList,
                MAX_QUEUE_NUM*sizeof(u8 *))) {
            mutex_unlock(&dfli->access_ok);
            return -EFAULT;
        }

        clearFreeBuf(filterBufList, RESET_BUF);

        mutex_unlock(&dfli->access_ok);
        return 0;
    }
    case DOVEFB_IOCTL_GET_BUFF_ADDR:
    {
        return copy_to_user(argp, &dfli->surface.videoBufferAddr,
                sizeof(struct _sVideoBufferAddr)) ? -EFAULT : 0;
    }
    case DOVEFB_IOCTL_SET_VID_OFFSET:
        mutex_lock(&dfli->access_ok);
        if (copy_from_user(&gViewPortOffset, argp,
                sizeof(gViewPortOffset))) {
            mutex_unlock(&dfli->access_ok);
            return -EFAULT;
        }
        if (check_surface(fi, -1, 0, &gViewPortOffset, 0))
            dovefb_ovly_set_par(fi);
        mutex_unlock(&dfli->access_ok);
        break;
    case DOVEFB_IOCTL_GET_VID_OFFSET:
        return copy_to_user(argp, &dfli->surface.viewPortOffset,
                sizeof(struct _sViewPortOffset)) ? -EFAULT : 0;
    case DOVEFB_IOCTL_SET_MEMORY_TOGGLE:
        break;
    case DOVEFB_IOCTL_SET_COLORKEYnALPHA:
        if (copy_from_user(&dfli->ckey_alpha, argp,
                sizeof(struct _sColorKeyNAlpha)))
            return -EFAULT;

        dovefb_ovly_set_colorkeyalpha(dfli);
        break;
    case DOVEFB_IOCTL_GET_COLORKEYnALPHA:
        if (copy_to_user(argp, &dfli->ckey_alpha,
                sizeof(struct _sColorKeyNAlpha)))
            return -EFAULT;
        break;
    case DOVEFB_IOCTL_SWITCH_VID_OVLY:
        if (copy_from_user(&vid_on, argp, sizeof(int)))
            return -EFAULT;
        if (0 == vid_on) {
            x = readl(dfli->reg_base + LCD_SPU_DMA_CTRL0) &
                ~CFG_DMA_ENA_MASK;
            writel(x, dfli->reg_base + LCD_SPU_DMA_CTRL0);
        } else {
            x = readl(dfli->reg_base + LCD_SPU_DMA_CTRL0) |
                CFG_DMA_ENA(0x1);
            writel(x, dfli->reg_base + LCD_SPU_DMA_CTRL0);
            /* Enable VID & VSync. */
            x = readl(dfli->reg_base + SPU_IRQ_ENA) |
                DOVEFB_VID_INT_MASK | DOVEFB_VSYNC_INT_MASK;
            writel(x, dfli->reg_base + SPU_IRQ_ENA);
        }
        break;
    case DOVEFB_IOCTL_SWITCH_GRA_OVLY:
        if (copy_from_user(&gfx_on, argp, sizeof(int)))
            return -EFAULT;
        if (0 == gfx_on) {
            x = readl(dfli->reg_base + LCD_SPU_DMA_CTRL0) &
                ~CFG_GRA_ENA_MASK;
            writel(x, dfli->reg_base + LCD_SPU_DMA_CTRL0);
        } else {
            x = readl(dfli->reg_base + LCD_SPU_DMA_CTRL0) |
                CFG_GRA_ENA(0x1);
            writel(x, dfli->reg_base + LCD_SPU_DMA_CTRL0);
        }
        break;
    case DOVEFB_IOCTL_GET_FBID:
        mutex_lock(&dfli->access_ok);
        if (copy_to_user(argp, &dfli->cur_fbid,
                sizeof(unsigned int))) {
            mutex_unlock(&dfli->access_ok);
            return -EFAULT;
        }
        mutex_unlock(&dfli->access_ok);
        break;
    case DOVEFB_IOCTL_GET_SRC_MODE:
        mutex_lock(&dfli->access_ok);
        if (copy_to_user(argp, &dfli->src_mode, sizeof(int))) {
            mutex_unlock(&dfli->access_ok);
            return -EFAULT;
        }
        mutex_unlock(&dfli->access_ok);
        break;
    case DOVEFB_IOCTL_SET_SRC_MODE:
        mutex_lock(&dfli->access_ok);
        if (copy_from_user(&dfli->src_mode, argp, sizeof(int))) {
            mutex_unlock(&dfli->access_ok);
            return -EFAULT;
        }

        if (SHM_NORMAL == dfli->src_mode) {
            int i;

            /*
             * Recycle all video buffers.
             */
            /* 1. collect freelist buffers */
            for (i = (MAX_QUEUE_NUM-1); i >= 0; i--) {
                if (freeBufList[i])
                    break;
            }
            collectFreeBuf(filterBufList, freeBufList, (i));

            /* 2. Recycle current frame to filter list. */
            for (i = 0; i < MAX_QUEUE_NUM; i++) {
                if (!filterBufList[i])
                    filterBufList[i] = (u8 *)dfli->new_addr;
            }

            /* clear and reset related resources. */
            clearFreeBuf(freeBufList, RESET_BUF|FREE_ENTRY);
            dfli->new_addr = 0;
            dfli->cur_fbid = 0;
            memset(dfli->fb_start, 0, dfli->fb_size);
        }

        mutex_unlock(&dfli->access_ok);
        break;
    case DOVEFB_IOCTL_GET_FBPA:
    {
        struct shm_private_info info;
        int index;

        if (copy_from_user(&info, argp,
                sizeof(struct shm_private_info)))
            return -EFAULT;

        /* which frame do we want to find? */
        index = info.fbid;

        /* calc physical address. */
        info.fb_pa = (unsigned long)(dfli->fb_start_dma +
            (index*info.width*info.height*MAX_YUV_PIXEL));
        if (copy_to_user(argp, &info,
                sizeof(struct shm_private_info)))
            return -EFAULT;

        break;
    }
    case DOVEFB_IOCTL_NEXT_FRAME_PRESENT:
    {
        unsigned int phy_addr[3];

        mutex_lock(&dfli->access_ok);
        if (copy_from_user(&phy_addr, argp,
                3*sizeof(unsigned int))) {
            mutex_unlock(&dfli->access_ok);
            return -EFAULT;
        }
        mutex_unlock(&dfli->access_ok);

        dfli->vid_ovly_phys_addr_y = phy_addr[0];
        dfli->vid_ovly_phys_addr_u = phy_addr[1];
        dfli->vid_ovly_phys_addr_v = phy_addr[2];
        break;
    }
    case DOVEFB_IOCTL_SET_INTERPOLATION_MODE:
        /*
         * Get data from user space.
         */
        if (copy_from_user(&interpolation, argp,
                sizeof(interpolation)))
            return -EFAULT;
        if ((interpolation == 0) || (interpolation == 3))
            /* bitwise ~, not logical !, to mask the field */
            writel(CFG_VSC_LINEAR(interpolation) |
                (readl(dfli->reg_base + SPU_IOPAD_CONTROL) &
                ~CFG_VSC_LINEAR_MASK),
                dfli->reg_base + SPU_IOPAD_CONTROL);
        break;
    default:
        pr_debug("ioctl_ovly(0x%x) No match.\n", cmd);
        break;
    }

    return 0;
}
/*
 * This routine will assign vring's allocated in host/io memory. Code in
 * virtio_ring.c however continues to access this io memory as if it were
 * local memory without io accessors.
 */
static struct virtqueue *vop_find_vq(struct virtio_device *dev,
                                     unsigned index,
                                     void (*callback)(struct virtqueue *vq),
                                     const char *name)
{
    struct _vop_vdev *vdev = to_vopvdev(dev);
    struct vop_device *vpdev = vdev->vpdev;
    struct mic_vqconfig __iomem *vqconfig;
    struct mic_vqconfig config;
    struct virtqueue *vq;
    void __iomem *va;
    struct _mic_vring_info __iomem *info;
    void *used;
    int vr_size, _vr_size, err, magic;
    struct vring *vr;
    u8 type = ioread8(&vdev->desc->type);

    if (index >= ioread8(&vdev->desc->num_vq))
        return ERR_PTR(-ENOENT);

    if (!name)
        return ERR_PTR(-ENOENT);

    /* First assign the vring's allocated in host memory */
    vqconfig = _vop_vq_config(vdev->desc) + index;
    memcpy_fromio(&config, vqconfig, sizeof(config));
    _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
    vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
    va = vpdev->hw_ops->ioremap(vpdev, le64_to_cpu(config.address),
            vr_size);
    if (!va)
        return ERR_PTR(-ENOMEM);
    vdev->vr[index] = va;
    memset_io(va, 0x0, _vr_size);
    vq = vring_new_virtqueue(index, le16_to_cpu(config.num),
                             MIC_VIRTIO_RING_ALIGN, dev, false,
                             (void __force *)va, vop_notify,
                             callback, name);
    if (!vq) {
        err = -ENOMEM;
        goto unmap;
    }
    info = va + _vr_size;
    magic = ioread32(&info->magic);

    if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) {
        err = -EIO;
        goto unmap;
    }

    /* Allocate and reassign used ring now */
    vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
                                        sizeof(struct vring_used_elem) *
                                        le16_to_cpu(config.num));
    used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                    get_order(vdev->used_size[index]));
    if (!used) {
        err = -ENOMEM;
        dev_err(_vop_dev(vdev), "%s %d err %d\n",
                __func__, __LINE__, err);
        goto del_vq;
    }
    vdev->used[index] = dma_map_single(&vpdev->dev, used,
                                       vdev->used_size[index],
                                       DMA_BIDIRECTIONAL);
    if (dma_mapping_error(&vpdev->dev, vdev->used[index])) {
        err = -ENOMEM;
        dev_err(_vop_dev(vdev), "%s %d err %d\n",
                __func__, __LINE__, err);
        goto free_used;
    }
    writeq(vdev->used[index], &vqconfig->used_address);
    /*
     * To reassign the used ring here we are directly accessing
     * struct vring_virtqueue which is a private data structure
     * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in
     * vring_new_virtqueue() would ensure that
     * (&vq->vring == (struct vring *) (&vq->vq + 1));
     */
    vr = (struct vring *)(vq + 1);
    vr->used = used;

    vq->priv = vdev;
    return vq;
free_used:
    free_pages((unsigned long)used,
               get_order(vdev->used_size[index]));
del_vq:
    vring_del_virtqueue(vq);
unmap:
    vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]);
    return ERR_PTR(err);
}
static int mace_open(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace *mb = mp->mace;
#if 0
    int i;

    i = 200;
    while (--i) {
        mb->biucc = SWRST;
        if (mb->biucc & SWRST) {
            udelay(10);
            continue;
        }
        break;
    }
    if (!i) {
        printk(KERN_ERR "%s: software reset failed!!\n", dev->name);
        return -EAGAIN;
    }
#endif

    mb->biucc = XMTSP_64;
    mb->fifocc = XMTFW_16 | RCVFW_64 | XMTFWU | RCVFWU | XMTBRST | RCVBRST;
    mb->xmtfc = AUTO_PAD_XMIT;
    mb->plscc = PORTSEL_AUI;
    /* mb->utr = RTRD; */

    if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
        printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
        return -EAGAIN;
    }
    if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
        printk(KERN_ERR "%s: can't get irq %d\n", dev->name,
               mp->dma_intr);
        free_irq(dev->irq, dev);
        return -EAGAIN;
    }

    /* Allocate the DMA ring buffers */
    mp->rx_ring = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA,
                                            N_RX_PAGES);
    mp->tx_ring = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, 0);

    if (mp->tx_ring == NULL || mp->rx_ring == NULL) {
        if (mp->rx_ring)
            free_pages((u32) mp->rx_ring, N_RX_PAGES);
        if (mp->tx_ring)
            free_pages((u32) mp->tx_ring, 0);
        free_irq(dev->irq, dev);
        free_irq(mp->dma_intr, dev);
        printk(KERN_ERR "%s: unable to allocate DMA buffers\n",
               dev->name);
        return -ENOMEM;
    }

    mp->rx_ring_phys = (unsigned char *) virt_to_bus((void *)mp->rx_ring);
    mp->tx_ring_phys = (unsigned char *) virt_to_bus((void *)mp->tx_ring);

    /*
     * We want the Rx buffer to be uncached and the Tx buffer
     * to be writethrough.
     */
    kernel_set_cachemode((void *)mp->rx_ring, N_RX_PAGES * PAGE_SIZE,
                         IOMAP_NOCACHE_NONSER);
    kernel_set_cachemode((void *)mp->tx_ring, PAGE_SIZE,
                         IOMAP_WRITETHROUGH);

    mace_dma_off(dev);

    /* Not sure what these do */
    psc_write_word(PSC_ENETWR_CTL, 0x9000);
    psc_write_word(PSC_ENETRD_CTL, 0x9000);
    psc_write_word(PSC_ENETWR_CTL, 0x0400);
    psc_write_word(PSC_ENETRD_CTL, 0x0400);

#if 0
    /* load up the hardware address */
    mb->iac = ADDRCHG | PHYADDR;
    while ((mb->iac & ADDRCHG) != 0)
        ;
    for (i = 0; i < 6; ++i)
        mb->padr = dev->dev_addr[i];

    /* clear the multicast filter */
    mb->iac = ADDRCHG | LOGADDR;
    while ((mb->iac & ADDRCHG) != 0)
        ;
    for (i = 0; i < 8; ++i)
        mb->ladrf = 0;

    mb->plscc = PORTSEL_GPSI + ENPLSIO;

    mb->maccc = ENXMT | ENRCV;
    mb->imr = RCVINT;
#endif

    mace_rxdma_reset(dev);
    mace_txdma_reset(dev);

    return 0;
}
static int create_packet(void *data, size_t length)
{
    struct packet_data *newpacket;
    int ordernum = 0;
    int retval = 0;
    unsigned int packet_array_size = 0;
    void **invalid_addr_packet_array = NULL;
    void *packet_data_temp_buf = NULL;
    unsigned int idx = 0;

    pr_debug("create_packet: entry\n");

    if (!rbu_data.packetsize) {
        pr_debug("create_packet: packetsize not specified\n");
        retval = -EINVAL;
        goto out_noalloc;
    }

    spin_unlock(&rbu_data.lock);

    newpacket = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
    if (!newpacket) {
        printk(KERN_WARNING
            "dell_rbu:%s: failed to allocate new "
            "packet\n", __func__);
        retval = -ENOMEM;
        spin_lock(&rbu_data.lock);
        goto out_noalloc;
    }

    ordernum = get_order(length);

    /*
     * BIOS errata mean we cannot allocate packets below 1MB or they will
     * be overwritten by BIOS.
     *
     * array to temporarily hold packets
     * that are below the allocation floor
     *
     * NOTE: very simplistic because we only need the floor to be at 1MB
     *       due to BIOS errata. This shouldn't be used for higher floors
     *       or you will run out of mem trying to allocate the array.
     */
    packet_array_size = max(
            (unsigned int)(allocation_floor / rbu_data.packetsize),
            (unsigned int)1);
    invalid_addr_packet_array = kzalloc(packet_array_size * sizeof(void *),
                                        GFP_KERNEL);

    if (!invalid_addr_packet_array) {
        printk(KERN_WARNING
            "dell_rbu:%s: failed to allocate "
            "invalid_addr_packet_array\n", __func__);
        retval = -ENOMEM;
        spin_lock(&rbu_data.lock);
        goto out_alloc_packet;
    }

    while (!packet_data_temp_buf) {
        packet_data_temp_buf = (unsigned char *)
            __get_free_pages(GFP_KERNEL, ordernum);
        if (!packet_data_temp_buf) {
            printk(KERN_WARNING
                "dell_rbu:%s: failed to allocate new "
                "packet\n", __func__);
            retval = -ENOMEM;
            spin_lock(&rbu_data.lock);
            goto out_alloc_packet_array;
        }
#ifdef CONFIG_XEN
        if (ordernum && xen_create_contiguous_region(
                (unsigned long)packet_data_temp_buf, ordernum, 0)) {
            free_pages((unsigned long)packet_data_temp_buf,
                       ordernum);
            printk(KERN_WARNING
                "dell_rbu:%s: failed to adjust new "
                "packet\n", __func__);
            retval = -ENOMEM;
            spin_lock(&rbu_data.lock);
            goto out_alloc_packet_array;
        }
#endif

        if ((unsigned long)virt_to_bus(packet_data_temp_buf) <
                allocation_floor) {
#ifdef CONFIG_XEN
            if (ordernum)
                xen_destroy_contiguous_region(
                    (unsigned long)packet_data_temp_buf,
                    ordernum);
#endif
            pr_debug("packet 0x%lx below floor at 0x%lx.\n",
                (unsigned long)virt_to_phys(
                    packet_data_temp_buf),
                allocation_floor);
            invalid_addr_packet_array[idx++] = packet_data_temp_buf;
            packet_data_temp_buf = NULL;
        }
    }
    spin_lock(&rbu_data.lock);

    newpacket->data = packet_data_temp_buf;

    pr_debug("create_packet: newpacket at physical addr %lx\n",
        (unsigned long)virt_to_bus(newpacket->data));

    /* packets may not have fixed size */
    newpacket->length = length;
    newpacket->ordernum = ordernum;
    ++rbu_data.num_packets;

    /* initialize the newly created packet headers */
    INIT_LIST_HEAD(&newpacket->list);
    list_add_tail(&newpacket->list, &packet_data_head.list);

    memcpy(newpacket->data, data, length);

    pr_debug("create_packet: exit\n");

out_alloc_packet_array:
    /* always free packet array */
    for (; idx > 0; idx--) {
        pr_debug("freeing unused packet below floor 0x%lx.\n",
            (unsigned long)virt_to_bus(
                invalid_addr_packet_array[idx - 1]));
        free_pages((unsigned long)invalid_addr_packet_array[idx - 1],
            ordernum);
    }
    kfree(invalid_addr_packet_array);

out_alloc_packet:
    /* if error, free data */
    if (retval)
        kfree(newpacket);

out_noalloc:
    return retval;
}
void free_thread_info(struct thread_info *ti)
{
    free_thread_xstate(ti->task);
    free_pages((unsigned long)ti, get_order(THREAD_SIZE));
}
/*
 * img_update_realloc: This function allocates the contiguous pages to
 * accommodate the requested size of data. The memory address and size
 * values are stored globally and on every call to this function the new
 * size is checked to see if more data is required than the existing size.
 * If true the previous memory is freed and new allocation is done to
 * accommodate the new size. If the incoming size is less than the
 * already allocated size, then that memory is reused. This function is
 * called with lock held and returns with lock held.
 */
static int img_update_realloc(unsigned long size)
{
    unsigned char *image_update_buffer = NULL;
    unsigned long rc;
#ifndef CONFIG_XEN
    unsigned long img_buf_phys_addr;
#endif
    int ordernum;
    int dma_alloc = 0;

    /*
     * check if a buffer of sufficient size has
     * already been allocated
     */
    if (rbu_data.image_update_buffer_size >= size) {
        /*
         * check for corruption
         */
        if ((size != 0) && (rbu_data.image_update_buffer == NULL)) {
            printk(KERN_ERR "dell_rbu:%s: corruption "
                "check failed\n", __func__);
            return -EINVAL;
        }
        /*
         * we have a valid pre-allocated buffer with
         * sufficient size
         */
        return 0;
    }

    /*
     * free any previously allocated buffer
     */
    img_update_free();

    spin_unlock(&rbu_data.lock);

#ifndef CONFIG_XEN
    ordernum = get_order(size);
    image_update_buffer =
        (unsigned char *) __get_free_pages(GFP_KERNEL, ordernum);

    img_buf_phys_addr =
        (unsigned long) virt_to_bus(image_update_buffer);

    if (img_buf_phys_addr > BIOS_SCAN_LIMIT) {
        free_pages((unsigned long) image_update_buffer, ordernum);
#else
    {
#endif
        ordernum = -1;
        image_update_buffer = dma_alloc_coherent(NULL, size,
            &dell_rbu_dmaaddr, GFP_KERNEL);
        dma_alloc = 1;
    }

    spin_lock(&rbu_data.lock);

    if (image_update_buffer != NULL) {
        rbu_data.image_update_buffer = image_update_buffer;
        rbu_data.image_update_buffer_size = size;
        rbu_data.bios_image_size = rbu_data.image_update_buffer_size;
        rbu_data.image_update_ordernum = ordernum;
        rbu_data.dma_alloc = dma_alloc;
        rc = 0;
    } else {
        pr_debug("Not enough memory for image update: "
            "size = %ld\n", size);
        rc = -ENOMEM;
    }

    return rc;
}

static ssize_t read_packet_data(char *buffer, loff_t pos, size_t count)
{
    int retval;
    size_t bytes_left;
    size_t data_length;
    char *ptempBuf = buffer;

    /* check to see if we have something to return */
    if (rbu_data.num_packets == 0) {
        pr_debug("read_packet_data: no packets written\n");
        retval = -ENOMEM;
        goto read_rbu_data_exit;
    }

    if (pos > rbu_data.imagesize) {
        retval = 0;
        printk(KERN_WARNING "dell_rbu:read_packet_data: "
            "data underrun\n");
        goto read_rbu_data_exit;
    }

    bytes_left = rbu_data.imagesize - pos;
    data_length = min(bytes_left, count);

    if ((retval = packet_read_list(ptempBuf, &data_length)) < 0)
        goto read_rbu_data_exit;

    if ((pos + count) > rbu_data.imagesize) {
        rbu_data.packet_read_count = 0;
        /* this was the last copy */
        retval = bytes_left;
    } else
        retval = count;

read_rbu_data_exit:
    return retval;
}
// LAB2: below code is used to check the first fit allocation algorithm (your EXERCISE 1)
// NOTICE: You SHOULD NOT CHANGE basic_check, default_check functions!
static void
default_check(void) {
    int count = 0, total = 0;
    list_entry_t *le = &free_list;
    while ((le = list_next(le)) != &free_list) {
        struct Page *p = le2page(le, page_link);
        assert(PageProperty(p));
        count ++, total += p->property;
    }
    assert(total == nr_free_pages());

    basic_check();

    struct Page *p0 = alloc_pages(5), *p1, *p2;
    assert(p0 != NULL);
    assert(!PageProperty(p0));

    list_entry_t free_list_store = free_list;
    list_init(&free_list);
    assert(list_empty(&free_list));
    assert(alloc_page() == NULL);

    unsigned int nr_free_store = nr_free;
    nr_free = 0;

    free_pages(p0 + 2, 3);
    assert(alloc_pages(4) == NULL);
    assert(PageProperty(p0 + 2) && p0[2].property == 3);
    assert((p1 = alloc_pages(3)) != NULL);
    assert(alloc_page() == NULL);
    assert(p0 + 2 == p1);

    p2 = p0 + 1;
    free_page(p0);
    free_pages(p1, 3);
    assert(PageProperty(p0) && p0->property == 1);
    assert(PageProperty(p1) && p1->property == 3);

    assert((p0 = alloc_page()) == p2 - 1);
    free_page(p0);
    assert((p0 = alloc_pages(2)) == p2 + 1);

    free_pages(p0, 2);
    free_page(p2);

    assert((p0 = alloc_pages(5)) != NULL);
    assert(alloc_page() == NULL);

    assert(nr_free == 0);
    nr_free = nr_free_store;

    free_list = free_list_store;
    free_pages(p0, 5);

    le = &free_list;
    while ((le = list_next(le)) != &free_list) {
        struct Page *p = le2page(le, page_link);
        count --, total -= p->property;
    }
    assert(count == 0);
    assert(total == 0);
}
int __init
pool_benchmark(void)
{
    long i, j, t0, t1;
    ngx_pool_t *np;
    TfwPool *tp;

    printk(KERN_ERR "object sizes: Small - %lu, Big - %lu Huge - %lu\n",
           sizeof(Small), sizeof(Big), sizeof(Huge));

    t0 = jiffies;
    for (i = 0; i < N_ALLOC; ++i) {
        p_arr[i] = (void *)__get_free_pages(GFP_KERNEL, 0);
        touch_obj(p_arr[i]);
    }
    for (i = 0; i < N_ALLOC; ++i)
        free_pages((unsigned long)p_arr[i], 0);
    t1 = jiffies;
    printk(KERN_ERR "alloc & free: %ldms\n", t1 - t0);

    /*****************************************************************/

    np = ngx_create_pool(PAGE_SIZE);
    BUG_ON(!np);
    t0 = jiffies;
    for (i = 0; i < N; ++i) {
        Small *o = (Small *)ngx_palloc(np, sizeof(Small));
        touch_obj(o);
    }
    t1 = jiffies;
    ngx_destroy_pool(np);
    /* HZ = 1000 for my kernel. */
    printk(KERN_ERR "ngx_pool (Small): %ldms\n", t1 - t0);

    np = ngx_create_pool(PAGE_SIZE);
    BUG_ON(!np);
    t0 = jiffies;
    for (i = 0; i < N * sizeof(Small) / sizeof(Big); ++i) {
        Big *o = (Big *)ngx_palloc(np, sizeof(Big));
        touch_obj(o);
    }
    t1 = jiffies;
    ngx_destroy_pool(np);
    printk(KERN_ERR "ngx_pool (Big): %ldms\n", t1 - t0);

    np = ngx_create_pool(PAGE_SIZE);
    BUG_ON(!np);
    t0 = jiffies;
    for (i = 0; i < N; ++i) {
        if (unlikely(!(i & 0xfff))) {
            Huge *o = (Huge *)ngx_palloc(np, sizeof(*o));
            touch_obj(o);
            if (!(i & 1))
                ngx_pfree(np, o);
        }
        else if (unlikely(!(i & 3))) {
            Big *o = (Big *)ngx_palloc(np, sizeof(*o));
            touch_obj(o);
        }
        else {
            Small *o = (Small *)ngx_palloc(np, sizeof(*o));
            touch_obj(o);
        }
    }
    t1 = jiffies;
    ngx_destroy_pool(np);
    printk(KERN_ERR "ngx_pool w/ free (Mix): %ldms\n", t1 - t0);

    t0 = jiffies;
    for (i = 0; i < N / 100; ++i) {
        np = ngx_create_pool(PAGE_SIZE);
        for (j = 0; j < 100; ++j) {
            if (unlikely(!(i & 3))) {
                Big *o = (Big *)ngx_palloc(np, sizeof(*o));
                touch_obj(o);
            }
            else {
                Small *o;
                o = (Small *)ngx_palloc(np, sizeof(*o));
                touch_obj(o);
            }
        }
        ngx_destroy_pool(np);
    }
    t1 = jiffies;
    printk(KERN_ERR "ngx_pool cr. & destr.: %ldms\n", t1 - t0);

    /*****************************************************************/

    tp = __tfw_pool_new(0);
    BUG_ON(!tp);
    t0 = jiffies;
    for (i = 0; i < N; ++i) {
        Small *o = (Small *)tfw_pool_alloc(tp, sizeof(Small));
        touch_obj(o);
    }
    t1 = jiffies;
    tfw_pool_destroy(tp);
    printk(KERN_ERR "tfw_pool (Small): %ldms\n", t1 - t0);

    tp = __tfw_pool_new(0);
    BUG_ON(!tp);
    t0 = jiffies;
    for (i = 0; i < N; ++i) {
        Small *o = (Small *)tfw_pool_alloc(tp, sizeof(Small));
        touch_obj(o);
        if (unlikely(!(i & 3)))
            tfw_pool_free(tp, o, sizeof(Small));
    }
    t1 = jiffies;
    tfw_pool_destroy(tp);
    printk(KERN_ERR "tfw_pool w/ free (Small): %ldms\n", t1 - t0);

    tp = __tfw_pool_new(0);
    BUG_ON(!tp);
    t0 = jiffies;
    for (i = 0; i < N * sizeof(Small) / sizeof(Big); ++i) {
        Big *o = (Big *)tfw_pool_alloc(tp, sizeof(Big));
        touch_obj(o);
    }
    t1 = jiffies;
    tfw_pool_destroy(tp);
    printk(KERN_ERR "tfw_pool (Big): %ldms\n", t1 - t0);

    tp = __tfw_pool_new(0);
    BUG_ON(!tp);
    t0 = jiffies;
    for (i = 0; i < N * sizeof(Small) / sizeof(Big); ++i) {
        Big *o = (Big *)tfw_pool_alloc(tp, sizeof(Big));
        touch_obj(o);
        if (unlikely(!(i & 3)))
            tfw_pool_free(tp, o, sizeof(Big));
    }
    t1 = jiffies;
    tfw_pool_destroy(tp);
    printk(KERN_ERR "tfw_pool w/ free (Big): %ldms\n", t1 - t0);

    tp = __tfw_pool_new(0);
    BUG_ON(!tp);
    t0 = jiffies;
    for (i = 0; i < N; ++i) {
        if (unlikely(!(i & 0xfff))) {
            Huge *o = (Huge *)tfw_pool_alloc(tp, sizeof(*o));
            touch_obj(o);
            if (!(i & 1))
                tfw_pool_free(tp, o, sizeof(*o));
        }
        else if (unlikely(!(i & 3))) {
            Big *o = (Big *)tfw_pool_alloc(tp, sizeof(*o));
            touch_obj(o);
            if (!(i & 1))
                tfw_pool_free(tp, o, sizeof(*o));
        }
        else {
            Small *o = (Small *)tfw_pool_alloc(tp, sizeof(*o));
            touch_obj(o);
        }
    }
    t1 = jiffies;
    tfw_pool_destroy(tp);
    printk(KERN_ERR "tfw_pool w/ free (Mix): %ldms\n", t1 - t0);

    t0 = jiffies;
    for (i = 0; i < N / 100; ++i) {
        tp = __tfw_pool_new(0);
        for (j = 0; j < 100; ++j) {
            if (unlikely(!(i & 3))) {
                Big *o = (Big *)tfw_pool_alloc(tp, sizeof(*o));
                touch_obj(o);
            }
            else {
                Small *o;
                o = (Small *)tfw_pool_alloc(tp, sizeof(*o));
                touch_obj(o);
            }
        }
        tfw_pool_destroy(tp);
    }
    t1 = jiffies;
    printk(KERN_ERR "tfw_pool cr. & destr.: %ldms\n", t1 - t0);

    return 0;
}
/*
 * Setup MobiCore kernel log. It assumes it's running on CORE 0!
 * The fastcall will complain if that is not the case!
 */
long mobicore_log_setup(void)
{
    unsigned long phys_log_buf;
    union fc_generic fc_log;
    struct sched_param param = { .sched_priority = 1 };

    long ret;
    log_pos = 0;
    log_buf = NULL;
    log_thread = NULL;
    log_line = NULL;
    log_line_len = 0;
    prev_eol = false;
    prev_source = 0;
    thread_err = 0;

    /* Sanity check for the log size */
    if (log_size < PAGE_SIZE)
        return -EFAULT;
    else
        log_size = PAGE_ALIGN(log_size);

    /* kzalloc() returns NULL on failure, never an ERR_PTR() */
    log_line = kzalloc(LOG_LINE_SIZE, GFP_KERNEL);
    if (log_line == NULL) {
        MCDRV_DBG_ERROR(mcd, "failed to allocate log line!");
        return -ENOMEM;
    }

    log_thread = kthread_create(log_worker, NULL, "mc_log");
    if (IS_ERR(log_thread)) {
        MCDRV_DBG_ERROR(mcd, "MobiCore log thread creation failed!");
        ret = -EFAULT;
        goto err_free_line;
    }

    sched_setscheduler(log_thread, SCHED_IDLE, &param);

    /*
     * We are going to map this buffer into virtual address space in SWd.
     * To reduce complexity there, we use a contiguous buffer.
     */
    log_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                       get_order(log_size));
    if (!log_buf) {
        MCDRV_DBG_ERROR(mcd, "Failed to get page for logger!");
        ret = -ENOMEM;
        goto err_stop_kthread;
    }

    phys_log_buf = virt_to_phys(log_buf);

    memset(&fc_log, 0, sizeof(fc_log));
    fc_log.as_in.cmd = MC_FC_NWD_TRACE;
    fc_log.as_in.param[0] = phys_log_buf;
    fc_log.as_in.param[1] = log_size;

    MCDRV_DBG(mcd, "fc_log virt=%p phys=%p ",
              log_buf, (void *)phys_log_buf);
    mc_fastcall(&fc_log);
    MCDRV_DBG(mcd, "fc_log out ret=0x%08x", fc_log.as_out.ret);

    /* If the setup failed we must free the memory allocated */
    if (fc_log.as_out.ret) {
        MCDRV_DBG_ERROR(mcd, "MobiCore shared traces setup failed!");
        free_pages((unsigned long)log_buf, get_order(log_size));
        log_buf = NULL;
        ret = -EIO;
        goto err_stop_kthread;
    }

    set_task_state(log_thread, TASK_INTERRUPTIBLE);

    MCDRV_DBG(mcd, "fc_log Logger version %u\n", log_buf->version);
    return 0;

err_stop_kthread:
    kthread_stop(log_thread);
    log_thread = NULL;
err_free_line:
    kfree(log_line);
    log_line = NULL;
    return ret;
}

/*
 * Free kernel log components.
 * ATTN: We can't free the log buffer because it's also in use by MobiCore
 * and even if the module is unloaded MobiCore is still running.
 */
void mobicore_log_free(void)
{
    if (log_thread && !IS_ERR(log_thread)) {
        /* We don't really care what the thread returns for exit */
        kthread_stop(log_thread);
    }

    kfree(log_line);
}
static void dma_free(void *vaddr, size_t size)
{
    vaddr = (void *)KSEG0ADDR(vaddr);
    free_pages((unsigned long) vaddr, get_order(size));
}
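/*
 * A plausible allocation-side sketch for context: on MIPS the uncached
 * window (KSEG1) aliases the cached one (KSEG0), which is why dma_free()
 * above first converts the pointer back to its KSEG0 alias before handing
 * it to free_pages(). The name and exact cache maintenance below are
 * assumptions, not the platform's verbatim code.
 */
static void *dma_alloc_sketch(size_t size)
{
    void *ret = (void *)__get_free_pages(GFP_ATOMIC, get_order(size));

    if (ret != NULL) {
        /* push any stale cache lines out before handing over to DMA */
        dma_cache_wback_inv((unsigned long)ret, size);
        /* hand out the uncached KSEG1 alias of the buffer */
        ret = (void *)KSEG1ADDR(ret);
    }
    return ret;
}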
int met_save_log_real(const char *pathname)
{
    int len, ret = 0;
    struct file *infp = NULL;
    struct file *outfp = NULL;
    void *ptr = NULL;
    mm_segment_t oldfs;

    /* filp_open() returns an ERR_PTR() on failure, never NULL */
    infp = filp_open("/sys/kernel/debug/tracing/trace", O_RDONLY, 0);
    if (unlikely(IS_ERR(infp))) {
        ERRF("can not open trace file for read\n");
        infp = NULL;
        ret = -1;
        goto save_out;
    }

    outfp = filp_open(pathname, O_WRONLY|O_TRUNC|O_CREAT, 0644);
    if (unlikely(IS_ERR(outfp))) {
        ERRF("can not open saved file for write\n");
        outfp = NULL;
        ret = -2;
        goto save_out;
    }

    /* four pages (order 2) as the copy buffer */
    ptr = (void *)__get_free_pages(GFP_KERNEL, 2);
    if (ptr == NULL) {
        ERRF("can not allocate buffer to copy\n");
        ret = -3;
        goto save_out;
    }

    oldfs = get_fs();
    set_fs(KERNEL_DS);

    while (1) {
        len = vfs_read(infp, ptr, PAGE_SIZE << 2, &(infp->f_pos));
        if (len < 0) {
            ERRF("can not read from trace file\n");
            ret = -3;
            break;
        } else if (len == 0) {
            break;
        }
        ret = vfs_write(outfp, ptr, len, &(outfp->f_pos));
        if (ret < 0) {
            ERRF("can not write to saved file\n");
            break;
        }
    }

    set_fs(oldfs);

save_out:
    if (ptr != NULL)
        free_pages((unsigned long)ptr, 2);

    if (infp != NULL)
        filp_close(infp, NULL);

    if (outfp != NULL)
        filp_close(outfp, NULL);

    return ret;
}
static inline void Fb_unmap_video_memory(struct fb_info *info)
{
    unsigned map_size = PAGE_ALIGN(info->fix.smem_len);

    free_pages((unsigned long)info->screen_base, get_order(map_size));
}
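/*
 * For symmetry, a hedged sketch of what the mapping-side helper likely
 * looks like; the field updates are inferred from what
 * Fb_unmap_video_memory() above consumes, and the name is illustrative.
 */
static inline int Fb_map_video_memory_sketch(struct fb_info *info)
{
    unsigned map_size = PAGE_ALIGN(info->fix.smem_len);

    info->screen_base = (char __iomem *)__get_free_pages(GFP_KERNEL,
                            get_order(map_size));
    if (info->screen_base == NULL)
        return -ENOMEM;
    memset((void *)info->screen_base, 0, map_size);
    /* record the physical base so user space can mmap the framebuffer */
    info->fix.smem_start = virt_to_phys((void *)info->screen_base);
    return 0;
}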
static void
check_swap(void) {
    // backup mem env
    int ret, count = 0, total = 0, i;
    list_entry_t *le = &free_list;
    while ((le = list_next(le)) != &free_list) {
        struct Page *p = le2page(le, page_link);
        assert(PageProperty(p));
        count ++, total += p->property;
    }
    assert(total == nr_free_pages());
    cprintf("BEGIN check_swap: count %d, total %d\n", count, total);

    // now we set the phy pages env
    struct mm_struct *mm = mm_create();
    assert(mm != NULL);

    extern struct mm_struct *check_mm_struct;
    assert(check_mm_struct == NULL);

    check_mm_struct = mm;

    pde_t *pgdir = mm->pgdir = boot_pgdir;
    assert(pgdir[0] == 0);

    struct vma_struct *vma = vma_create(BEING_CHECK_VALID_VADDR,
            CHECK_VALID_VADDR, VM_WRITE | VM_READ);
    assert(vma != NULL);

    insert_vma_struct(mm, vma);

    // setup the temp Page Table vaddr 0~4MB
    cprintf("setup Page Table for vaddr 0X1000, so alloc a page\n");
    pte_t *temp_ptep = NULL;
    temp_ptep = get_pte(mm->pgdir, BEING_CHECK_VALID_VADDR, 1);
    assert(temp_ptep != NULL);
    cprintf("setup Page Table vaddr 0~4MB OVER!\n");

    for (i = 0; i < CHECK_VALID_PHY_PAGE_NUM; i++) {
        check_rp[i] = alloc_page();
        assert(check_rp[i] != NULL);
        assert(!PageProperty(check_rp[i]));
    }
    list_entry_t free_list_store = free_list;
    list_init(&free_list);
    assert(list_empty(&free_list));
    //assert(alloc_page() == NULL);

    unsigned int nr_free_store = nr_free;
    nr_free = 0;
    for (i = 0; i < CHECK_VALID_PHY_PAGE_NUM; i++) {
        free_pages(check_rp[i], 1);
    }
    assert(nr_free == CHECK_VALID_PHY_PAGE_NUM);

    cprintf("set up init env for check_swap begin!\n");
    // setup initial vir_page<->phy_page environment for page replacement algorithm
    pgfault_num = 0;

    check_content_set();
    assert(nr_free == 0);
    for (i = 0; i < MAX_SEQ_NO; i++)
        swap_out_seq_no[i] = swap_in_seq_no[i] = -1;

    for (i = 0; i < CHECK_VALID_PHY_PAGE_NUM; i++) {
        check_ptep[i] = 0;
        check_ptep[i] = get_pte(pgdir, (i+1)*0x1000, 0);
        //cprintf("i %d, check_ptep addr %x, value %x\n", i, check_ptep[i], *check_ptep[i]);
        assert(check_ptep[i] != NULL);
        assert(pte2page(*check_ptep[i]) == check_rp[i]);
        assert((*check_ptep[i] & PTE_P));
    }
    cprintf("set up init env for check_swap over!\n");

    // now access the virt pages to test page replacement algorithm
    ret = check_content_access();
    assert(ret == 0);

    // restore kernel mem env
    for (i = 0; i < CHECK_VALID_PHY_PAGE_NUM; i++) {
        free_pages(check_rp[i], 1);
    }

    //free_page(pte2page(*temp_ptep));
    free_page(pde2page(pgdir[0]));
    pgdir[0] = 0;
    mm->pgdir = NULL;
    mm_destroy(mm);
    check_mm_struct = NULL;

    nr_free = nr_free_store;
    free_list = free_list_store;

    le = &free_list;
    while ((le = list_next(le)) != &free_list) {
        struct Page *p = le2page(le, page_link);
        count --, total -= p->property;
    }
    cprintf("count is %d, total is %d\n", count, total);
    //assert(count == 0);

    cprintf("check_swap() succeeded!\n");
}
static void alpha_noop_free_coherent(struct device *dev, size_t size,
                                     void *cpu_addr, dma_addr_t dma_addr)
{
    free_pages((unsigned long)cpu_addr, get_order(size));
}
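/*
 * The matching allocation side, sketched from the usual shape of a
 * no-IOMMU ("noop") DMA implementation; treat the name and exact flag
 * handling as assumptions rather than the arch's verbatim code.
 */
static void *alpha_noop_alloc_coherent_sketch(struct device *dev,
        size_t size, dma_addr_t *dma_handle, gfp_t gfp)
{
    void *ret = (void *)__get_free_pages(gfp, get_order(size));

    if (ret) {
        memset(ret, 0, size);
        /* no IOMMU: the bus address is just the physical address */
        *dma_handle = virt_to_phys(ret);
    }
    return ret;
}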
int main(int argc, char **argv)
{
    tst_parse_opts(argc, argv, NULL, NULL);

    setup();

#if HAVE_NUMA_MOVE_PAGES
    unsigned int i;
    int lc;
    unsigned int from_node;
    unsigned int to_node;
    int ret;

    ret = get_allowed_nodes(NH_MEMS, 2, &from_node, &to_node);
    if (ret < 0)
        tst_brkm(TBROK | TERRNO, cleanup, "get_allowed_nodes: %d", ret);

    /* check for looping state if -i option is given */
    for (lc = 0; TEST_LOOPING(lc); lc++) {
        void *pages[N_TEST_PAGES] = { 0 };
        int nodes[N_TEST_PAGES];
        int status[N_TEST_PAGES];
        pid_t cpid;
        sem_t *sem;

        /* reset tst_count in case we are looping */
        tst_count = 0;

        ret = alloc_shared_pages_on_node(pages + SHARED_PAGE,
                                         N_SHARED_PAGES, from_node);
        if (ret == -1)
            continue;

        ret = alloc_pages_on_node(pages + UNSHARED_PAGE,
                                  N_UNSHARED_PAGES, from_node);
        if (ret == -1)
            goto err_free_shared;

        for (i = 0; i < N_TEST_PAGES; i++) {
            nodes[i] = to_node;
        }

        sem = alloc_sem(MAX_SEMS);
        if (sem == NULL) {
            goto err_free_unshared;
        }

        /*
         * Fork a child process so that the shared pages are
         * now really shared between two processes.
         */
        cpid = fork();
        if (cpid == -1) {
            tst_resm(TBROK, "forking child failed");
            goto err_free_sem;
        } else if (cpid == 0) {
            child(pages, sem);
        }

        /* Wait for child to setup and signal. */
        if (sem_wait(&sem[SEM_CHILD_SETUP]) == -1)
            tst_resm(TWARN | TERRNO, "error wait semaphore");

        ret = numa_move_pages(0, N_TEST_PAGES, pages, nodes,
                              status, MPOL_MF_MOVE);
        if (ret == -1) {
            tst_resm(TFAIL | TERRNO,
                     "move_pages unexpectedly failed");
            goto err_kill_child;
        }

        if (status[SHARED_PAGE] == -EACCES)
            tst_resm(TPASS, "status[%d] set to expected -EACCES",
                     SHARED_PAGE);
        else
            tst_resm(TFAIL, "status[%d] is %d", SHARED_PAGE,
                     status[SHARED_PAGE]);

err_kill_child:
        /* Test done. Ask child to terminate. */
        if (sem_post(&sem[SEM_PARENT_TEST]) == -1)
            tst_resm(TWARN | TERRNO, "error post semaphore");
        /* Read the status, no zombies! */
        wait(NULL);
err_free_sem:
        free_sem(sem, MAX_SEMS);
err_free_unshared:
        free_pages(pages + UNSHARED_PAGE, N_UNSHARED_PAGES);
err_free_shared:
        free_shared_pages(pages + SHARED_PAGE, N_SHARED_PAGES);
    }
#else
    tst_resm(TCONF, "move_pages support not found.");
#endif

    cleanup();
    tst_exit();
}
/*
 * This is the ioctl implementation.
 */
static long kern_unlocked_ioctl(struct file *filp, unsigned int cmd,
        unsigned long arg)
{
    void *ptr = NULL;
    unsigned long addr = -1;
    dma_addr_t dma_handle;
    unsigned long size;
    /* int res; */

    PR_DEBUG("start");
    switch (cmd) {
    /*
     * kmalloc function.
     *
     * One argument which is the size to allocate
     */
    case IOCTL_DEMO_KMALLOC:
        size = arg * PAGE_SIZE;
        /* kmalloc() takes (size, flags) and returns NULL on failure */
        ptr = kmalloc(size, GFP_KERNEL);
        if (ptr == NULL) {
            PR_ERROR("unable to allocate %lu", size);
            return -ENOMEM;
        }
        addr = (unsigned long)ptr;
        if (addr % PAGE_SIZE != 0) {
            PR_ERROR("page size issue with addr=%lu", addr);
            return -EFAULT;
        }
        addr = -1;
        kfree(ptr);
        ptr = NULL;
        return 0;
    /*
     * __get_free_pages function.
     *
     * One argument which is the size to allocate
     */
    case IOCTL_DEMO_GET_FREE_PAGES:
        size = arg * PAGE_SIZE;
        addr = __get_free_pages(GFP_KERNEL, get_order(size));
        if (addr == 0) {
        /* if (IS_ERR_VALUE(addr)) { */
            PR_ERROR("unable to allocate %lu", size);
            return -EFAULT;
        }
        if (addr % PAGE_SIZE != 0) {
            PR_ERROR("page size issue with addr=%lu", addr);
            return -EFAULT;
        }
        free_pages(addr, get_order(size));
        PR_DEBUG("addr is %lx, mod is %ld", addr, addr % PAGE_SIZE);
        addr = -1;
        return 0;
    /*
     * PCI allocation function
     */
    case IOCTL_DEMO_PCI_ALLOC_CONSISTENT:
        size = arg * PAGE_SIZE;
        /* pci_alloc_consistent() also returns NULL on failure */
        ptr = pci_alloc_consistent(NULL, size, &dma_handle);
        if (ptr == NULL) {
            PR_ERROR("unable to allocate %lu", size);
            return -ENOMEM;
        }
        addr = (unsigned long)ptr;
        if (addr % PAGE_SIZE != 0) {
            PR_ERROR("page size issue with addr=%lu", addr);
            return -EFAULT;
        }
        addr = -1;
        pci_free_consistent(NULL, size, ptr, dma_handle);
        ptr = NULL;
        return 0;
    case IOCTL_DEMO_DMA_ALLOC_COHERENT:
        size = arg * PAGE_SIZE;
        ptr = dma_alloc_coherent(my_device, size, &dma_handle,
                GFP_KERNEL);
        if (ptr == NULL) {
            PR_ERROR("unable to allocate %lu", size);
            return -ENOMEM;
        }
        addr = (unsigned long)ptr;
        if (addr % PAGE_SIZE != 0) {
            PR_ERROR("page size issue with addr=%lu", addr);
            return -EFAULT;
        }
        addr = -1;
        dma_free_coherent(my_device, size, ptr, dma_handle);
        ptr = NULL;
        return 0;
    }
    return -EINVAL;
}
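/*
 * A hypothetical user-space invocation of the demo ioctls above (the
 * device node name "/dev/demo" is an assumption for illustration):
 *
 *   int fd = open("/dev/demo", O_RDWR);
 *   if (fd >= 0) {
 *       ioctl(fd, IOCTL_DEMO_KMALLOC, 4);         // 4 pages via kmalloc
 *       ioctl(fd, IOCTL_DEMO_GET_FREE_PAGES, 4);  // 4 pages via __get_free_pages
 *       close(fd);
 *   }
 */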
/**
 * percpu_ida_destroy - release a tag pool's resources
 * @pool: pool to free
 *
 * Frees the resources allocated by percpu_ida_init().
 */
void percpu_ida_destroy(struct percpu_ida *pool)
{
    free_percpu(pool->tag_cpu);
    free_pages((unsigned long) pool->freelist,
               get_order(pool->nr_tags * sizeof(unsigned)));
}
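/*
 * Typical pairing with percpu_ida_init(), sketched for context (error
 * handling shortened; percpu_ida_init() returning 0 on success is the
 * documented contract):
 *
 *   struct percpu_ida pool;
 *
 *   if (percpu_ida_init(&pool, 128))   // pool of 128 tags
 *       return -ENOMEM;
 *   ...
 *   percpu_ida_destroy(&pool);         // frees freelist pages + percpu data
 */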
/*
 * Routine to allocate DMA buffer RAM of size (in bytes).
 * Returns 0 on success, and <0 on fail.
 */
static int get_dma_buffer(ssize_t size)
{
    ssize_t bytes_to_get = size & ~0x3; /* get long-aligned */
    ssize_t usedbytes;
    unsigned long page_order;
    void *bufferpiece;
    dma_addr_t busaddress; /* bus address of DMA buffer */
    struct dma_page_pointer *currbuf;
    struct dma_page_pointer *tmpbuf;
    /* check multi pages */
    struct page *spa;
    unsigned long pflags;
    int pcnt, i;

    /* reset dma pointer buffer */
    currbuf = NULL; /* dma_main_pointer; */ /* NULL if no buffer exists */

    /* still have to get only small pieces.... */
    page_order = 4; /* page_order = get_order(bytes_to_get); */
    if (page_order >= MAX_ORDER)
        page_order = MAX_ORDER;

    while (bytes_to_get > 0) {
        /* shrink size if possible */
        while ((page_order > 0) &&
               ((PAGE_SIZE << (page_order - 1)) >= bytes_to_get))
            page_order--;

        bufferpiece = (void *)__get_free_pages(GFP_KERNEL, page_order);

        if (bufferpiece) {
#ifndef DONT_USE_SETPAGE
            /* repair missing page counts */
            add_individual_page_counts(bufferpiece, page_order);
#endif
            /* get block structure */
            for (i = 0; i < (1 << page_order); i++) {
                spa = virt_to_page(bufferpiece + i*PAGE_SIZE);
                pcnt = page_count(spa);
                pflags = spa->flags;
            }

            busaddress = virt_to_bus(bufferpiece);
            /* success: make new entry in chain */
            tmpbuf = (dma_page_pointer *)
                kmalloc(sizeof(dma_page_pointer),
                        GFP_KERNEL); /* first, get buffer */
            if (!tmpbuf) {
                printk(" Wruagh - kmalloc failed for buffer pointer....\n");
                free_pages((unsigned long)bufferpiece,
                           page_order); /* give it back */
                printk("kmalloc failed during DMA buffer alloc. better reboot.\n");
                return -ENOMEM;
            }

            if (currbuf) { /* there is already a structure */
                /* fill new struct; currbuf points to last
                   structure filled */
                tmpbuf->next = currbuf->next;
                tmpbuf->previous = currbuf;
                /* insert in chain */
                currbuf->next->previous = tmpbuf;
                currbuf->next = tmpbuf;
                currbuf = tmpbuf;
            } else {
                tmpbuf->previous = tmpbuf;
                tmpbuf->next = tmpbuf; /* fill new struct */
                currbuf = tmpbuf;
                dma_main_pointer = currbuf; /* set main pointer */
            };

            /* fill structure with actual buffer info */
            usedbytes = PAGE_SIZE << page_order;
            currbuf->fullsize = usedbytes; /* all allocated bytes */
            usedbytes = (usedbytes > bytes_to_get ?
                         bytes_to_get : usedbytes);
            currbuf->size = usedbytes; /* get useful size into buffer */
            currbuf->order = page_order; /* needed for free_pages */
            currbuf->buffer = bufferpiece; /* kernel address of buffer */
            currbuf->physbuf = busaddress; /* PCI bus address */

            /* less work to do.. */
            bytes_to_get -= usedbytes;
        } else {
            /* could not get the large mem piece.
               try smaller ones */
            if (page_order > 0) {
                page_order--;
                continue;
            } else {
                break; /* stop and clean up in case of problems */
            };
        }
    }

    if (bytes_to_get <= 0)
        return 0; /* everything went fine.... */
    /* cleanup of unused buffers and pointers with standard release code */
    release_dma_buffer();
    return -ENOMEM;
}