void add_usable_mem_rgns(unsigned long long base, unsigned long long size)
{
	int i;
	unsigned long long end = base + size;
	unsigned long long ustart, uend;

	base = _ALIGN_DOWN(base, getpagesize());
	end = _ALIGN_UP(end, getpagesize());

	for (i = 0; i < usablemem_rgns.size; i++) {
		ustart = usablemem_rgns.ranges[i].start;
		uend = usablemem_rgns.ranges[i].end;
		if (base < uend && end > ustart) {
			if ((base >= ustart) && (end <= uend))
				return;
			if (base < ustart && end > uend) {
				usablemem_rgns.ranges[i].start = base;
				usablemem_rgns.ranges[i].end = end;
				return;
			} else if (base < ustart) {
				usablemem_rgns.ranges[i].start = base;
				return;
			} else if (end > uend) {
				usablemem_rgns.ranges[i].end = end;
				return;
			}
		}
	}
	usablemem_rgns.ranges[usablemem_rgns.size].start = base;
	usablemem_rgns.ranges[usablemem_rgns.size++].end = end;

	dbgprintf("usable memory rgns size:%u base:%llx size:%llx\n",
		usablemem_rgns.size, base, size);
}
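For reference, the _ALIGN_DOWN/_ALIGN_UP helpers used throughout these examples follow the conventional Linux/kexec-tools power-of-two rounding. A minimal sketch of equivalent definitions (an assumption; the real headers may differ in casting details):

/* Round x down/up to a multiple of a; a must be a power of two. */
#define _ALIGN_DOWN(x, a)	((x) & ~((__typeof__(x))(a) - 1))
#define _ALIGN_UP(x, a)		_ALIGN_DOWN((x) + ((__typeof__(x))(a) - 1), (a))

/* e.g. _ALIGN_DOWN(0x1234, 0x1000) == 0x1000, _ALIGN_UP(0x1234, 0x1000) == 0x2000 */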
Example #2
void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

	BUG_ON(!mm->pgd);

	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_linux_pte(mm->pgd, start);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		if (!(pte & _PAGE_HASHPTE))
			continue;
		hpte_need_flush(mm, start, ptep, pte, 0);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}
Example #3
bool check_used_range(puint_t test, puint_t fa, size_t sa) {
    ruint_t from = ALIGN(fa);
    sa += fa - from;
    ruint_t size = _ALIGN_UP(sa);
    if (from <= test && test < from + size) {
        return true;
    }
    return false;
}
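In this project, ALIGN and _ALIGN_UP appear to be single-argument, page-granularity variants. A self-contained sketch of the same containment test, using hypothetical fixed 4 KiB helpers (the names and page size are assumptions, not the project's API):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE_4K 0x1000u	/* assumed granularity, for illustration only */

static inline uintptr_t align_down_4k(uintptr_t x) { return x & ~(uintptr_t)(PAGE_SIZE_4K - 1); }
static inline uintptr_t align_up_4k(uintptr_t x)   { return align_down_4k(x + PAGE_SIZE_4K - 1); }

/* True if test lies in the page-aligned span covering [fa, fa + sa). */
static bool range_contains(uintptr_t test, uintptr_t fa, size_t sa)
{
	uintptr_t from = align_down_4k(fa);
	uintptr_t size = align_up_4k(sa + (fa - from));
	return from <= test && test < from + size;
}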
Example #4
static int __init early_parse_ps3fb(char *p)
{
	if (!p)
		return 1;

	ps3fb_videomemory.size = _ALIGN_UP(memparse(p, &p),
					   ps3fb_videomemory.align);
	return 0;
}
Example #5
void add_usable_mem_rgns(unsigned long long base, unsigned long long size)
{
	unsigned int i;
	unsigned long long end = base + size;
	unsigned long long ustart, uend;

	base = _ALIGN_DOWN(base, getpagesize());
	end = _ALIGN_UP(end, getpagesize());

	for (i = 0; i < usablemem_rgns.size; i++) {
		ustart = usablemem_rgns.ranges[i].start;
		uend = usablemem_rgns.ranges[i].end;
		if (base < uend && end > ustart) {
			if ((base >= ustart) && (end <= uend))
				return;
			if (base < ustart && end > uend) {
				usablemem_rgns.ranges[i].start = base;
				usablemem_rgns.ranges[i].end = end;
#ifdef DEBUG
				fprintf(stderr, "usable memory rgn %u: new base:%llx new size:%llx\n",
					i, base, size);
#endif
				return;
			} else if (base < ustart) {
				usablemem_rgns.ranges[i].start = base;
#ifdef DEBUG
				fprintf(stderr, "usable memory rgn %u: new base:%llx new size:%llx\n",
					i, base, usablemem_rgns.ranges[i].end - base);
#endif
				return;
			} else if (end > uend) {
				usablemem_rgns.ranges[i].end = end;
#ifdef DEBUG
				fprintf(stderr, "usable memory rgn %u: new end:%llx, new size:%llx\n",
					i, end, end - usablemem_rgns.ranges[i].start);
#endif
				return;
			}
		}
	}
	usablemem_rgns.ranges[usablemem_rgns.size].start = base;
	usablemem_rgns.ranges[usablemem_rgns.size++].end = end;

	dbgprintf("usable memory rgns size:%u base:%llx size:%llx\n",
		usablemem_rgns.size, base, size);
}
Example #6
void __init allocate_pacas(void)
{
	u64 limit;
	int cpu;
	int nr_cpus;

	limit = ppc64_rma_size;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * We can't take SLB misses on the paca, and we want to access them
	 * in real mode, so allocate them within the RMA and also within
	 * the first segment.
	 */
	limit = min(0x10000000ULL, limit);
#endif

	/*
	 * Always align up the nr_cpu_ids to SMT threads and allocate
	 * the paca. This will help us to prepare for a situation where
	 * boot cpu id > nr_cpu_ids. We will use the last nthreads
	 * slots (nthreads == threads per core) to accommodate a core
	 * that contains boot cpu thread.
	 *
	 * Do not change nr_cpu_ids value here. Let us do that in
	 * early_init_dt_scan_cpus() where we know exact value
	 * of threads per core.
	 */
	nr_cpus = _ALIGN_UP(nr_cpu_ids, MAX_SMT);
	paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpus);

	paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit));
	memset(paca, 0, paca_size);

	printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n",
		paca_size, nr_cpus, paca);

	allocate_lppacas(nr_cpus, limit);

	allocate_slb_shadows(nr_cpus, limit);

	/* Can't use for_each_*_cpu, as they aren't functional yet */
	for (cpu = 0; cpu < nr_cpus; cpu++)
		initialise_paca(&paca[cpu], cpu);
}
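The _ALIGN_UP(nr_cpu_ids, MAX_SMT) step above simply pads the CPU count up to a whole SMT core, so the boot CPU can land in the last core's upper threads. A standalone illustration, assuming MAX_SMT is 8 (the real value comes from the platform):

#include <stdio.h>

#define MAX_SMT 8	/* assumed threads per core, for illustration only */
#define _ALIGN_UP(x, a)	(((x) + ((a) - 1)) & ~((a) - 1))

int main(void)
{
	unsigned int nr_cpu_ids;

	/* e.g. a 6-CPU device tree still gets paca slots for a full 8-thread core */
	for (nr_cpu_ids = 1; nr_cpu_ids <= 10; nr_cpu_ids++)
		printf("nr_cpu_ids=%2u -> nr_cpus=%2u\n",
		       nr_cpu_ids, _ALIGN_UP(nr_cpu_ids, MAX_SMT));
	return 0;
}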
Example #7
File: of.c  Project: mrtos/Logitech-Revue
static void *of_try_claim(unsigned long size)
{
	unsigned long addr = 0;

	if (claim_base == 0)
		claim_base = _ALIGN_UP((unsigned long)_end, ONE_MB);

	for(; claim_base < RAM_END; claim_base += ONE_MB) {
#ifdef DEBUG
		printf("    trying: 0x%08lx\n\r", claim_base);
#endif
		addr = (unsigned long)of_claim(claim_base, size, 0);
		if ((void *)addr != (void *)-1)
			break;
	}
	if (addr == 0)
		return NULL;
	claim_base = PAGE_ALIGN(claim_base + size);
	return (void *)addr;
}
Example #8
static int __devinit snd_ps3_driver_probe(struct ps3_system_bus_device *dev)
{
	int i, ret;
	u64 lpar_addr, lpar_size;

	BUG_ON(!firmware_has_feature(FW_FEATURE_PS3_LV1));
	BUG_ON(dev->match_id != PS3_MATCH_ID_SOUND);

	the_card.ps3_dev = dev;

	ret = ps3_open_hv_device(dev);

	if (ret)
		return -ENXIO;

	
	ret = lv1_gpu_device_map(2, &lpar_addr, &lpar_size);
	if (ret) {
		pr_info("%s: device map 2 failed %d\n", __func__, ret);
		goto clean_open;
	}
	ps3_mmio_region_init(dev, dev->m_region, lpar_addr, lpar_size,
		PAGE_SHIFT);

	ret = snd_ps3_map_mmio();
	if (ret)
		goto clean_dev_map;

	
	ps3_dma_region_init(dev, dev->d_region,
			    PAGE_SHIFT, 
			    0, 
			    NULL,
			    _ALIGN_UP(SND_PS3_DMA_REGION_SIZE, PAGE_SIZE));
	dev->d_region->ioid = PS3_AUDIO_IOID;

	ret = ps3_dma_region_create(dev->d_region);
	if (ret) {
		pr_info("%s: region_create\n", __func__);
		goto clean_mmio;
	}

	snd_ps3_audio_set_base_addr(dev->d_region->bus_addr);

	
	the_card.start_delay = snd_ps3_start_delay;

	
	if (snd_ps3_allocate_irq()) {
		ret = -ENXIO;
		goto clean_dma_region;
	}

	
	ret = snd_card_create(index, id, THIS_MODULE, 0, &the_card.card);
	if (ret < 0)
		goto clean_irq;

	strcpy(the_card.card->driver, "PS3");
	strcpy(the_card.card->shortname, "PS3");
	strcpy(the_card.card->longname, "PS3 sound");

	
	for (i = 0; i < ARRAY_SIZE(spdif_ctls); i++) {
		ret = snd_ctl_add(the_card.card,
				  snd_ctl_new1(&spdif_ctls[i], &the_card));
		if (ret < 0)
			goto clean_card;
	}

	
	
	ret = snd_pcm_new(the_card.card,
			  "SPDIF",
			  0, 
			  1, 
			  0, 
			  &(the_card.pcm));
	if (ret)
		goto clean_card;

	the_card.pcm->private_data = &the_card;
	strcpy(the_card.pcm->name, "SPDIF");

	
	snd_pcm_set_ops(the_card.pcm, SNDRV_PCM_STREAM_PLAYBACK,
			&snd_ps3_pcm_spdif_ops);

	the_card.pcm->info_flags = SNDRV_PCM_INFO_NONINTERLEAVED;
	
	ret = snd_pcm_lib_preallocate_pages_for_all(the_card.pcm,
					SNDRV_DMA_TYPE_DEV,
					&dev->core,
					SND_PS3_PCM_PREALLOC_SIZE,
					SND_PS3_PCM_PREALLOC_SIZE);
	if (ret < 0) {
		pr_info("%s: prealloc failed\n", __func__);
		goto clean_card;
	}

	the_card.null_buffer_start_vaddr =
		dma_alloc_coherent(&the_card.ps3_dev->core,
				   PAGE_SIZE,
				   &the_card.null_buffer_start_dma_addr,
				   GFP_KERNEL);
	if (!the_card.null_buffer_start_vaddr) {
		pr_info("%s: nullbuffer alloc failed\n", __func__);
		goto clean_preallocate;
	}
	pr_debug("%s: null vaddr=%p dma=%#llx\n", __func__,
		 the_card.null_buffer_start_vaddr,
		 the_card.null_buffer_start_dma_addr);
	
	snd_ps3_init_avsetting(&the_card);

	
	snd_card_set_dev(the_card.card, &dev->core);
	ret = snd_card_register(the_card.card);
	if (ret < 0)
		goto clean_dma_map;

	pr_info("%s started. start_delay=%dms\n",
		the_card.card->longname, the_card.start_delay);
	return 0;

clean_dma_map:
	dma_free_coherent(&the_card.ps3_dev->core,
			  PAGE_SIZE,
			  the_card.null_buffer_start_vaddr,
			  the_card.null_buffer_start_dma_addr);
clean_preallocate:
	snd_pcm_lib_preallocate_free_for_all(the_card.pcm);
clean_card:
	snd_card_free(the_card.card);
clean_irq:
	snd_ps3_free_irq();
clean_dma_region:
	ps3_dma_region_free(dev->d_region);
clean_mmio:
	snd_ps3_unmap_mmio();
clean_dev_map:
	lv1_gpu_device_unmap(2);
clean_open:
	ps3_close_hv_device(dev);
	return ret;
}; 
Example #9
int main(void)
{
	OFFSET(THREAD, task_struct, thread);
	OFFSET(MM, task_struct, mm);
	OFFSET(MMCONTEXTID, mm_struct, context.id);
#ifdef CONFIG_PPC64
	DEFINE(SIGSEGV, SIGSEGV);
	DEFINE(NMI_MASK, NMI_MASK);
	OFFSET(TASKTHREADPPR, task_struct, thread.ppr);
#else
	OFFSET(THREAD_INFO, task_struct, stack);
	DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16));
	OFFSET(KSP_LIMIT, thread_struct, ksp_limit);
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_LIVEPATCH
	OFFSET(TI_livepatch_sp, thread_info, livepatch_sp);
#endif

	OFFSET(KSP, thread_struct, ksp);
	OFFSET(PT_REGS, thread_struct, regs);
#ifdef CONFIG_BOOKE
	OFFSET(THREAD_NORMSAVES, thread_struct, normsave[0]);
#endif
	OFFSET(THREAD_FPEXC_MODE, thread_struct, fpexc_mode);
	OFFSET(THREAD_FPSTATE, thread_struct, fp_state.fpr);
	OFFSET(THREAD_FPSAVEAREA, thread_struct, fp_save_area);
	OFFSET(FPSTATE_FPSCR, thread_fp_state, fpscr);
	OFFSET(THREAD_LOAD_FP, thread_struct, load_fp);
#ifdef CONFIG_ALTIVEC
	OFFSET(THREAD_VRSTATE, thread_struct, vr_state.vr);
	OFFSET(THREAD_VRSAVEAREA, thread_struct, vr_save_area);
	OFFSET(THREAD_VRSAVE, thread_struct, vrsave);
	OFFSET(THREAD_USED_VR, thread_struct, used_vr);
	OFFSET(VRSTATE_VSCR, thread_vr_state, vscr);
	OFFSET(THREAD_LOAD_VEC, thread_struct, load_vec);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	OFFSET(THREAD_USED_VSR, thread_struct, used_vsr);
#endif /* CONFIG_VSX */
#ifdef CONFIG_PPC64
	OFFSET(KSP_VSID, thread_struct, ksp_vsid);
#else /* CONFIG_PPC64 */
	OFFSET(PGDIR, thread_struct, pgdir);
#ifdef CONFIG_SPE
	OFFSET(THREAD_EVR0, thread_struct, evr[0]);
	OFFSET(THREAD_ACC, thread_struct, acc);
	OFFSET(THREAD_SPEFSCR, thread_struct, spefscr);
	OFFSET(THREAD_USED_SPE, thread_struct, used_spe);
#endif /* CONFIG_SPE */
#endif /* CONFIG_PPC64 */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	OFFSET(THREAD_DBCR0, thread_struct, debug.dbcr0);
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	OFFSET(THREAD_KVM_SVCPU, thread_struct, kvm_shadow_vcpu);
#endif
#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
	OFFSET(THREAD_KVM_VCPU, thread_struct, kvm_vcpu);
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	OFFSET(PACATMSCRATCH, paca_struct, tm_scratch);
	OFFSET(THREAD_TM_TFHAR, thread_struct, tm_tfhar);
	OFFSET(THREAD_TM_TEXASR, thread_struct, tm_texasr);
	OFFSET(THREAD_TM_TFIAR, thread_struct, tm_tfiar);
	OFFSET(THREAD_TM_TAR, thread_struct, tm_tar);
	OFFSET(THREAD_TM_PPR, thread_struct, tm_ppr);
	OFFSET(THREAD_TM_DSCR, thread_struct, tm_dscr);
	OFFSET(PT_CKPT_REGS, thread_struct, ckpt_regs);
	OFFSET(THREAD_CKVRSTATE, thread_struct, ckvr_state.vr);
	OFFSET(THREAD_CKVRSAVE, thread_struct, ckvrsave);
	OFFSET(THREAD_CKFPSTATE, thread_struct, ckfp_state.fpr);
	/* Local pt_regs on stack for Transactional Memory funcs. */
	DEFINE(TM_FRAME_SIZE, STACK_FRAME_OVERHEAD +
	       sizeof(struct pt_regs) + 16);
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

	OFFSET(TI_FLAGS, thread_info, flags);
	OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags);
	OFFSET(TI_PREEMPT, thread_info, preempt_count);
	OFFSET(TI_TASK, thread_info, task);
	OFFSET(TI_CPU, thread_info, cpu);

#ifdef CONFIG_PPC64
	OFFSET(DCACHEL1BLOCKSIZE, ppc64_caches, l1d.block_size);
	OFFSET(DCACHEL1LOGBLOCKSIZE, ppc64_caches, l1d.log_block_size);
	OFFSET(DCACHEL1BLOCKSPERPAGE, ppc64_caches, l1d.blocks_per_page);
	OFFSET(ICACHEL1BLOCKSIZE, ppc64_caches, l1i.block_size);
	OFFSET(ICACHEL1LOGBLOCKSIZE, ppc64_caches, l1i.log_block_size);
	OFFSET(ICACHEL1BLOCKSPERPAGE, ppc64_caches, l1i.blocks_per_page);
	/* paca */
	DEFINE(PACA_SIZE, sizeof(struct paca_struct));
	OFFSET(PACAPACAINDEX, paca_struct, paca_index);
	OFFSET(PACAPROCSTART, paca_struct, cpu_start);
	OFFSET(PACAKSAVE, paca_struct, kstack);
	OFFSET(PACACURRENT, paca_struct, __current);
	OFFSET(PACASAVEDMSR, paca_struct, saved_msr);
	OFFSET(PACASTABRR, paca_struct, stab_rr);
	OFFSET(PACAR1, paca_struct, saved_r1);
	OFFSET(PACATOC, paca_struct, kernel_toc);
	OFFSET(PACAKBASE, paca_struct, kernelbase);
	OFFSET(PACAKMSR, paca_struct, kernel_msr);
	OFFSET(PACAIRQSOFTMASK, paca_struct, irq_soft_mask);
	OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened);
#ifdef CONFIG_PPC_BOOK3S
	OFFSET(PACACONTEXTID, paca_struct, mm_ctx_id);
#ifdef CONFIG_PPC_MM_SLICES
	OFFSET(PACALOWSLICESPSIZE, paca_struct, mm_ctx_low_slices_psize);
	OFFSET(PACAHIGHSLICEPSIZE, paca_struct, mm_ctx_high_slices_psize);
	OFFSET(PACA_SLB_ADDR_LIMIT, paca_struct, mm_ctx_slb_addr_limit);
	DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
#endif /* CONFIG_PPC_MM_SLICES */
#endif

#ifdef CONFIG_PPC_BOOK3E
	OFFSET(PACAPGD, paca_struct, pgd);
	OFFSET(PACA_KERNELPGD, paca_struct, kernel_pgd);
	OFFSET(PACA_EXGEN, paca_struct, exgen);
	OFFSET(PACA_EXTLB, paca_struct, extlb);
	OFFSET(PACA_EXMC, paca_struct, exmc);
	OFFSET(PACA_EXCRIT, paca_struct, excrit);
	OFFSET(PACA_EXDBG, paca_struct, exdbg);
	OFFSET(PACA_MC_STACK, paca_struct, mc_kstack);
	OFFSET(PACA_CRIT_STACK, paca_struct, crit_kstack);
	OFFSET(PACA_DBG_STACK, paca_struct, dbg_kstack);
	OFFSET(PACA_TCD_PTR, paca_struct, tcd_ptr);

	OFFSET(TCD_ESEL_NEXT, tlb_core_data, esel_next);
	OFFSET(TCD_ESEL_MAX, tlb_core_data, esel_max);
	OFFSET(TCD_ESEL_FIRST, tlb_core_data, esel_first);
#endif /* CONFIG_PPC_BOOK3E */

#ifdef CONFIG_PPC_BOOK3S_64
	OFFSET(PACASLBCACHE, paca_struct, slb_cache);
	OFFSET(PACASLBCACHEPTR, paca_struct, slb_cache_ptr);
	OFFSET(PACAVMALLOCSLLP, paca_struct, vmalloc_sllp);
#ifdef CONFIG_PPC_MM_SLICES
	OFFSET(MMUPSIZESLLP, mmu_psize_def, sllp);
#else
	OFFSET(PACACONTEXTSLLP, paca_struct, mm_ctx_sllp);
#endif /* CONFIG_PPC_MM_SLICES */
	OFFSET(PACA_EXGEN, paca_struct, exgen);
	OFFSET(PACA_EXMC, paca_struct, exmc);
	OFFSET(PACA_EXSLB, paca_struct, exslb);
	OFFSET(PACA_EXNMI, paca_struct, exnmi);
#ifdef CONFIG_PPC_PSERIES
	OFFSET(PACALPPACAPTR, paca_struct, lppaca_ptr);
#endif
	OFFSET(PACA_SLBSHADOWPTR, paca_struct, slb_shadow_ptr);
	OFFSET(SLBSHADOW_STACKVSID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid);
	OFFSET(SLBSHADOW_STACKESID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid);
	OFFSET(SLBSHADOW_SAVEAREA, slb_shadow, save_area);
	OFFSET(LPPACA_PMCINUSE, lppaca, pmcregs_in_use);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	OFFSET(PACA_PMCINUSE, paca_struct, pmcregs_in_use);
#endif
	OFFSET(LPPACA_DTLIDX, lppaca, dtl_idx);
	OFFSET(LPPACA_YIELDCOUNT, lppaca, yield_count);
	OFFSET(PACA_DTL_RIDX, paca_struct, dtl_ridx);
#endif /* CONFIG_PPC_BOOK3S_64 */
	OFFSET(PACAEMERGSP, paca_struct, emergency_sp);
#ifdef CONFIG_PPC_BOOK3S_64
	OFFSET(PACAMCEMERGSP, paca_struct, mc_emergency_sp);
	OFFSET(PACA_NMI_EMERG_SP, paca_struct, nmi_emergency_sp);
	OFFSET(PACA_IN_MCE, paca_struct, in_mce);
	OFFSET(PACA_IN_NMI, paca_struct, in_nmi);
	OFFSET(PACA_RFI_FLUSH_FALLBACK_AREA, paca_struct, rfi_flush_fallback_area);
	OFFSET(PACA_EXRFI, paca_struct, exrfi);
	OFFSET(PACA_L1D_FLUSH_SIZE, paca_struct, l1d_flush_size);

#endif
	OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id);
	OFFSET(PACAKEXECSTATE, paca_struct, kexec_state);
	OFFSET(PACA_DSCR_DEFAULT, paca_struct, dscr_default);
	OFFSET(ACCOUNT_STARTTIME, paca_struct, accounting.starttime);
	OFFSET(ACCOUNT_STARTTIME_USER, paca_struct, accounting.starttime_user);
	OFFSET(ACCOUNT_USER_TIME, paca_struct, accounting.utime);
	OFFSET(ACCOUNT_SYSTEM_TIME, paca_struct, accounting.stime);
	OFFSET(PACA_TRAP_SAVE, paca_struct, trap_save);
	OFFSET(PACA_NAPSTATELOST, paca_struct, nap_state_lost);
	OFFSET(PACA_SPRG_VDSO, paca_struct, sprg_vdso);
#else /* CONFIG_PPC64 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	OFFSET(ACCOUNT_STARTTIME, thread_info, accounting.starttime);
	OFFSET(ACCOUNT_STARTTIME_USER, thread_info, accounting.starttime_user);
	OFFSET(ACCOUNT_USER_TIME, thread_info, accounting.utime);
	OFFSET(ACCOUNT_SYSTEM_TIME, thread_info, accounting.stime);
#endif
#endif /* CONFIG_PPC64 */

	/* RTAS */
	OFFSET(RTASBASE, rtas_t, base);
	OFFSET(RTASENTRY, rtas_t, entry);

	/* Interrupt register frame */
	DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
	DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
#ifdef CONFIG_PPC64
	/* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
	DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
	DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
#endif /* CONFIG_PPC64 */
	STACK_PT_REGS_OFFSET(GPR0, gpr[0]);
	STACK_PT_REGS_OFFSET(GPR1, gpr[1]);
	STACK_PT_REGS_OFFSET(GPR2, gpr[2]);
	STACK_PT_REGS_OFFSET(GPR3, gpr[3]);
	STACK_PT_REGS_OFFSET(GPR4, gpr[4]);
	STACK_PT_REGS_OFFSET(GPR5, gpr[5]);
	STACK_PT_REGS_OFFSET(GPR6, gpr[6]);
	STACK_PT_REGS_OFFSET(GPR7, gpr[7]);
	STACK_PT_REGS_OFFSET(GPR8, gpr[8]);
	STACK_PT_REGS_OFFSET(GPR9, gpr[9]);
	STACK_PT_REGS_OFFSET(GPR10, gpr[10]);
	STACK_PT_REGS_OFFSET(GPR11, gpr[11]);
	STACK_PT_REGS_OFFSET(GPR12, gpr[12]);
	STACK_PT_REGS_OFFSET(GPR13, gpr[13]);
#ifndef CONFIG_PPC64
	STACK_PT_REGS_OFFSET(GPR14, gpr[14]);
#endif /* CONFIG_PPC64 */
	/*
	 * Note: these symbols include _ because they overlap with special
	 * register names
	 */
	STACK_PT_REGS_OFFSET(_NIP, nip);
	STACK_PT_REGS_OFFSET(_MSR, msr);
	STACK_PT_REGS_OFFSET(_CTR, ctr);
	STACK_PT_REGS_OFFSET(_LINK, link);
	STACK_PT_REGS_OFFSET(_CCR, ccr);
	STACK_PT_REGS_OFFSET(_XER, xer);
	STACK_PT_REGS_OFFSET(_DAR, dar);
	STACK_PT_REGS_OFFSET(_DSISR, dsisr);
	STACK_PT_REGS_OFFSET(ORIG_GPR3, orig_gpr3);
	STACK_PT_REGS_OFFSET(RESULT, result);
	STACK_PT_REGS_OFFSET(_TRAP, trap);
#ifndef CONFIG_PPC64
	/*
	 * The PowerPC 400-class & Book-E processors have neither the DAR
	 * nor the DSISR SPRs. Hence, we overload them to hold the similar
	 * DEAR and ESR SPRs for such processors.  For critical interrupts
	 * we use them to hold SRR0 and SRR1.
	 */
	STACK_PT_REGS_OFFSET(_DEAR, dar);
	STACK_PT_REGS_OFFSET(_ESR, dsisr);
#else /* CONFIG_PPC64 */
	STACK_PT_REGS_OFFSET(SOFTE, softe);

	/* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */
	DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs));
	DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8);
#endif /* CONFIG_PPC64 */

#if defined(CONFIG_PPC32)
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
	DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
	DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
	/* we overload MMUCR for 44x on MAS0 since they are mutually exclusive */
	DEFINE(MMUCR, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
	DEFINE(MAS1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas1));
	DEFINE(MAS2, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas2));
	DEFINE(MAS3, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas3));
	DEFINE(MAS6, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas6));
	DEFINE(MAS7, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas7));
	DEFINE(_SRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr0));
	DEFINE(_SRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr1));
	DEFINE(_CSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr0));
	DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1));
	DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0));
	DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1));
	DEFINE(SAVED_KSP_LIMIT, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, saved_ksp_limit));
#endif
#endif

#ifndef CONFIG_PPC64
	OFFSET(MM_PGD, mm_struct, pgd);
#endif /* ! CONFIG_PPC64 */

	/* About the CPU features table */
	OFFSET(CPU_SPEC_FEATURES, cpu_spec, cpu_features);
	OFFSET(CPU_SPEC_SETUP, cpu_spec, cpu_setup);
	OFFSET(CPU_SPEC_RESTORE, cpu_spec, cpu_restore);

	OFFSET(pbe_address, pbe, address);
	OFFSET(pbe_orig_address, pbe, orig_address);
	OFFSET(pbe_next, pbe, next);

#ifndef CONFIG_PPC64
	DEFINE(TASK_SIZE, TASK_SIZE);
	DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28);
#endif /* ! CONFIG_PPC64 */

	/* datapage offsets for use by vdso */
	OFFSET(CFG_TB_ORIG_STAMP, vdso_data, tb_orig_stamp);
	OFFSET(CFG_TB_TICKS_PER_SEC, vdso_data, tb_ticks_per_sec);
	OFFSET(CFG_TB_TO_XS, vdso_data, tb_to_xs);
	OFFSET(CFG_TB_UPDATE_COUNT, vdso_data, tb_update_count);
	OFFSET(CFG_TZ_MINUTEWEST, vdso_data, tz_minuteswest);
	OFFSET(CFG_TZ_DSTTIME, vdso_data, tz_dsttime);
	OFFSET(CFG_SYSCALL_MAP32, vdso_data, syscall_map_32);
	OFFSET(WTOM_CLOCK_SEC, vdso_data, wtom_clock_sec);
	OFFSET(WTOM_CLOCK_NSEC, vdso_data, wtom_clock_nsec);
	OFFSET(STAMP_XTIME, vdso_data, stamp_xtime);
	OFFSET(STAMP_SEC_FRAC, vdso_data, stamp_sec_fraction);
	OFFSET(CFG_ICACHE_BLOCKSZ, vdso_data, icache_block_size);
	OFFSET(CFG_DCACHE_BLOCKSZ, vdso_data, dcache_block_size);
	OFFSET(CFG_ICACHE_LOGBLOCKSZ, vdso_data, icache_log_block_size);
	OFFSET(CFG_DCACHE_LOGBLOCKSZ, vdso_data, dcache_log_block_size);
#ifdef CONFIG_PPC64
	OFFSET(CFG_SYSCALL_MAP64, vdso_data, syscall_map_64);
	OFFSET(TVAL64_TV_SEC, timeval, tv_sec);
	OFFSET(TVAL64_TV_USEC, timeval, tv_usec);
	OFFSET(TVAL32_TV_SEC, compat_timeval, tv_sec);
	OFFSET(TVAL32_TV_USEC, compat_timeval, tv_usec);
	OFFSET(TSPC64_TV_SEC, timespec, tv_sec);
	OFFSET(TSPC64_TV_NSEC, timespec, tv_nsec);
	OFFSET(TSPC32_TV_SEC, compat_timespec, tv_sec);
	OFFSET(TSPC32_TV_NSEC, compat_timespec, tv_nsec);
#else
	OFFSET(TVAL32_TV_SEC, timeval, tv_sec);
	OFFSET(TVAL32_TV_USEC, timeval, tv_usec);
	OFFSET(TSPC32_TV_SEC, timespec, tv_sec);
	OFFSET(TSPC32_TV_NSEC, timespec, tv_nsec);
#endif
	/* timeval/timezone offsets for use by vdso */
	OFFSET(TZONE_TZ_MINWEST, timezone, tz_minuteswest);
	OFFSET(TZONE_TZ_DSTTIME, timezone, tz_dsttime);

	/* Other bits used by the vdso */
	DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
	DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
	DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
	DEFINE(CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE);
	DEFINE(NSEC_PER_SEC, NSEC_PER_SEC);
	DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);

#ifdef CONFIG_BUG
	DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	DEFINE(PGD_TABLE_SIZE, (sizeof(pgd_t) << max(RADIX_PGD_INDEX_SIZE, H_PGD_INDEX_SIZE)));
#else
	DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE);
#endif
	DEFINE(PTE_SIZE, sizeof(pte_t));

#ifdef CONFIG_KVM
	OFFSET(VCPU_HOST_STACK, kvm_vcpu, arch.host_stack);
	OFFSET(VCPU_HOST_PID, kvm_vcpu, arch.host_pid);
	OFFSET(VCPU_GUEST_PID, kvm_vcpu, arch.pid);
	OFFSET(VCPU_GPRS, kvm_vcpu, arch.gpr);
	OFFSET(VCPU_VRSAVE, kvm_vcpu, arch.vrsave);
	OFFSET(VCPU_FPRS, kvm_vcpu, arch.fp.fpr);
#ifdef CONFIG_ALTIVEC
	OFFSET(VCPU_VRS, kvm_vcpu, arch.vr.vr);
#endif
	OFFSET(VCPU_XER, kvm_vcpu, arch.xer);
	OFFSET(VCPU_CTR, kvm_vcpu, arch.ctr);
	OFFSET(VCPU_LR, kvm_vcpu, arch.lr);
#ifdef CONFIG_PPC_BOOK3S
	OFFSET(VCPU_TAR, kvm_vcpu, arch.tar);
#endif
	OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
	OFFSET(VCPU_PC, kvm_vcpu, arch.pc);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr);
	OFFSET(VCPU_SRR0, kvm_vcpu, arch.shregs.srr0);
	OFFSET(VCPU_SRR1, kvm_vcpu, arch.shregs.srr1);
	OFFSET(VCPU_SPRG0, kvm_vcpu, arch.shregs.sprg0);
	OFFSET(VCPU_SPRG1, kvm_vcpu, arch.shregs.sprg1);
	OFFSET(VCPU_SPRG2, kvm_vcpu, arch.shregs.sprg2);
	OFFSET(VCPU_SPRG3, kvm_vcpu, arch.shregs.sprg3);
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	OFFSET(VCPU_TB_RMENTRY, kvm_vcpu, arch.rm_entry);
	OFFSET(VCPU_TB_RMINTR, kvm_vcpu, arch.rm_intr);
	OFFSET(VCPU_TB_RMEXIT, kvm_vcpu, arch.rm_exit);
	OFFSET(VCPU_TB_GUEST, kvm_vcpu, arch.guest_time);
	OFFSET(VCPU_TB_CEDE, kvm_vcpu, arch.cede_time);
	OFFSET(VCPU_CUR_ACTIVITY, kvm_vcpu, arch.cur_activity);
	OFFSET(VCPU_ACTIVITY_START, kvm_vcpu, arch.cur_tb_start);
	OFFSET(TAS_SEQCOUNT, kvmhv_tb_accumulator, seqcount);
	OFFSET(TAS_TOTAL, kvmhv_tb_accumulator, tb_total);
	OFFSET(TAS_MIN, kvmhv_tb_accumulator, tb_min);
	OFFSET(TAS_MAX, kvmhv_tb_accumulator, tb_max);
#endif
	OFFSET(VCPU_SHARED_SPRG3, kvm_vcpu_arch_shared, sprg3);
	OFFSET(VCPU_SHARED_SPRG4, kvm_vcpu_arch_shared, sprg4);
	OFFSET(VCPU_SHARED_SPRG5, kvm_vcpu_arch_shared, sprg5);
	OFFSET(VCPU_SHARED_SPRG6, kvm_vcpu_arch_shared, sprg6);
	OFFSET(VCPU_SHARED_SPRG7, kvm_vcpu_arch_shared, sprg7);
	OFFSET(VCPU_SHADOW_PID, kvm_vcpu, arch.shadow_pid);
	OFFSET(VCPU_SHADOW_PID1, kvm_vcpu, arch.shadow_pid1);
	OFFSET(VCPU_SHARED, kvm_vcpu, arch.shared);
	OFFSET(VCPU_SHARED_MSR, kvm_vcpu_arch_shared, msr);
	OFFSET(VCPU_SHADOW_MSR, kvm_vcpu, arch.shadow_msr);
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	OFFSET(VCPU_SHAREDBE, kvm_vcpu, arch.shared_big_endian);
#endif

	OFFSET(VCPU_SHARED_MAS0, kvm_vcpu_arch_shared, mas0);
	OFFSET(VCPU_SHARED_MAS1, kvm_vcpu_arch_shared, mas1);
	OFFSET(VCPU_SHARED_MAS2, kvm_vcpu_arch_shared, mas2);
	OFFSET(VCPU_SHARED_MAS7_3, kvm_vcpu_arch_shared, mas7_3);
	OFFSET(VCPU_SHARED_MAS4, kvm_vcpu_arch_shared, mas4);
	OFFSET(VCPU_SHARED_MAS6, kvm_vcpu_arch_shared, mas6);

	OFFSET(VCPU_KVM, kvm_vcpu, kvm);
	OFFSET(KVM_LPID, kvm, arch.lpid);

	/* book3s */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	OFFSET(KVM_TLB_SETS, kvm, arch.tlb_sets);
	OFFSET(KVM_SDR1, kvm, arch.sdr1);
	OFFSET(KVM_HOST_LPID, kvm, arch.host_lpid);
	OFFSET(KVM_HOST_LPCR, kvm, arch.host_lpcr);
	OFFSET(KVM_HOST_SDR1, kvm, arch.host_sdr1);
	OFFSET(KVM_NEED_FLUSH, kvm, arch.need_tlb_flush.bits);
	OFFSET(KVM_ENABLED_HCALLS, kvm, arch.enabled_hcalls);
	OFFSET(KVM_VRMA_SLB_V, kvm, arch.vrma_slb_v);
	OFFSET(KVM_RADIX, kvm, arch.radix);
	OFFSET(KVM_FWNMI, kvm, arch.fwnmi_enabled);
	OFFSET(VCPU_DSISR, kvm_vcpu, arch.shregs.dsisr);
	OFFSET(VCPU_DAR, kvm_vcpu, arch.shregs.dar);
	OFFSET(VCPU_VPA, kvm_vcpu, arch.vpa.pinned_addr);
	OFFSET(VCPU_VPA_DIRTY, kvm_vcpu, arch.vpa.dirty);
	OFFSET(VCPU_HEIR, kvm_vcpu, arch.emul_inst);
	OFFSET(VCPU_CPU, kvm_vcpu, cpu);
	OFFSET(VCPU_THREAD_CPU, kvm_vcpu, arch.thread_cpu);
#endif
#ifdef CONFIG_PPC_BOOK3S
	OFFSET(VCPU_PURR, kvm_vcpu, arch.purr);
	OFFSET(VCPU_SPURR, kvm_vcpu, arch.spurr);
	OFFSET(VCPU_IC, kvm_vcpu, arch.ic);
	OFFSET(VCPU_DSCR, kvm_vcpu, arch.dscr);
	OFFSET(VCPU_AMR, kvm_vcpu, arch.amr);
	OFFSET(VCPU_UAMOR, kvm_vcpu, arch.uamor);
	OFFSET(VCPU_IAMR, kvm_vcpu, arch.iamr);
	OFFSET(VCPU_CTRL, kvm_vcpu, arch.ctrl);
	OFFSET(VCPU_DABR, kvm_vcpu, arch.dabr);
	OFFSET(VCPU_DABRX, kvm_vcpu, arch.dabrx);
	OFFSET(VCPU_DAWR, kvm_vcpu, arch.dawr);
	OFFSET(VCPU_DAWRX, kvm_vcpu, arch.dawrx);
	OFFSET(VCPU_CIABR, kvm_vcpu, arch.ciabr);
	OFFSET(VCPU_HFLAGS, kvm_vcpu, arch.hflags);
	OFFSET(VCPU_DEC, kvm_vcpu, arch.dec);
	OFFSET(VCPU_DEC_EXPIRES, kvm_vcpu, arch.dec_expires);
	OFFSET(VCPU_PENDING_EXC, kvm_vcpu, arch.pending_exceptions);
	OFFSET(VCPU_CEDED, kvm_vcpu, arch.ceded);
	OFFSET(VCPU_PRODDED, kvm_vcpu, arch.prodded);
	OFFSET(VCPU_IRQ_PENDING, kvm_vcpu, arch.irq_pending);
	OFFSET(VCPU_DBELL_REQ, kvm_vcpu, arch.doorbell_request);
	OFFSET(VCPU_MMCR, kvm_vcpu, arch.mmcr);
	OFFSET(VCPU_PMC, kvm_vcpu, arch.pmc);
	OFFSET(VCPU_SPMC, kvm_vcpu, arch.spmc);
	OFFSET(VCPU_SIAR, kvm_vcpu, arch.siar);
	OFFSET(VCPU_SDAR, kvm_vcpu, arch.sdar);
	OFFSET(VCPU_SIER, kvm_vcpu, arch.sier);
	OFFSET(VCPU_SLB, kvm_vcpu, arch.slb);
	OFFSET(VCPU_SLB_MAX, kvm_vcpu, arch.slb_max);
	OFFSET(VCPU_SLB_NR, kvm_vcpu, arch.slb_nr);
	OFFSET(VCPU_FAULT_DSISR, kvm_vcpu, arch.fault_dsisr);
	OFFSET(VCPU_FAULT_DAR, kvm_vcpu, arch.fault_dar);
	OFFSET(VCPU_FAULT_GPA, kvm_vcpu, arch.fault_gpa);
	OFFSET(VCPU_INTR_MSR, kvm_vcpu, arch.intr_msr);
	OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst);
	OFFSET(VCPU_TRAP, kvm_vcpu, arch.trap);
	OFFSET(VCPU_CFAR, kvm_vcpu, arch.cfar);
	OFFSET(VCPU_PPR, kvm_vcpu, arch.ppr);
	OFFSET(VCPU_FSCR, kvm_vcpu, arch.fscr);
	OFFSET(VCPU_PSPB, kvm_vcpu, arch.pspb);
	OFFSET(VCPU_EBBHR, kvm_vcpu, arch.ebbhr);
	OFFSET(VCPU_EBBRR, kvm_vcpu, arch.ebbrr);
	OFFSET(VCPU_BESCR, kvm_vcpu, arch.bescr);
	OFFSET(VCPU_CSIGR, kvm_vcpu, arch.csigr);
	OFFSET(VCPU_TACR, kvm_vcpu, arch.tacr);
	OFFSET(VCPU_TCSCR, kvm_vcpu, arch.tcscr);
	OFFSET(VCPU_ACOP, kvm_vcpu, arch.acop);
	OFFSET(VCPU_WORT, kvm_vcpu, arch.wort);
	OFFSET(VCPU_TID, kvm_vcpu, arch.tid);
	OFFSET(VCPU_PSSCR, kvm_vcpu, arch.psscr);
	OFFSET(VCPU_HFSCR, kvm_vcpu, arch.hfscr);
	OFFSET(VCORE_ENTRY_EXIT, kvmppc_vcore, entry_exit_map);
	OFFSET(VCORE_IN_GUEST, kvmppc_vcore, in_guest);
	OFFSET(VCORE_NAPPING_THREADS, kvmppc_vcore, napping_threads);
	OFFSET(VCORE_KVM, kvmppc_vcore, kvm);
	OFFSET(VCORE_TB_OFFSET, kvmppc_vcore, tb_offset);
	OFFSET(VCORE_LPCR, kvmppc_vcore, lpcr);
	OFFSET(VCORE_PCR, kvmppc_vcore, pcr);
	OFFSET(VCORE_DPDES, kvmppc_vcore, dpdes);
	OFFSET(VCORE_VTB, kvmppc_vcore, vtb);
	OFFSET(VCPU_SLB_E, kvmppc_slb, orige);
	OFFSET(VCPU_SLB_V, kvmppc_slb, origv);
	DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	OFFSET(VCPU_TFHAR, kvm_vcpu, arch.tfhar);
	OFFSET(VCPU_TFIAR, kvm_vcpu, arch.tfiar);
	OFFSET(VCPU_TEXASR, kvm_vcpu, arch.texasr);
	OFFSET(VCPU_ORIG_TEXASR, kvm_vcpu, arch.orig_texasr);
	OFFSET(VCPU_GPR_TM, kvm_vcpu, arch.gpr_tm);
	OFFSET(VCPU_FPRS_TM, kvm_vcpu, arch.fp_tm.fpr);
	OFFSET(VCPU_VRS_TM, kvm_vcpu, arch.vr_tm.vr);
	OFFSET(VCPU_VRSAVE_TM, kvm_vcpu, arch.vrsave_tm);
	OFFSET(VCPU_CR_TM, kvm_vcpu, arch.cr_tm);
	OFFSET(VCPU_XER_TM, kvm_vcpu, arch.xer_tm);
	OFFSET(VCPU_LR_TM, kvm_vcpu, arch.lr_tm);
	OFFSET(VCPU_CTR_TM, kvm_vcpu, arch.ctr_tm);
	OFFSET(VCPU_AMR_TM, kvm_vcpu, arch.amr_tm);
	OFFSET(VCPU_PPR_TM, kvm_vcpu, arch.ppr_tm);
	OFFSET(VCPU_DSCR_TM, kvm_vcpu, arch.dscr_tm);
	OFFSET(VCPU_TAR_TM, kvm_vcpu, arch.tar_tm);
#endif

#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	OFFSET(PACA_SVCPU, paca_struct, shadow_vcpu);
# define SVCPU_FIELD(x, f)	DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
#else
# define SVCPU_FIELD(x, f)
#endif
# define HSTATE_FIELD(x, f)	DEFINE(x, offsetof(struct paca_struct, kvm_hstate.f))
#else	/* 32-bit */
# define SVCPU_FIELD(x, f)	DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, f))
# define HSTATE_FIELD(x, f)	DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, hstate.f))
#endif

	SVCPU_FIELD(SVCPU_CR, cr);
	SVCPU_FIELD(SVCPU_XER, xer);
	SVCPU_FIELD(SVCPU_CTR, ctr);
	SVCPU_FIELD(SVCPU_LR, lr);
	SVCPU_FIELD(SVCPU_PC, pc);
	SVCPU_FIELD(SVCPU_R0, gpr[0]);
	SVCPU_FIELD(SVCPU_R1, gpr[1]);
	SVCPU_FIELD(SVCPU_R2, gpr[2]);
	SVCPU_FIELD(SVCPU_R3, gpr[3]);
	SVCPU_FIELD(SVCPU_R4, gpr[4]);
	SVCPU_FIELD(SVCPU_R5, gpr[5]);
	SVCPU_FIELD(SVCPU_R6, gpr[6]);
	SVCPU_FIELD(SVCPU_R7, gpr[7]);
	SVCPU_FIELD(SVCPU_R8, gpr[8]);
	SVCPU_FIELD(SVCPU_R9, gpr[9]);
	SVCPU_FIELD(SVCPU_R10, gpr[10]);
	SVCPU_FIELD(SVCPU_R11, gpr[11]);
	SVCPU_FIELD(SVCPU_R12, gpr[12]);
	SVCPU_FIELD(SVCPU_R13, gpr[13]);
	SVCPU_FIELD(SVCPU_FAULT_DSISR, fault_dsisr);
	SVCPU_FIELD(SVCPU_FAULT_DAR, fault_dar);
	SVCPU_FIELD(SVCPU_LAST_INST, last_inst);
	SVCPU_FIELD(SVCPU_SHADOW_SRR1, shadow_srr1);
#ifdef CONFIG_PPC_BOOK3S_32
	SVCPU_FIELD(SVCPU_SR, sr);
#endif
#ifdef CONFIG_PPC64
	SVCPU_FIELD(SVCPU_SLB, slb);
	SVCPU_FIELD(SVCPU_SLB_MAX, slb_max);
	SVCPU_FIELD(SVCPU_SHADOW_FSCR, shadow_fscr);
#endif

	HSTATE_FIELD(HSTATE_HOST_R1, host_r1);
	HSTATE_FIELD(HSTATE_HOST_R2, host_r2);
	HSTATE_FIELD(HSTATE_HOST_MSR, host_msr);
	HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
	HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
	HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
	HSTATE_FIELD(HSTATE_SCRATCH2, scratch2);
	HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
	HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
	HSTATE_FIELD(HSTATE_NAPPING, napping);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	HSTATE_FIELD(HSTATE_HWTHREAD_REQ, hwthread_req);
	HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state);
	HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
	HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
	HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
	HSTATE_FIELD(HSTATE_XIVE_TIMA_PHYS, xive_tima_phys);
	HSTATE_FIELD(HSTATE_XIVE_TIMA_VIRT, xive_tima_virt);
	HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr);
	HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
	HSTATE_FIELD(HSTATE_PTID, ptid);
	HSTATE_FIELD(HSTATE_TID, tid);
	HSTATE_FIELD(HSTATE_FAKE_SUSPEND, fake_suspend);
	HSTATE_FIELD(HSTATE_MMCR0, host_mmcr[0]);
	HSTATE_FIELD(HSTATE_MMCR1, host_mmcr[1]);
	HSTATE_FIELD(HSTATE_MMCRA, host_mmcr[2]);
	HSTATE_FIELD(HSTATE_SIAR, host_mmcr[3]);
	HSTATE_FIELD(HSTATE_SDAR, host_mmcr[4]);
	HSTATE_FIELD(HSTATE_MMCR2, host_mmcr[5]);
	HSTATE_FIELD(HSTATE_SIER, host_mmcr[6]);
	HSTATE_FIELD(HSTATE_PMC1, host_pmc[0]);
	HSTATE_FIELD(HSTATE_PMC2, host_pmc[1]);
	HSTATE_FIELD(HSTATE_PMC3, host_pmc[2]);
	HSTATE_FIELD(HSTATE_PMC4, host_pmc[3]);
	HSTATE_FIELD(HSTATE_PMC5, host_pmc[4]);
	HSTATE_FIELD(HSTATE_PMC6, host_pmc[5]);
	HSTATE_FIELD(HSTATE_PURR, host_purr);
	HSTATE_FIELD(HSTATE_SPURR, host_spurr);
	HSTATE_FIELD(HSTATE_DSCR, host_dscr);
	HSTATE_FIELD(HSTATE_DABR, dabr);
	HSTATE_FIELD(HSTATE_DECEXP, dec_expires);
	HSTATE_FIELD(HSTATE_SPLIT_MODE, kvm_split_mode);
	DEFINE(IPI_PRIORITY, IPI_PRIORITY);
	OFFSET(KVM_SPLIT_RPR, kvm_split_mode, rpr);
	OFFSET(KVM_SPLIT_PMMAR, kvm_split_mode, pmmar);
	OFFSET(KVM_SPLIT_LDBAR, kvm_split_mode, ldbar);
	OFFSET(KVM_SPLIT_DO_NAP, kvm_split_mode, do_nap);
	OFFSET(KVM_SPLIT_NAPPED, kvm_split_mode, napped);
	OFFSET(KVM_SPLIT_DO_SET, kvm_split_mode, do_set);
	OFFSET(KVM_SPLIT_DO_RESTORE, kvm_split_mode, do_restore);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#ifdef CONFIG_PPC_BOOK3S_64
	HSTATE_FIELD(HSTATE_CFAR, cfar);
	HSTATE_FIELD(HSTATE_PPR, ppr);
	HSTATE_FIELD(HSTATE_HOST_FSCR, host_fscr);
#endif /* CONFIG_PPC_BOOK3S_64 */

#else /* CONFIG_PPC_BOOK3S */
	OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
	OFFSET(VCPU_XER, kvm_vcpu, arch.xer);
	OFFSET(VCPU_LR, kvm_vcpu, arch.lr);
	OFFSET(VCPU_CTR, kvm_vcpu, arch.ctr);
	OFFSET(VCPU_PC, kvm_vcpu, arch.pc);
	OFFSET(VCPU_SPRG9, kvm_vcpu, arch.sprg9);
	OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst);
	OFFSET(VCPU_FAULT_DEAR, kvm_vcpu, arch.fault_dear);
	OFFSET(VCPU_FAULT_ESR, kvm_vcpu, arch.fault_esr);
	OFFSET(VCPU_CRIT_SAVE, kvm_vcpu, arch.crit_save);
#endif /* CONFIG_PPC_BOOK3S */
#endif /* CONFIG_KVM */

#ifdef CONFIG_KVM_GUEST
	OFFSET(KVM_MAGIC_SCRATCH1, kvm_vcpu_arch_shared, scratch1);
	OFFSET(KVM_MAGIC_SCRATCH2, kvm_vcpu_arch_shared, scratch2);
	OFFSET(KVM_MAGIC_SCRATCH3, kvm_vcpu_arch_shared, scratch3);
	OFFSET(KVM_MAGIC_INT, kvm_vcpu_arch_shared, int_pending);
	OFFSET(KVM_MAGIC_MSR, kvm_vcpu_arch_shared, msr);
	OFFSET(KVM_MAGIC_CRITICAL, kvm_vcpu_arch_shared, critical);
	OFFSET(KVM_MAGIC_SR, kvm_vcpu_arch_shared, sr);
#endif

#ifdef CONFIG_44x
	DEFINE(PGD_T_LOG2, PGD_T_LOG2);
	DEFINE(PTE_T_LOG2, PTE_T_LOG2);
#endif
#ifdef CONFIG_PPC_FSL_BOOK3E
	DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
	OFFSET(TLBCAM_MAS0, tlbcam, MAS0);
	OFFSET(TLBCAM_MAS1, tlbcam, MAS1);
	OFFSET(TLBCAM_MAS2, tlbcam, MAS2);
	OFFSET(TLBCAM_MAS3, tlbcam, MAS3);
	OFFSET(TLBCAM_MAS7, tlbcam, MAS7);
#endif

#if defined(CONFIG_KVM) && defined(CONFIG_SPE)
	OFFSET(VCPU_EVR, kvm_vcpu, arch.evr[0]);
	OFFSET(VCPU_ACC, kvm_vcpu, arch.acc);
	OFFSET(VCPU_SPEFSCR, kvm_vcpu, arch.spefscr);
	OFFSET(VCPU_HOST_SPEFSCR, kvm_vcpu, arch.host_spefscr);
#endif

#ifdef CONFIG_KVM_BOOKE_HV
	OFFSET(VCPU_HOST_MAS4, kvm_vcpu, arch.host_mas4);
	OFFSET(VCPU_HOST_MAS6, kvm_vcpu, arch.host_mas6);
#endif

#ifdef CONFIG_KVM_XICS
	DEFINE(VCPU_XIVE_SAVED_STATE, offsetof(struct kvm_vcpu,
					       arch.xive_saved_state));
	DEFINE(VCPU_XIVE_CAM_WORD, offsetof(struct kvm_vcpu,
					    arch.xive_cam_word));
	DEFINE(VCPU_XIVE_PUSHED, offsetof(struct kvm_vcpu, arch.xive_pushed));
	DEFINE(VCPU_XIVE_ESC_ON, offsetof(struct kvm_vcpu, arch.xive_esc_on));
	DEFINE(VCPU_XIVE_ESC_RADDR, offsetof(struct kvm_vcpu, arch.xive_esc_raddr));
	DEFINE(VCPU_XIVE_ESC_VADDR, offsetof(struct kvm_vcpu, arch.xive_esc_vaddr));
#endif

#ifdef CONFIG_KVM_EXIT_TIMING
	OFFSET(VCPU_TIMING_EXIT_TBU, kvm_vcpu, arch.timing_exit.tv32.tbu);
	OFFSET(VCPU_TIMING_EXIT_TBL, kvm_vcpu, arch.timing_exit.tv32.tbl);
	OFFSET(VCPU_TIMING_LAST_ENTER_TBU, kvm_vcpu, arch.timing_last_enter.tv32.tbu);
	OFFSET(VCPU_TIMING_LAST_ENTER_TBL, kvm_vcpu, arch.timing_last_enter.tv32.tbl);
#endif

#ifdef CONFIG_PPC_POWERNV
	OFFSET(PACA_CORE_IDLE_STATE_PTR, paca_struct, core_idle_state_ptr);
	OFFSET(PACA_THREAD_IDLE_STATE, paca_struct, thread_idle_state);
	OFFSET(PACA_THREAD_MASK, paca_struct, thread_mask);
	OFFSET(PACA_SUBCORE_SIBLING_MASK, paca_struct, subcore_sibling_mask);
	OFFSET(PACA_SIBLING_PACA_PTRS, paca_struct, thread_sibling_pacas);
	OFFSET(PACA_REQ_PSSCR, paca_struct, requested_psscr);
	OFFSET(PACA_DONT_STOP, paca_struct, dont_stop);
#define STOP_SPR(x, f)	OFFSET(x, paca_struct, stop_sprs.f)
	STOP_SPR(STOP_PID, pid);
	STOP_SPR(STOP_LDBAR, ldbar);
	STOP_SPR(STOP_FSCR, fscr);
	STOP_SPR(STOP_HFSCR, hfscr);
	STOP_SPR(STOP_MMCR1, mmcr1);
	STOP_SPR(STOP_MMCR2, mmcr2);
	STOP_SPR(STOP_MMCRA, mmcra);
#endif

	DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER);
	DEFINE(PPC_DBELL_MSGTYPE, PPC_DBELL_MSGTYPE);

#ifdef CONFIG_PPC_8xx
	DEFINE(VIRT_IMMR_BASE, (u64)__fix_to_virt(FIX_IMMR_BASE));
#endif

	return 0;
}
Example #10
void start(unsigned long a1, unsigned long a2, void *promptr, void *sp)
{
	int len;
	kernel_entry_t kernel_entry;

	memset(__bss_start, 0, _end - __bss_start);

	prom = (int (*)(void *)) promptr;
	chosen_handle = finddevice("/chosen");
	if (chosen_handle == (void *) -1)
		exit();
	if (getprop(chosen_handle, "stdout", &stdout, sizeof(stdout)) != 4)
		exit();

	printf("\n\rzImage starting: loaded at 0x%p (sp: 0x%p)\n\r", _start, sp);

	/*
	 * The first available claim_base must be above the end of
	 * the loaded kernel wrapper file (_start to _end includes the
	 * initrd image if it is present) and rounded up to a nice
	 * 1 MB boundary for good measure.
	 */

	claim_base = _ALIGN_UP((unsigned long)_end, ONE_MB);

	vmlinuz.addr = (unsigned long)_vmlinux_start;
	vmlinuz.size = (unsigned long)(_vmlinux_end - _vmlinux_start);

	/* gunzip the ELF header of the kernel */
	if (*(unsigned short *)vmlinuz.addr == 0x1f8b) {
		len = vmlinuz.size;
		gunzip(elfheader, sizeof(elfheader),
				(unsigned char *)vmlinuz.addr, &len);
	} else
		memcpy(elfheader, (const void *)vmlinuz.addr, sizeof(elfheader));

	if (!is_elf64(elfheader) && !is_elf32(elfheader)) {
		printf("Error: not a valid PPC32 or PPC64 ELF file!\n\r");
		exit();
	}

	/* We need to claim the memsize plus the file offset since gzip
	 * will expand the header (file offset), then the kernel, then
	 * possible rubbish we don't care about. But the kernel bss must
	 * be claimed (it will be zero'd by the kernel itself)
	 */
	printf("Allocating 0x%lx bytes for kernel ...\n\r", vmlinux.memsize);
	vmlinux.addr = try_claim(vmlinux.memsize);
	if (vmlinux.addr == 0) {
		printf("Can't allocate memory for kernel image !\n\r");
		exit();
	}

	/*
	 * Now we try to claim memory for the initrd (and copy it there)
	 */
	initrd.size = (unsigned long)(_initrd_end - _initrd_start);
	initrd.memsize = initrd.size;
	if ( initrd.size > 0 ) {
		printf("Allocating 0x%lx bytes for initrd ...\n\r", initrd.size);
		initrd.addr = try_claim(initrd.size);
		if (initrd.addr == 0) {
			printf("Can't allocate memory for initial ramdisk !\n\r");
			exit();
		}
		a1 = initrd.addr;
		a2 = initrd.size;
		printf("initial ramdisk moving 0x%lx <- 0x%lx (0x%lx bytes)\n\r",
		       initrd.addr, (unsigned long)_initrd_start, initrd.size);
		memmove((void *)initrd.addr, (void *)_initrd_start, initrd.size);
		printf("initrd head: 0x%lx\n\r", *((unsigned long *)initrd.addr));
	}

	/* Eventually gunzip the kernel */
	if (*(unsigned short *)vmlinuz.addr == 0x1f8b) {
		printf("gunzipping (0x%lx <- 0x%lx:0x%0lx)...",
		       vmlinux.addr, vmlinuz.addr, vmlinuz.addr+vmlinuz.size);
		len = vmlinuz.size;
		gunzip((void *)vmlinux.addr, vmlinux.memsize,
			(unsigned char *)vmlinuz.addr, &len);
		printf("done 0x%lx bytes\n\r", len);
	} else {
		memmove((void *)vmlinux.addr,(void *)vmlinuz.addr,vmlinuz.size);
	}

	export_cmdline(chosen_handle);

	/* Skip over the ELF header */
#ifdef DEBUG
	printf("... skipping 0x%lx bytes of ELF header\n\r",
			elfoffset);
#endif
	vmlinux.addr += elfoffset;

	flush_cache((void *)vmlinux.addr, vmlinux.size);

	kernel_entry = (kernel_entry_t)vmlinux.addr;
#ifdef DEBUG
	printf( "kernel:\n\r"
		"        entry addr = 0x%lx\n\r"
		"        a1         = 0x%lx,\n\r"
		"        a2         = 0x%lx,\n\r"
		"        prom       = 0x%lx,\n\r"
		"        bi_recs    = 0x%lx,\n\r",
		(unsigned long)kernel_entry, a1, a2,
		(unsigned long)prom, NULL);
#endif

	kernel_entry(a1, a2, prom, NULL);

	printf("Error: Linux kernel returned to zImage bootloader!\n\r");

	exit();
}
Example #11
static int __init parse_numa_properties(void)
{
	struct device_node *cpu = NULL;
	struct device_node *memory = NULL;
	int depth;
	int max_domain = 0;
	long entries = lmb_end_of_DRAM() >> MEMORY_INCREMENT_SHIFT;
	unsigned long i;

	if (strstr(saved_command_line, "numa=off")) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	numa_memory_lookup_table =
		(char *)abs_to_virt(lmb_alloc(entries * sizeof(char), 1));

	for (i = 0; i < entries ; i++)
		numa_memory_lookup_table[i] = ARRAY_INITIALISER;

	depth = find_min_common_depth();

	printk(KERN_INFO "NUMA associativity depth for CPU/Memory: %d\n", depth);
	if (depth < 0)
		return depth;

	for_each_cpu(i) {
		int numa_domain;

		cpu = find_cpu_node(i);

		if (cpu) {
			numa_domain = of_node_numa_domain(cpu, depth);
			of_node_put(cpu);

			if (numa_domain >= MAX_NUMNODES) {
				/*
				 * POWER4 LPAR uses 0xffff as invalid node,
				 * don't warn in this case.
				 */
				if (numa_domain != 0xffff)
					printk(KERN_ERR "WARNING: cpu %ld "
					       "maps to invalid NUMA node %d\n",
					       i, numa_domain);
				numa_domain = 0;
			}
		} else {
			printk(KERN_ERR "WARNING: no NUMA information for "
			       "cpu %ld\n", i);
			numa_domain = 0;
		}

		node_set_online(numa_domain);

		if (max_domain < numa_domain)
			max_domain = numa_domain;

		map_cpu_to_node(i, numa_domain);
	}

	memory = NULL;
	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start;
		unsigned long size;
		int numa_domain;
		int ranges;
		unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		ranges = memory->n_addrs;
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_cell_ul(memory, &memcell_buf);
		size = read_cell_ul(memory, &memcell_buf);

		start = _ALIGN_DOWN(start, MEMORY_INCREMENT);
		size = _ALIGN_UP(size, MEMORY_INCREMENT);

		numa_domain = of_node_numa_domain(memory, depth);

		if (numa_domain >= MAX_NUMNODES) {
			if (numa_domain != 0xffff)
				printk(KERN_ERR "WARNING: memory at %lx maps "
				       "to invalid NUMA node %d\n", start,
				       numa_domain);
			numa_domain = 0;
		}

		node_set_online(numa_domain);

		if (max_domain < numa_domain)
			max_domain = numa_domain;

		/* 
		 * For backwards compatibility, OF splits the first node
		 * into two regions (the first being 0-4GB). Check for
		 * this simple case and complain if there is a gap in
		 * memory
		 */
		if (node_data[numa_domain].node_spanned_pages) {
			unsigned long shouldstart =
				node_data[numa_domain].node_start_pfn + 
				node_data[numa_domain].node_spanned_pages;
			if (shouldstart != (start / PAGE_SIZE)) {
				printk(KERN_ERR "Hole in node, disabling "
						"region start %lx length %lx\n",
						start, size);
				continue;
			}
			node_data[numa_domain].node_spanned_pages +=
				size / PAGE_SIZE;
		} else {
			node_data[numa_domain].node_start_pfn =
				start / PAGE_SIZE;
			node_data[numa_domain].node_spanned_pages =
				size / PAGE_SIZE;
		}

		for (i = start ; i < (start+size); i += MEMORY_INCREMENT)
			numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] =
				numa_domain;

		dbg("memory region %lx to %lx maps to domain %d\n",
		    start, start+size, numa_domain);

		ranges--;
		if (ranges)
			goto new_range;
	}

	numnodes = max_domain + 1;

	return 0;
}
Example #12
static int __devinit snd_ps3_driver_probe(struct ps3_system_bus_device *dev)
{
	int i, ret;
	u64 lpar_addr, lpar_size;

	BUG_ON(!firmware_has_feature(FW_FEATURE_PS3_LV1));
	BUG_ON(dev->match_id != PS3_MATCH_ID_SOUND);

	the_card.ps3_dev = dev;

	ret = ps3_open_hv_device(dev);

	if (ret)
		return -ENXIO;

	/* setup MMIO */
	ret = lv1_gpu_device_map(2, &lpar_addr, &lpar_size);
	if (ret) {
		pr_info("%s: device map 2 failed %d\n", __func__, ret);
		goto clean_open;
	}
	ps3_mmio_region_init(dev, dev->m_region, lpar_addr, lpar_size,
		PAGE_SHIFT);

	ret = snd_ps3_map_mmio();
	if (ret)
		goto clean_dev_map;

	/* setup DMA area */
	ps3_dma_region_init(dev, dev->d_region,
			    PAGE_SHIFT, /* use system page size */
			    0, /* dma type; not used */
			    NULL,
			    _ALIGN_UP(SND_PS3_DMA_REGION_SIZE, PAGE_SIZE));
	dev->d_region->ioid = PS3_AUDIO_IOID;

	ret = ps3_dma_region_create(dev->d_region);
	if (ret) {
		pr_info("%s: region_create\n", __func__);
		goto clean_mmio;
	}

	snd_ps3_audio_set_base_addr(dev->d_region->bus_addr);

	/* CONFIG_SND_PS3_DEFAULT_START_DELAY */
	the_card.start_delay = snd_ps3_start_delay;

	/* irq */
	if (snd_ps3_allocate_irq()) {
		ret = -ENXIO;
		goto clean_dma_region;
	}

	/* create card instance */
	ret = snd_card_create(index, id, THIS_MODULE, 0, &the_card.card);
	if (ret < 0)
		goto clean_irq;

	strcpy(the_card.card->driver, "PS3");
	strcpy(the_card.card->shortname, "PS3");
	strcpy(the_card.card->longname, "PS3 sound");

	/* create control elements */
	for (i = 0; i < ARRAY_SIZE(spdif_ctls); i++) {
		ret = snd_ctl_add(the_card.card,
				  snd_ctl_new1(&spdif_ctls[i], &the_card));
		if (ret < 0)
			goto clean_card;
	}

	/* create PCM devices instance */
	/* NOTE:this driver works assuming pcm:substream = 1:1 */
	ret = snd_pcm_new(the_card.card,
			  "SPDIF",
			  0, /* instance index, will be stored in pcm.device */
			  1, /* output substream */
			  0, /* input substream */
			  &(the_card.pcm));
	if (ret)
		goto clean_card;

	the_card.pcm->private_data = &the_card;
	strcpy(the_card.pcm->name, "SPDIF");

	/* set pcm ops */
	snd_pcm_set_ops(the_card.pcm, SNDRV_PCM_STREAM_PLAYBACK,
			&snd_ps3_pcm_spdif_ops);

	the_card.pcm->info_flags = SNDRV_PCM_INFO_NONINTERLEAVED;
	/* pre-alloc PCM DMA buffer*/
	ret = snd_pcm_lib_preallocate_pages_for_all(the_card.pcm,
					SNDRV_DMA_TYPE_DEV,
					&dev->core,
					SND_PS3_PCM_PREALLOC_SIZE,
					SND_PS3_PCM_PREALLOC_SIZE);
	if (ret < 0) {
		pr_info("%s: prealloc failed\n", __func__);
		goto clean_card;
	}

	/*
	 * allocate null buffer
	 * its size should be larger than PS3_AUDIO_FIFO_STAGE_SIZE * 2
	 * PAGE_SIZE is enough
	 */
	the_card.null_buffer_start_vaddr =
		dma_alloc_coherent(&the_card.ps3_dev->core,
				   PAGE_SIZE,
				   &the_card.null_buffer_start_dma_addr,
				   GFP_KERNEL);
	if (!the_card.null_buffer_start_vaddr) {
		pr_info("%s: nullbuffer alloc failed\n", __func__);
		goto clean_preallocate;
	}
	pr_debug("%s: null vaddr=%p dma=%#llx\n", __func__,
		 the_card.null_buffer_start_vaddr,
		 the_card.null_buffer_start_dma_addr);
	/* set default sample rate/word width */
	snd_ps3_init_avsetting(&the_card);

	/* register the card */
	snd_card_set_dev(the_card.card, &dev->core);
	ret = snd_card_register(the_card.card);
	if (ret < 0)
		goto clean_dma_map;

	pr_info("%s started. start_delay=%dms\n",
		the_card.card->longname, the_card.start_delay);
	return 0;

clean_dma_map:
	dma_free_coherent(&the_card.ps3_dev->core,
			  PAGE_SIZE,
			  the_card.null_buffer_start_vaddr,
			  the_card.null_buffer_start_dma_addr);
clean_preallocate:
	snd_pcm_lib_preallocate_free_for_all(the_card.pcm);
clean_card:
	snd_card_free(the_card.card);
clean_irq:
	snd_ps3_free_irq();
clean_dma_region:
	ps3_dma_region_free(dev->d_region);
clean_mmio:
	snd_ps3_unmap_mmio();
clean_dev_map:
	lv1_gpu_device_unmap(2);
clean_open:
	ps3_close_hv_device(dev);
	/*
	 * there is no destructor function to pcm.
	 * midlayer automatically releases it if the card is removed
	 */
	return ret;
}; /* snd_ps3_probe */
Example #13
TInt RHybridHeap::DebugFunction(TInt aFunc, TAny* a1, TAny* a2)
{
    TInt r = KErrNone;
    switch(aFunc)
        {
        
        case RAllocator::ECount:
            struct HeapInfo info;
            Lock();
            GetInfo(&info, NULL);
            *(unsigned*)a1 = info.iFreeN;
            r = info.iAllocN;
            Unlock();
            break;
            
        case RAllocator::EMarkStart:
            __DEBUG_ONLY(DoMarkStart());
            break;
            
        case RAllocator::EMarkEnd:
            __DEBUG_ONLY( r = DoMarkEnd((TInt)a1) );
            break;
            
        case RAllocator::ECheck:
            r = DoCheckHeap((SCheckInfo*)a1);
            break;
            
        case RAllocator::ESetFail:
            __DEBUG_ONLY(DoSetAllocFail((TAllocFail)(TInt)a1, (TInt)a2));
            break;

        case RHybridHeap::EGetFail:
            __DEBUG_ONLY(r = iFailType);
            break;

        case RHybridHeap::ESetBurstFail:
#if _DEBUG
            {
            SRAllocatorBurstFail* fail = (SRAllocatorBurstFail*) a2;
            DoSetAllocFail((TAllocFail)(TInt)a1, fail->iRate, fail->iBurst);
            }
#endif
            break;
            
        case RHybridHeap::ECheckFailure:
            // iRand will be incremented for each EFailNext, EBurstFailNext,
            // EDeterministic and EBurstDeterministic failure.
            r = iRand;
            break;
            
        case RAllocator::ECopyDebugInfo:
            {
            TInt nestingLevel = ((SDebugCell*)a1)[-1].nestingLevel;
            ((SDebugCell*)a2)[-1].nestingLevel = nestingLevel;
            break;
            }

		case RHybridHeap::EGetSize:
			{
			r = iChunkSize - sizeof(RHybridHeap);
			break;
			}

		case RHybridHeap::EGetMaxLength:
			{
			r = iMaxLength;
			break;
			}

		case RHybridHeap::EGetBase:
			{
			*(TAny**)a1 = iBase;
			break;
			}

		case RHybridHeap::EAlignInteger:
			{
			r = _ALIGN_UP((TInt)a1, iAlign);
			break;
			}

		case RHybridHeap::EAlignAddr:
			{
            *(TAny**)a2 = (TAny*)_ALIGN_UP((TLinAddr)a1, iAlign);
			break;
			}

        case RHybridHeap::EWalk:
            struct HeapInfo hinfo;
            SWalkInfo winfo;
            Lock();
            winfo.iFunction = (TWalkFunc)a1;
            winfo.iParam    = a2;
			winfo.iHeap     = (RHybridHeap*)this; 	
            GetInfo(&hinfo, &winfo);
            Unlock();
            break;

#ifndef __KERNEL_MODE__
			
        case RHybridHeap::EHybridHeap:
            {
			if ( !a1 )
				return KErrGeneral;
			STestCommand* cmd = (STestCommand*)a1;
			switch ( cmd->iCommand )
				{
				case EGetConfig:
					cmd->iConfig.iSlabBits = iSlabConfigBits;
					cmd->iConfig.iDelayedSlabThreshold = iSlabInitThreshold;
					cmd->iConfig.iPagePower = iPageThreshold;
					break;
					
				case ESetConfig:
					//
					// New configuration data for slab and page allocator.
					// Reset heap to get data into use
					//
#if USE_HYBRID_HEAP
					iSlabConfigBits  = cmd->iConfig.iSlabBits & 0x3fff;
					iSlabInitThreshold = cmd->iConfig.iDelayedSlabThreshold;
					iPageThreshold = (cmd->iConfig.iPagePower & 0x1f);
					Reset();
#endif
					break;
					
				case EHeapMetaData:
					cmd->iData = this;
					break;
					
				case ETestData:
					iTestData = cmd->iData;
					break;

				default:
					return KErrNotSupported;
					
				}

            break;
			}
#endif  // __KERNEL_MODE            
            
        default:
            return KErrNotSupported;
            
        }
    return r;
}
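EAlignInteger/EAlignAddr above round a value up to the heap's iAlign, a run-time power-of-two alignment rather than a compile-time constant. A minimal C sketch of that operation (the helper name is an assumption):

#include <stdint.h>

/* Round v up to the next multiple of align; align must be a power of two,
 * as RHybridHeap's iAlign is. */
static inline uintptr_t align_up_runtime(uintptr_t v, uintptr_t align)
{
	return (v + align - 1) & ~(align - 1);
}

/* e.g. align_up_runtime(13, 8) == 16, align_up_runtime(16, 8) == 16 */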
Example #14
void start(unsigned long a1, unsigned long a2, void *promptr)
{
	unsigned long i;
	kernel_entry_t kernel_entry;
	Elf64_Ehdr *elf64;
	Elf64_Phdr *elf64ph;

	prom = (int (*)(void *)) promptr;
	chosen_handle = finddevice("/chosen");
	if (chosen_handle == (void *) -1)
		exit();
	if (getprop(chosen_handle, "stdout", &stdout, sizeof(stdout)) != 4)
		exit();
	stderr = stdout;
	if (getprop(chosen_handle, "stdin", &stdin, sizeof(stdin)) != 4)
		exit();

	printf("\n\rzImage starting: loaded at 0x%lx\n\r", (unsigned long) _start);

	/*
	 * The first available claim_base must be above the end of
	 * the loaded kernel wrapper file (_start to _end includes the
	 * initrd image if it is present) and rounded up to a nice
	 * 1 MB boundary for good measure.
	 */

	claim_base = _ALIGN_UP((unsigned long)_end, ONE_MB);

#if defined(PROG_START)
	/*
	 * Maintain a "magic" minimum address. This keeps some older
	 * firmware platforms running.
	 */

	if (claim_base < PROG_START)
		claim_base = PROG_START;
#endif

	/*
	 * Now we try to claim some memory for the kernel itself
	 * our "vmlinux_memsize" is the memory footprint in RAM, _HOWEVER_, what
	 * our Makefile stuffs in is an image containing all sort of junk including
	 * an ELF header. We need to do some calculations here to find the right
	 * size... In practice we add 1Mb, that is enough, but we should really
	 * consider fixing the Makefile to put a _raw_ kernel in there !
	 */
	vmlinux_memsize += ONE_MB;
	printf("Allocating 0x%lx bytes for kernel ...\n\r", vmlinux_memsize);
	vmlinux.addr = try_claim(vmlinux_memsize);
	if (vmlinux.addr == 0) {
		printf("Can't allocate memory for kernel image !\n\r");
		exit();
	}
	vmlinuz.addr = (unsigned long)_vmlinux_start;
	vmlinuz.size = (unsigned long)(_vmlinux_end - _vmlinux_start);
	vmlinux.size = PAGE_ALIGN(vmlinux_filesize);
	vmlinux.memsize = vmlinux_memsize;

	/*
	 * Now we try to claim memory for the initrd (and copy it there)
	 */
	initrd.size = (unsigned long)(_initrd_end - _initrd_start);
	initrd.memsize = initrd.size;
	if ( initrd.size > 0 ) {
		printf("Allocating 0x%lx bytes for initrd ...\n\r", initrd.size);
		initrd.addr = try_claim(initrd.size);
		if (initrd.addr == 0) {
			printf("Can't allocate memory for initial ramdisk !\n\r");
			exit();
		}
		a1 = initrd.addr;
		a2 = initrd.size;
		printf("initial ramdisk moving 0x%lx <- 0x%lx (0x%lx bytes)\n\r",
		       initrd.addr, (unsigned long)_initrd_start, initrd.size);
		memmove((void *)initrd.addr, (void *)_initrd_start, initrd.size);
		printf("initrd head: 0x%lx\n\r", *((unsigned long *)initrd.addr));
	}

	/* Eventually gunzip the kernel */
	if (*(unsigned short *)vmlinuz.addr == 0x1f8b) {
		int len;
		avail_ram = scratch;
		begin_avail = avail_high = avail_ram;
		end_avail = scratch + sizeof(scratch);
		printf("gunzipping (0x%lx <- 0x%lx:0x%0lx)...",
		       vmlinux.addr, vmlinuz.addr, vmlinuz.addr+vmlinuz.size);
		len = vmlinuz.size;
		gunzip((void *)vmlinux.addr, vmlinux.size,
			(unsigned char *)vmlinuz.addr, &len);
		printf("done 0x%lx bytes\n\r", len);
		printf("0x%x bytes of heap consumed, max in use 0x%x\n\r",
		       (unsigned)(avail_high - begin_avail), heap_max);
	} else {
		memmove((void *)vmlinux.addr,(void *)vmlinuz.addr,vmlinuz.size);
	}

	/* Skip over the ELF header */
	elf64 = (Elf64_Ehdr *)vmlinux.addr;
	if ( elf64->e_ident[EI_MAG0]  != ELFMAG0	||
	     elf64->e_ident[EI_MAG1]  != ELFMAG1	||
	     elf64->e_ident[EI_MAG2]  != ELFMAG2	||
	     elf64->e_ident[EI_MAG3]  != ELFMAG3	||
	     elf64->e_ident[EI_CLASS] != ELFCLASS64	||
	     elf64->e_ident[EI_DATA]  != ELFDATA2MSB	||
	     elf64->e_type            != ET_EXEC	||
	     elf64->e_machine         != EM_PPC64 )
	{
		printf("Error: not a valid PPC64 ELF file!\n\r");
		exit();
	}

	elf64ph = (Elf64_Phdr *)((unsigned long)elf64 +
				(unsigned long)elf64->e_phoff);
	for(i=0; i < (unsigned int)elf64->e_phnum ;i++,elf64ph++) {
		if (elf64ph->p_type == PT_LOAD && elf64ph->p_offset != 0)
			break;
	}
#ifdef DEBUG
	printf("... skipping 0x%lx bytes of ELF header\n\r",
			(unsigned long)elf64ph->p_offset);
#endif
	vmlinux.addr += (unsigned long)elf64ph->p_offset;
	vmlinux.size -= (unsigned long)elf64ph->p_offset;

	flush_cache((void *)vmlinux.addr, vmlinux.size);

	kernel_entry = (kernel_entry_t)vmlinux.addr;
#ifdef DEBUG
	printf( "kernel:\n\r"
		"        entry addr = 0x%lx\n\r"
		"        a1         = 0x%lx,\n\r"
		"        a2         = 0x%lx,\n\r"
		"        prom       = 0x%lx,\n\r"
		"        bi_recs    = 0x%lx,\n\r",
		(unsigned long)kernel_entry, a1, a2,
		(unsigned long)prom, NULL);
#endif

	kernel_entry( a1, a2, prom, NULL );

	printf("Error: Linux kernel returned to zImage bootloader!\n\r");

	exit();
}
Example #15
File: numa.c  Project: sarnobat/knoppix
static int __init parse_numa_properties(void)
{
	struct device_node *cpu;
	struct device_node *memory;
	int *cpu_associativity;
	int *memory_associativity;
	int depth;
	int max_domain = 0;

	cpu = find_type_devices("cpu");
	if (!cpu)
		return -1;

	memory = find_type_devices("memory");
	if (!memory)
		return -1;

	cpu_associativity = (int *)get_property(cpu, "ibm,associativity", NULL);
	if (!cpu_associativity)
		return -1;

	memory_associativity = (int *)get_property(memory, "ibm,associativity",
						   NULL);
	if (!memory_associativity)
		return -1;

	/* find common depth */
	if (cpu_associativity[0] < memory_associativity[0])
		depth = cpu_associativity[0];
	else
		depth = memory_associativity[0];

	for (cpu = find_type_devices("cpu"); cpu; cpu = cpu->next) {
		int *tmp;
		int cpu_nr, numa_domain;

		tmp = (int *)get_property(cpu, "reg", NULL);
		if (!tmp)
			continue;
		cpu_nr = *tmp;

		tmp = (int *)get_property(cpu, "ibm,associativity",
					  NULL);
		if (!tmp)
			continue;
		numa_domain = tmp[depth];

		/* FIXME */
		if (numa_domain == 0xffff) {
			dbg("cpu %d has no numa doman\n", cpu_nr);
			numa_domain = 0;
		}

		if (numa_domain >= MAX_NUMNODES)
			BUG();

		if (max_domain < numa_domain)
			max_domain = numa_domain;

		map_cpu_to_node(cpu_nr, numa_domain);
	}

	for (memory = find_type_devices("memory"); memory;
	     memory = memory->next) {
		int *tmp1, *tmp2;
		unsigned long i;
		unsigned long start = 0;
		unsigned long size = 0;
		int numa_domain;
		int ranges;

		tmp1 = (int *)get_property(memory, "reg", NULL);
		if (!tmp1)
			continue;

		ranges = memory->n_addrs;
new_range:
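		/*
		 * Note: the start address below is decoded with
		 * prom_n_size_cells() as well, so this only works when the
		 * memory node's #address-cells equals its #size-cells (both
		 * are 2 on the 64-bit machines this code targets).
		 */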

		i = prom_n_size_cells(memory);
		while (i--) {
			start = (start << 32) | *tmp1;
			tmp1++;
		}

		i = prom_n_size_cells(memory);
		while (i--) {
			size = (size << 32) | *tmp1;
			tmp1++;
		}

		start = _ALIGN_DOWN(start, MEMORY_INCREMENT);
		size = _ALIGN_UP(size, MEMORY_INCREMENT);

		if ((start + size) > MAX_MEMORY)
			BUG();

		tmp2 = (int *)get_property(memory, "ibm,associativity",
					   NULL);
		if (!tmp2)
			continue;
		numa_domain = tmp2[depth];

		/* FIXME */
		if (numa_domain == 0xffff) {
			dbg("memory has no numa doman\n");
			numa_domain = 0;
		}

		if (numa_domain >= MAX_NUMNODES)
			BUG();

		if (max_domain < numa_domain)
			max_domain = numa_domain;

		/* 
		 * For backwards compatibility, OF splits the first node
		 * into two regions (the first being 0-4GB). Check for
		 * this simple case and complain if there is a gap in
		 * memory
		 */
		if (node_data[numa_domain].node_spanned_pages) {
			unsigned long shouldstart =
				node_data[numa_domain].node_start_pfn + 
				node_data[numa_domain].node_spanned_pages;
			if (shouldstart != (start / PAGE_SIZE)) {
				printk(KERN_ERR "Hole in node, disabling "
						"region start %lx length %lx\n",
						start, size);
				continue;
			}
			node_data[numa_domain].node_spanned_pages += size / PAGE_SIZE;
		} else {
			node_data[numa_domain].node_start_pfn =
				start / PAGE_SIZE;
			node_data[numa_domain].node_spanned_pages = size / PAGE_SIZE;
		}

		for (i = start ; i < (start+size); i += MEMORY_INCREMENT)
			numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] =
				numa_domain;

		dbg("memory region %lx to %lx maps to domain %d\n",
		    start, start+size, numa_domain);

		ranges--;
		if (ranges)
			goto new_range;
	}

	numnodes = max_domain + 1;

	return 0;
}
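The two while-loops that decode the "reg" property assemble a 64-bit start address and size from consecutive big-endian 32-bit device-tree cells. The same step as a standalone helper (a hypothetical name, shown only to make the cell arithmetic explicit):

#include <stdint.h>

/* Combine 'ncells' consecutive 32-bit device-tree cells into one integer,
 * most significant cell first, advancing the caller's cursor past them. */
static uint64_t read_cells(const uint32_t **cursor, int ncells)
{
	uint64_t val = 0;

	while (ncells--) {
		val = (val << 32) | **cursor;
		(*cursor)++;
	}
	return val;
}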
Example #16
static int __meminit create_physical_mapping(unsigned long start,
					     unsigned long end,
					     int nid)
{
	unsigned long vaddr, addr, mapping_size = 0;
	pgprot_t prot;
	unsigned long max_mapping_size;
#ifdef CONFIG_STRICT_KERNEL_RWX
	int split_text_mapping = 1;
#else
	int split_text_mapping = 0;
#endif

	start = _ALIGN_UP(start, PAGE_SIZE);
	for (addr = start; addr < end; addr += mapping_size) {
		unsigned long gap, previous_size;
		int rc;

		gap = end - addr;
		previous_size = mapping_size;
		max_mapping_size = PUD_SIZE;

retry:
		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
		    mmu_psize_defs[MMU_PAGE_1G].shift &&
		    PUD_SIZE <= max_mapping_size)
			mapping_size = PUD_SIZE;
		else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
			 mmu_psize_defs[MMU_PAGE_2M].shift)
			mapping_size = PMD_SIZE;
		else
			mapping_size = PAGE_SIZE;

		if (split_text_mapping && (mapping_size == PUD_SIZE) &&
			(addr <= __pa_symbol(__init_begin)) &&
			(addr + mapping_size) >= __pa_symbol(_stext)) {
			max_mapping_size = PMD_SIZE;
			goto retry;
		}

		if (split_text_mapping && (mapping_size == PMD_SIZE) &&
		    (addr <= __pa_symbol(__init_begin)) &&
		    (addr + mapping_size) >= __pa_symbol(_stext))
			mapping_size = PAGE_SIZE;

		if (mapping_size != previous_size) {
			print_mapping(start, addr, previous_size);
			start = addr;
		}

		vaddr = (unsigned long)__va(addr);

		if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
		    overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size))
			prot = PAGE_KERNEL_X;
		else
			prot = PAGE_KERNEL;

		rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
		if (rc)
			return rc;
	}

	print_mapping(start, addr, mapping_size);
	return 0;
}
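The loop above picks, for each address, the largest page size that is naturally aligned, fits in the remaining gap, and stays under the current cap: the cap drops to PMD_SIZE when a 1G mapping would straddle the kernel text region, and the size falls back to PAGE_SIZE if a 2M mapping still would. A simplified, hypothetical version of just that selection step, omitting the mmu_psize_defs feature checks and assuming the kernel's PUD_SIZE/PMD_SIZE/PAGE_SIZE constants:

/* Largest mapping size usable at 'addr' given the bytes remaining and an
 * upper bound imposed by the caller (e.g. PMD_SIZE when the text boundary
 * has to be split). All sizes are assumed to be powers of two. */
static unsigned long pick_mapping_size(unsigned long addr, unsigned long gap,
				       unsigned long max_size)
{
	if ((addr & (PUD_SIZE - 1)) == 0 && gap >= PUD_SIZE && max_size >= PUD_SIZE)
		return PUD_SIZE;	/* 1G mapping */
	if ((addr & (PMD_SIZE - 1)) == 0 && gap >= PMD_SIZE && max_size >= PMD_SIZE)
		return PMD_SIZE;	/* 2M mapping */
	return PAGE_SIZE;		/* fall back to 4K */
}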
Example #17
int ps3_lpm_open(enum ps3_lpm_tb_type tb_type, void *tb_cache,
	u64 tb_cache_size)
{
	int result;
	u64 tb_size;

	BUG_ON(!lpm_priv);
	BUG_ON(tb_type != PS3_LPM_TB_TYPE_NONE
		&& tb_type != PS3_LPM_TB_TYPE_INTERNAL);

	if (tb_type == PS3_LPM_TB_TYPE_NONE && tb_cache)
		dev_dbg(sbd_core(), "%s:%u: bad in vals\n", __func__, __LINE__);

	if (!atomic_add_unless(&lpm_priv->open, 1, 1)) {
		dev_dbg(sbd_core(), "%s:%u: busy\n", __func__, __LINE__);
		return -EBUSY;
	}

	if (tb_type == PS3_LPM_TB_TYPE_NONE) {
		lpm_priv->tb_cache_size = 0;
		lpm_priv->tb_cache_internal = NULL;
		lpm_priv->tb_cache = NULL;
	} else if (tb_cache) {
		if (tb_cache != (void *)_ALIGN_UP((unsigned long)tb_cache, 128)
			|| tb_cache_size != _ALIGN_UP(tb_cache_size, 128)) {
			dev_err(sbd_core(), "%s:%u: unaligned tb_cache\n",
				__func__, __LINE__);
			result = -EINVAL;
			goto fail_align;
		}
		lpm_priv->tb_cache_size = tb_cache_size;
		lpm_priv->tb_cache_internal = NULL;
		lpm_priv->tb_cache = tb_cache;
	} else {
		lpm_priv->tb_cache_size = PS3_LPM_DEFAULT_TB_CACHE_SIZE;
		lpm_priv->tb_cache_internal = kzalloc(
			lpm_priv->tb_cache_size + 127, GFP_KERNEL);
		if (!lpm_priv->tb_cache_internal) {
			dev_err(sbd_core(), "%s:%u: alloc internal tb_cache "
				"failed\n", __func__, __LINE__);
			result = -ENOMEM;
			goto fail_malloc;
		}
		lpm_priv->tb_cache = (void *)_ALIGN_UP(
			(unsigned long)lpm_priv->tb_cache_internal, 128);
	}

	result = lv1_construct_lpm(lpm_priv->node_id, tb_type, 0, 0,
				ps3_mm_phys_to_lpar(__pa(lpm_priv->tb_cache)),
				lpm_priv->tb_cache_size, &lpm_priv->lpm_id,
				&lpm_priv->outlet_id, &tb_size);

	if (result) {
		dev_err(sbd_core(), "%s:%u: lv1_construct_lpm failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		result = -EINVAL;
		goto fail_construct;
	}

	lpm_priv->shadow.pm_control = PS3_LPM_SHADOW_REG_INIT;
	lpm_priv->shadow.pm_start_stop = PS3_LPM_SHADOW_REG_INIT;
	lpm_priv->shadow.group_control = PS3_LPM_SHADOW_REG_INIT;
	lpm_priv->shadow.debug_bus_control = PS3_LPM_SHADOW_REG_INIT;

	dev_dbg(sbd_core(), "%s:%u: lpm_id 0x%llx, outlet_id 0x%llx, "
		"tb_size 0x%llx\n", __func__, __LINE__, lpm_priv->lpm_id,
		lpm_priv->outlet_id, tb_size);

	return 0;

fail_construct:
	kfree(lpm_priv->tb_cache_internal);
	lpm_priv->tb_cache_internal = NULL;
fail_malloc:
fail_align:
	atomic_dec(&lpm_priv->open);
	return result;
}
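When the caller does not supply a trace buffer cache, the code above over-allocates by 127 bytes with kzalloc and rounds the pointer up to the next 128-byte boundary using _ALIGN_UP, keeping the raw pointer around so it can be kfree'd later. A userspace analogue of that pattern (a sketch using malloc, not the driver's code):

#include <stdint.h>
#include <stdlib.h>

#define ALIGN_UP(x, a)	(((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

/* Allocate 'size' bytes aligned to 'align' (a power of two). Returns the
 * aligned pointer; '*raw' receives the address that must later be passed
 * to free(). */
static void *alloc_aligned(size_t size, size_t align, void **raw)
{
	*raw = malloc(size + align - 1);
	if (!*raw)
		return NULL;
	return (void *)ALIGN_UP((uintptr_t)*raw, align);
}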
Example #18
File: stmfb.c Project: Firmeware/driver
/*
 *  stmfb_createfb
 *  Create framebuffer related state and register with upper layers
 */
static int stmfb_createfb(struct stmfb_info *i, int display, const char *name)
{
  int ret;
  unsigned long nPages;
  int fb_registered = 0;
  const struct stmcore_display_pipeline_data * const pd =
    *((struct stmcore_display_pipeline_data **) i->platformDevice->dev.platform_data);

  DPRINTK("\n");

  /*
   * Copy all available display modes into the modelist, before we parse
   * the module parameters to get the default mode.
   */
  stmfb_enumerate_modes(i);

  /*
   * Need to set the framebuffer operation table and get hold of the display
   * plane before parsing the module parameters so fb_find_mode can work.
   */
  i->info.fbops = &stmfb_ops;

  if((ret = stmfb_parse_module_parameters(i, display)) < 0)
    goto failed0;

  /* can create blitter only after module parameters have been parsed */
  if((ret = stmfb_probe_get_blitter(i,pd))<0)
    goto failed0;

  nPages = (i->ulFBSize + PAGE_SIZE - 1) / PAGE_SIZE;

  i->FBPart = bpa2_find_part ("bigphysarea");
  i->ulPFBBase = i->FBPart ? bpa2_alloc_pages (i->FBPart, nPages, 0, GFP_KERNEL)
                           : 0;
  if (!i->ulPFBBase)
  {
    printk(KERN_WARNING "Failed to allocate fb%d memory, requested size = %lu\n",display, i->ulFBSize);
    ret = -ENOMEM;
    goto failed0;
  }

  /* try to allocate memory from BPA2 as additional memory for graphics
     operations. Basically, this is not vital, but driver loading will still
     fail if an auxsize has been specified in the module parameters which can
     not be satisfied, either because no BPA2 partition 'gfx-memory' exists,
     or if it's not large enough for the given auxsize. */
  /* Please note that due to hardware limitations, this can actually be a bit
     complex (2 & 3):
     1) we look for partitions labelled 'gfx-memory-[0...x]', each of which
        should be 64MB in size and be aligned to a 64MB bank. This makes the
        process quite easy for us.
     2) Failing that, we try to use a partition labelled 'gfx-memory'. Now we
        have to make sure ourselves that each allocation does not cross a 64MB
        bank boundary.
     3) Failing that, too, or in case the 'gfx-memory' partition is not
        large enough to accommodate the amount of gfx memory requested in
        the module options, we will do the same but this time use the
        'bigphysarea' partition. */
  /* So, either one can configure several 'gfx-memory-%d' partitions
     (preferred, to have maximum control over placement of gfx memory), just
     use one large 'gfx-memory' partition, or configure nothing at all and be
     limited to 'bigphysarea' memory.
     The combined memory of all the allocations will be made available to
     DirectFB. */
  /* FIXME: this code is way too complex! */
#define MEMORY64MB_SIZE      (1<<26)
  if (i->AuxSize[0])
  {
    /* aux memory was requested */
    unsigned int       idx;        /* index into i->AuxPart[] */
    /* order of partnames is important here! */
    static const char *partnames[] = { "gfx-memory-%d", "gfx-memory", "bigphysarea" };
    unsigned int       partnameidx /* index into partnames[] */,
                       partidx     /* index in case of 'gfx-memory-%d' */;
    char               partname[14]; /* real name in case of 'gfx-memory-%d' */
    struct bpa2_part  *part;
    unsigned long      still_needed = i->AuxSize[0];

    unsigned long      this_alloc;
    int                alignment;

    partidx = 0;
    /* find a partition suitable for us, in the preferred order, as outlined
       above */
    for (partnameidx = 0; partnameidx < ARRAY_SIZE (partnames); ++partnameidx)
    {
      sprintf (partname, partnames[partnameidx], 0);
      part = bpa2_find_part (partname);
      if (part)
        break;
    }

    if (!part)
    {
      printk (KERN_ERR "no BPA2 partitions found for auxmem!\n");
      goto failed_bigphys;
    }

    idx = 0;

restart:
    if (still_needed >= MEMORY64MB_SIZE)
    {
      /* we first try to satisfy from a 64MB aligned region */
      this_alloc = MEMORY64MB_SIZE;
      /* don't request a specific alignment if individual 'gfx-memory-%d'
         partitions are configured in the kernel */
      alignment  = (partnameidx == 0) ? 0 : MEMORY64MB_SIZE / PAGE_SIZE;
    }
    else
    {
      /* if requested size is < 64MB, we are optimistic and hope it will
         fit into one 64MB bank without alignment restrictions. This
         doesn't mean we will be happy with a region crossing that
         boundary, i.e. we will still make sure all restrictions are met.
         We are really being just optimistic here. */
      this_alloc = still_needed;
      alignment  = 0;
    }

    while (part && idx < ARRAY_SIZE (i->AuxPart) && still_needed)
    {
      int           this_pages;
      unsigned long base;

      printk (KERN_INFO "trying to alloc %lu bytes (align: %.4x, still needed:"
                        " %lu) from '%s' for GFX%d auxmem\n",
              this_alloc, alignment, still_needed, partname, display);

      this_pages = (this_alloc + PAGE_SIZE - 1) / PAGE_SIZE;
      base = bpa2_alloc_pages (part, this_pages, alignment, GFP_KERNEL);
      if (base)
      {
        /* make sure it doesn't cross a 64MB boundary. At some point we will
           be using the new BPA2 allocator API... */
        if ((base & ~(MEMORY64MB_SIZE - 1)) != ((base + this_alloc - 1) & ~(MEMORY64MB_SIZE - 1)))
        {
          unsigned long topAddress;

          printk ("%s: %8.lx + %lu (%d pages) crosses a 64MB boundary, retrying!\n",
                  __FUNCTION__, base, this_alloc, this_pages);

          /* free and try a new reservation with different attributes,
             hoping nobody requests bpa2 memory in between... */
          bpa2_free_pages (part, base);

          if (partnameidx == 0)
          {
            /* if we have 'gfx-memory-%d' partitions, try the next one */
            /* Getting here doesn't necessarily mean there is an error in
               the BPA2 partition definition. This can happen if e.g. two
               framebuffers are configured and the auxmem size requested is
               larger but not a multiple of 64MB (since we are optimistic and
               try to re-use partially used partitions), so don't be tempted
               to put a WARN_ON() here! */
            sprintf (partname, partnames[partnameidx], ++partidx);
            part = bpa2_find_part (partname);
            continue;
          }

          if (still_needed == this_alloc && alignment == 0)
          {
            /* this can only happen on the last chunk of memory needed. So
               we first try to fit it into its own region.
               I.e. we first try to put the last chunk into a partly used
               bank. If we succeed, but the chunk now spans a boundary
               (which gets us here), we try to put it into its own
               bank. If that fails, too, we will come back again (this
               time with alignment == 1) and split the chunk into two. */
            alignment = MEMORY64MB_SIZE / PAGE_SIZE;
            continue;
          }

          /* standard case, allocate up to the end of current 64MB bank,
             effectively splitting the current chunk into two. */
#define _ALIGN_UP(addr,size)    (((addr)+((size)-1))&(~((size)-1)))
          topAddress = _ALIGN_UP (base, MEMORY64MB_SIZE);
          this_alloc = topAddress - base;
          this_pages = (this_alloc + PAGE_SIZE - 1) / PAGE_SIZE;

          /* alignment should be 0 or 1 here */
          WARN_ON (alignment != 0
                   && alignment != 1);
          base = bpa2_alloc_pages (part, this_pages, alignment, GFP_KERNEL);
          if (!base)
          {
            /* shouldn't happen here, since we just moments ago succeeded in
               allocating even more memory than now, so we don't know what
               to do and kind of panic ...*/
            break;
          }
        }

        /* we now have found a more or less nice place to chill. */
        i->AuxPart[idx] = part;
        i->AuxBase[idx] = base;
        i->AuxSize[idx] = this_alloc;
        ++idx;

        still_needed -= this_alloc;

        printk (KERN_INFO "success: base: %.8lx, still needed: %lu\n",
                base, still_needed);

        if (still_needed >= MEMORY64MB_SIZE)
        {
          /* we first try to satisfy from a 64MB aligned region */
          this_alloc = MEMORY64MB_SIZE;
          alignment  = (partnameidx == 0) ? 0 : MEMORY64MB_SIZE / PAGE_SIZE;
        }
        else
        {
          /* if requested size is < 64MB we are on the last chunk of memory
             blocks. We are optimistic and hope it will fit into a possibly
             already partly used 64MB bank without alignment restrictions,
             i.e. we hope it will not span a 64MB boundary. If that's not
             the case, we will again use an alignment of 64MB, so as to
             reduce scattering of the auxmem chunks. */
          this_alloc = still_needed;
          alignment  = 0;

          /* if using 'gfx-memory-%d' partitions, try to share the last
             chunk with another chunk (in a possibly partly used
             partition). */
          if (partnameidx == 0)
            partidx = -1;
        }

        if (partnameidx == 0)
        {
          /* in case we are using 'gfx-memory-%d' as partition name, make
             sure we advance to the next partition */
          sprintf (partname, partnames[partnameidx], ++partidx);
          part = bpa2_find_part (partname);
        }
      }
      else
      {
        if (alignment > 1)
        {
          if (still_needed == this_alloc)
            /* make sure not to get into an endless loop which would be
               switching around the alignment all the time. */
            alignment = 1;
          else
            /* try again with different alignment rules */
            alignment = 0;
        }
        else
        {
          /* failed: couldn't allocate any memory at all, even without any
             alignment restrictions.
             1) In case there are 'gfx-memory-%d' partitions, we advance
                to the next one.
             2) In case we had been using 'gfx-memory' as partition name,
                we now try 'bigphysarea'*/
          if (partnameidx == 0)
          {
            /* use next 'gfx-memory-%d' partition */
            BUG_ON (strcmp (partnames[partnameidx], "gfx-memory-%d"));
            sprintf (partname, partnames[partnameidx], ++partidx);
            part = bpa2_find_part (partname);
          }
          else if (partnameidx == 1)
          {
            /* advance from 'gfx-memory' to 'bigphysarea' partition name */
            BUG_ON (strcmp (partnames[partnameidx], "gfx-memory"));
            sprintf (partname, partnames[++partnameidx], 0);
            BUG_ON (strcmp (partnames[partnameidx], "bigphysarea"));
            part = bpa2_find_part (partname);
            goto restart;
          }
          else
            /* that's it, there are no partitions left to check for auxmem */
            break;
        }
      }
    }

    if (still_needed)
    {
      /* not enough chill space configured for BPA2 */
      printk (KERN_WARNING "Failed to allocate enough fb%d auxmem "
                           "(still need %lu bytes)\n",
              display, still_needed);
      ret = -ENOMEM;
      goto failed_auxmem;
    }
  }

  if(stm_display_plane_connect_to_output(i->pFBPlane, i->pFBMainOutput) < 0)
  {
    printk(KERN_ERR "fb%d display cannot be connected to display output pipeline, this is very bad!\n",display);
    ret = -EIO;
    goto failed_auxmem;
  }

  if(stm_display_plane_lock(i->pFBPlane) < 0)
  {
    printk(KERN_WARNING "fb%d display plane may already be in use by another driver\n",display);
    ret = -EBUSY;
    goto failed_auxmem;
  }

  i->main_config.activate = STMFBIO_ACTIVATE_IMMEDIATE;
  if(stmfb_set_output_configuration(&i->main_config,i)<0)
  {
    printk(KERN_WARNING "fb%d main output configuration is unsupported\n",display);
    ret = -EINVAL;
    goto failed_auxmem;
  }

  /*
   * Get the framebuffer layer default extended var state and capabilities
   */
  i->current_var_ex.layerid = 0;

  if((ret = stmfb_encode_var_ex(&i->current_var_ex, i)) < 0)
  {
    printk(KERN_WARNING "fb%d failed to get display plane's extended capabilities\n",display);
    goto failed_auxmem;
  }

  /* Setup the framebuffer info for registration */
  i->info.screen_base = ioremap_nocache(i->ulPFBBase,i->ulFBSize);
  i->info.flags       = FBINFO_DEFAULT |
                        FBINFO_PARTIAL_PAN_OK |
                        FBINFO_HWACCEL_YPAN;

  strcpy(i->info.fix.id, name);  /* identification string */

  i->info.fix.smem_start  = i->ulPFBBase;                /* Start of frame buffer mem (physical address)               */
  i->info.fix.smem_len    = i->ulFBSize;                 /* Length of frame buffer mem as the device sees it           */
  i->info.fix.mmio_start  = pd->mmio;                    /* memory mapped register access     */
  i->info.fix.mmio_len    = pd->mmio_len;                /* Length of Memory Mapped I/O       */
  i->info.fix.type        = FB_TYPE_PACKED_PIXELS;       /* see FB_TYPE_*                     */
  i->info.fix.type_aux    = 0;                           /* Interleave for interleaved Planes */
  i->info.fix.xpanstep    = 0;                           /* zero if no hardware panning       */
  i->info.fix.ypanstep    = 1;                           /* zero if no hardware panning       */
  i->info.fix.ywrapstep   = 0;                           /* zero if no hardware ywrap         */

  i->info.pseudo_palette = &i->pseudo_palette;

  if(fb_alloc_cmap(&i->info.cmap, 256, 1)<0)
  {
    printk(KERN_ERR "fb%d unable to allocate colour map\n",display);
    ret = -ENOMEM;
    goto failed_ioremap;
  }

  if(i->info.cmap.len != 256)
  {
    printk(KERN_ERR "fb%d WTF colour map length is wrong????\n",display);
  }

  if (register_framebuffer(&i->info) < 0)
  {
    printk(KERN_ERR "fb%d register =_framebuffer failed!\n",display);
    ret = -ENODEV;
    goto failed_cmap;
  }

  /*
   * If there was no console activated on the registered framebuffer, we have
   * to force the display mode be updated. This sequence is cribbed from the
   * matroxfb driver.
   */
  if(!i->current_videomode_valid)
  {
    i->info.var.activate |= FB_ACTIVATE_FORCE;
    if((ret = fb_set_var(&i->info, &i->info.var))<0)
    {
      printk(KERN_WARNING "fb%d failed to set default display mode, display pipeline may already be in use\n",display);
      goto failed_register;
    }
  }

//#if (defined(UFS912) || defined(HS7110) || defined(HS7119) || defined(HS7420) || defined(HS7429) || defined(HS7810A) || defined(HS7819) || defined(ATEMIO520) || defined(ATEMIO530) || defined(SPARK) || defined(AT7500)) && defined(__TDT__)
  // WORKAROUND: Clear the framebuffer 
  memset(i->info.screen_base, 0x00, i->ulFBSize);
//#endif

  stmfb_init_class_device(i);

  DPRINTK("out\n");

  return 0;

failed_register:
  fb_registered = 1;
  unregister_framebuffer (&i->info);

failed_cmap:
  fb_dealloc_cmap (&i->info.cmap);

failed_ioremap:
  iounmap (i->info.screen_base);
  i->info.screen_base = NULL;

failed_auxmem:
  stmfb_destroy_auxmem (i);

failed_bigphys:
  bpa2_free_pages (i->FBPart, i->ulPFBBase);
  i->ulPFBBase = 0;
  i->FBPart = NULL;

failed0:
  if (!fb_registered)
    /* unregister_framebuffer() does fb_destroy_modelist() for us, so don't
       destroy it twice! */
    fb_destroy_modelist (&i->info.modelist);

  return ret;
}
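Two checks recur in the auxmem loop above: whether an allocation crosses a 64MB bank boundary, and how much of it still fits below the next boundary (the _ALIGN_UP split). Pulled out as hypothetical helpers, only to make the bit arithmetic explicit; MEM_BANK_SIZE mirrors the MEMORY64MB_SIZE define above:

#define MEM_BANK_SIZE	(1UL << 26)	/* 64MB */
#define ALIGN_UP(x, a)	(((x) + ((a) - 1)) & ~((a) - 1))

/* True if [base, base + size) spans more than one 64MB bank. */
static int crosses_bank(unsigned long base, unsigned long size)
{
	return (base & ~(MEM_BANK_SIZE - 1)) !=
	       ((base + size - 1) & ~(MEM_BANK_SIZE - 1));
}

/* Bytes from 'base' up to the next 64MB boundary, i.e. the largest chunk
 * that can still be carved out of the current bank (zero if 'base' is
 * already on a boundary). */
static unsigned long bytes_to_bank_end(unsigned long base)
{
	return ALIGN_UP(base, MEM_BANK_SIZE) - base;
}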
Example #19
int main(void)
{
	DEFINE(THREAD, offsetof(struct task_struct, thread));
	DEFINE(MM, offsetof(struct task_struct, mm));
	DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id));
#ifdef CONFIG_PPC64
	DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
	DEFINE(SIGSEGV, SIGSEGV);
	DEFINE(NMI_MASK, NMI_MASK);
	DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr));
	DEFINE(THREAD_DSCR_INHERIT, offsetof(struct thread_struct, dscr_inherit));
	DEFINE(TASKTHREADPPR, offsetof(struct task_struct, thread.ppr));
#else
	DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
	DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16));
	DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
#endif /* CONFIG_PPC64 */

	DEFINE(KSP, offsetof(struct thread_struct, ksp));
	DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
#ifdef CONFIG_BOOKE
	DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0]));
#endif
	DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
	DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fp_state));
	DEFINE(THREAD_FPSAVEAREA, offsetof(struct thread_struct, fp_save_area));
	DEFINE(FPSTATE_FPSCR, offsetof(struct thread_fp_state, fpscr));
#ifdef CONFIG_ALTIVEC
	DEFINE(THREAD_VRSTATE, offsetof(struct thread_struct, vr_state));
	DEFINE(THREAD_VRSAVEAREA, offsetof(struct thread_struct, vr_save_area));
	DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
	DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
	DEFINE(VRSTATE_VSCR, offsetof(struct thread_vr_state, vscr));
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	DEFINE(THREAD_USED_VSR, offsetof(struct thread_struct, used_vsr));
#endif /* CONFIG_VSX */
#ifdef CONFIG_PPC64
	DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid));
#else /* CONFIG_PPC64 */
	DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
#ifdef CONFIG_SPE
	DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0]));
	DEFINE(THREAD_ACC, offsetof(struct thread_struct, acc));
	DEFINE(THREAD_SPEFSCR, offsetof(struct thread_struct, spefscr));
	DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe));
#endif /* CONFIG_SPE */
#endif /* CONFIG_PPC64 */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, debug.dbcr0));
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu));
#endif
#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
	DEFINE(THREAD_KVM_VCPU, offsetof(struct thread_struct, kvm_vcpu));
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	DEFINE(THREAD_TAR, offsetof(struct thread_struct, tar));
	DEFINE(THREAD_BESCR, offsetof(struct thread_struct, bescr));
	DEFINE(THREAD_EBBHR, offsetof(struct thread_struct, ebbhr));
	DEFINE(THREAD_EBBRR, offsetof(struct thread_struct, ebbrr));
	DEFINE(THREAD_SIAR, offsetof(struct thread_struct, siar));
	DEFINE(THREAD_SDAR, offsetof(struct thread_struct, sdar));
	DEFINE(THREAD_SIER, offsetof(struct thread_struct, sier));
	DEFINE(THREAD_MMCR0, offsetof(struct thread_struct, mmcr0));
	DEFINE(THREAD_MMCR2, offsetof(struct thread_struct, mmcr2));
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	DEFINE(PACATMSCRATCH, offsetof(struct paca_struct, tm_scratch));
	DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar));
	DEFINE(THREAD_TM_TEXASR, offsetof(struct thread_struct, tm_texasr));
	DEFINE(THREAD_TM_TFIAR, offsetof(struct thread_struct, tm_tfiar));
	DEFINE(THREAD_TM_TAR, offsetof(struct thread_struct, tm_tar));
	DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr));
	DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr));
	DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs));
	DEFINE(THREAD_TRANSACT_VRSTATE, offsetof(struct thread_struct,
						 transact_vr));
	DEFINE(THREAD_TRANSACT_VRSAVE, offsetof(struct thread_struct,
					    transact_vrsave));
	DEFINE(THREAD_TRANSACT_FPSTATE, offsetof(struct thread_struct,
						 transact_fp));
	/* Local pt_regs on stack for Transactional Memory funcs. */
	DEFINE(TM_FRAME_SIZE, STACK_FRAME_OVERHEAD +
	       sizeof(struct pt_regs) + 16);
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
	DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
	DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
	DEFINE(TI_TASK, offsetof(struct thread_info, task));
	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));

#ifdef CONFIG_PPC64
	DEFINE(DCACHEL1LINESIZE, offsetof(struct ppc64_caches, dline_size));
	DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_dline_size));
	DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, dlines_per_page));
	DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
	DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
	DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
	/* paca */
	DEFINE(PACA_SIZE, sizeof(struct paca_struct));
	DEFINE(PACA_LOCK_TOKEN, offsetof(struct paca_struct, lock_token));
	DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
	DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
	DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
	DEFINE(PACACURRENT, offsetof(struct paca_struct, __current));
	DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr));
	DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
	DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
	DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
	DEFINE(PACAKBASE, offsetof(struct paca_struct, kernelbase));
	DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
	DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
	DEFINE(PACAIRQHAPPENED, offsetof(struct paca_struct, irq_happened));
	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
#ifdef CONFIG_PPC_MM_SLICES
	DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
					    context.low_slices_psize));
	DEFINE(PACAHIGHSLICEPSIZE, offsetof(struct paca_struct,
					    context.high_slices_psize));
	DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
#endif /* CONFIG_PPC_MM_SLICES */

#ifdef CONFIG_PPC_BOOK3E
	DEFINE(PACAPGD, offsetof(struct paca_struct, pgd));
	DEFINE(PACA_KERNELPGD, offsetof(struct paca_struct, kernel_pgd));
	DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
	DEFINE(PACA_EXTLB, offsetof(struct paca_struct, extlb));
	DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
	DEFINE(PACA_EXCRIT, offsetof(struct paca_struct, excrit));
	DEFINE(PACA_EXDBG, offsetof(struct paca_struct, exdbg));
	DEFINE(PACA_MC_STACK, offsetof(struct paca_struct, mc_kstack));
	DEFINE(PACA_CRIT_STACK, offsetof(struct paca_struct, crit_kstack));
	DEFINE(PACA_DBG_STACK, offsetof(struct paca_struct, dbg_kstack));
	DEFINE(PACA_TCD_PTR, offsetof(struct paca_struct, tcd_ptr));

	DEFINE(TCD_ESEL_NEXT,
		offsetof(struct tlb_core_data, esel_next));
	DEFINE(TCD_ESEL_MAX,
		offsetof(struct tlb_core_data, esel_max));
	DEFINE(TCD_ESEL_FIRST,
		offsetof(struct tlb_core_data, esel_first));
	DEFINE(TCD_LOCK, offsetof(struct tlb_core_data, lock));
#endif /* CONFIG_PPC_BOOK3E */

#ifdef CONFIG_PPC_STD_MMU_64
	DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
	DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
	DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp));
#ifdef CONFIG_PPC_MM_SLICES
	DEFINE(MMUPSIZESLLP, offsetof(struct mmu_psize_def, sllp));
#else
	DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp));
#endif /* CONFIG_PPC_MM_SLICES */
	DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
	DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
	DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
	DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr));
	DEFINE(PACA_SLBSHADOWPTR, offsetof(struct paca_struct, slb_shadow_ptr));
	DEFINE(SLBSHADOW_STACKVSID,
	       offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid));
	DEFINE(SLBSHADOW_STACKESID,
	       offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid));
	DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area));
	DEFINE(LPPACA_PMCINUSE, offsetof(struct lppaca, pmcregs_in_use));
	DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx));
	DEFINE(LPPACA_YIELDCOUNT, offsetof(struct lppaca, yield_count));
	DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx));
#endif /* CONFIG_PPC_STD_MMU_64 */
	DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
#ifdef CONFIG_PPC_BOOK3S_64
	DEFINE(PACAMCEMERGSP, offsetof(struct paca_struct, mc_emergency_sp));
	DEFINE(PACA_IN_MCE, offsetof(struct paca_struct, in_mce));
#endif
	DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
	DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state));
	DEFINE(PACA_DSCR, offsetof(struct paca_struct, dscr_default));
	DEFINE(PACA_STARTTIME, offsetof(struct paca_struct, starttime));
	DEFINE(PACA_STARTTIME_USER, offsetof(struct paca_struct, starttime_user));
	DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
	DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
	DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
	DEFINE(PACA_NAPSTATELOST, offsetof(struct paca_struct, nap_state_lost));
	DEFINE(PACA_SPRG_VDSO, offsetof(struct paca_struct, sprg_vdso));
#endif /* CONFIG_PPC64 */

	/* RTAS */
	DEFINE(RTASBASE, offsetof(struct rtas_t, base));
	DEFINE(RTASENTRY, offsetof(struct rtas_t, entry));

	/* Interrupt register frame */
	DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
	DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
#ifdef CONFIG_PPC64
	/* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
	DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
	DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);

	/* hcall statistics */
	DEFINE(HCALL_STAT_SIZE, sizeof(struct hcall_stats));
	DEFINE(HCALL_STAT_CALLS, offsetof(struct hcall_stats, num_calls));
	DEFINE(HCALL_STAT_TB, offsetof(struct hcall_stats, tb_total));
	DEFINE(HCALL_STAT_PURR, offsetof(struct hcall_stats, purr_total));
#endif /* CONFIG_PPC64 */
	DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0]));
	DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1]));
	DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2]));
	DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3]));
	DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4]));
	DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5]));
	DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6]));
	DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7]));
	DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8]));
	DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9]));
	DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10]));
	DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11]));
	DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12]));
	DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13]));
#ifndef CONFIG_PPC64
	DEFINE(GPR14, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[14]));
	DEFINE(GPR15, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[15]));
	DEFINE(GPR16, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[16]));
	DEFINE(GPR17, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[17]));
	DEFINE(GPR18, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[18]));
	DEFINE(GPR19, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[19]));
	DEFINE(GPR20, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[20]));
	DEFINE(GPR21, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[21]));
	DEFINE(GPR22, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[22]));
	DEFINE(GPR23, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[23]));
	DEFINE(GPR24, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[24]));
	DEFINE(GPR25, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[25]));
	DEFINE(GPR26, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[26]));
	DEFINE(GPR27, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[27]));
	DEFINE(GPR28, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[28]));
	DEFINE(GPR29, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[29]));
	DEFINE(GPR30, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[30]));
	DEFINE(GPR31, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[31]));
#endif /* CONFIG_PPC64 */
	/*
	 * Note: these symbols include _ because they overlap with special
	 * register names
	 */
	DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip));
	DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr));
	DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr));
	DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link));
	DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr));
	DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer));
	DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
	DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
	DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3));
	DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
	DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
#ifndef CONFIG_PPC64
	DEFINE(_MQ, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, mq));
	/*
	 * The PowerPC 400-class & Book-E processors have neither the DAR
	 * nor the DSISR SPRs. Hence, we overload them to hold the similar
	 * DEAR and ESR SPRs for such processors.  For critical interrupts
	 * we use them to hold SRR0 and SRR1.
	 */
	DEFINE(_DEAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
	DEFINE(_ESR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
#else /* CONFIG_PPC64 */
	DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe));

	/* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */
	DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs));
	DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8);
#endif /* CONFIG_PPC64 */

#if defined(CONFIG_PPC32)
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
	DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
	DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
	/* we overload MMUCR for 44x on MAS0 since they are mutually exclusive */
	DEFINE(MMUCR, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
	DEFINE(MAS1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas1));
	DEFINE(MAS2, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas2));
	DEFINE(MAS3, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas3));
	DEFINE(MAS6, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas6));
	DEFINE(MAS7, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas7));
	DEFINE(_SRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr0));
	DEFINE(_SRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr1));
	DEFINE(_CSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr0));
	DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1));
	DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0));
	DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1));
	DEFINE(SAVED_KSP_LIMIT, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, saved_ksp_limit));
#endif
#endif
	DEFINE(CLONE_VM, CLONE_VM);
	DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);

#ifndef CONFIG_PPC64
	DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
#endif /* ! CONFIG_PPC64 */

	/* About the CPU features table */
	DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
	DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
	DEFINE(CPU_SPEC_RESTORE, offsetof(struct cpu_spec, cpu_restore));

	DEFINE(pbe_address, offsetof(struct pbe, address));
	DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
	DEFINE(pbe_next, offsetof(struct pbe, next));

#ifndef CONFIG_PPC64
	DEFINE(TASK_SIZE, TASK_SIZE);
	DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28);
#endif /* ! CONFIG_PPC64 */

	/* datapage offsets for use by vdso */
	DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct vdso_data, tb_orig_stamp));
	DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct vdso_data, tb_ticks_per_sec));
	DEFINE(CFG_TB_TO_XS, offsetof(struct vdso_data, tb_to_xs));
	DEFINE(CFG_STAMP_XSEC, offsetof(struct vdso_data, stamp_xsec));
	DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct vdso_data, tb_update_count));
	DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct vdso_data, tz_minuteswest));
	DEFINE(CFG_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime));
	DEFINE(CFG_SYSCALL_MAP32, offsetof(struct vdso_data, syscall_map_32));
	DEFINE(WTOM_CLOCK_SEC, offsetof(struct vdso_data, wtom_clock_sec));
	DEFINE(WTOM_CLOCK_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
	DEFINE(STAMP_XTIME, offsetof(struct vdso_data, stamp_xtime));
	DEFINE(STAMP_SEC_FRAC, offsetof(struct vdso_data, stamp_sec_fraction));
	DEFINE(CFG_ICACHE_BLOCKSZ, offsetof(struct vdso_data, icache_block_size));
	DEFINE(CFG_DCACHE_BLOCKSZ, offsetof(struct vdso_data, dcache_block_size));
	DEFINE(CFG_ICACHE_LOGBLOCKSZ, offsetof(struct vdso_data, icache_log_block_size));
	DEFINE(CFG_DCACHE_LOGBLOCKSZ, offsetof(struct vdso_data, dcache_log_block_size));
#ifdef CONFIG_PPC64
	DEFINE(CFG_SYSCALL_MAP64, offsetof(struct vdso_data, syscall_map_64));
	DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec));
	DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec));
	DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec));
	DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec));
	DEFINE(TSPC64_TV_SEC, offsetof(struct timespec, tv_sec));
	DEFINE(TSPC64_TV_NSEC, offsetof(struct timespec, tv_nsec));
	DEFINE(TSPC32_TV_SEC, offsetof(struct compat_timespec, tv_sec));
	DEFINE(TSPC32_TV_NSEC, offsetof(struct compat_timespec, tv_nsec));
#else
	DEFINE(TVAL32_TV_SEC, offsetof(struct timeval, tv_sec));
	DEFINE(TVAL32_TV_USEC, offsetof(struct timeval, tv_usec));
	DEFINE(TSPC32_TV_SEC, offsetof(struct timespec, tv_sec));
	DEFINE(TSPC32_TV_NSEC, offsetof(struct timespec, tv_nsec));
#endif
	/* timeval/timezone offsets for use by vdso */
	DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
	DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));

	/* Other bits used by the vdso */
	DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
	DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
	DEFINE(NSEC_PER_SEC, NSEC_PER_SEC);
	DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);

#ifdef CONFIG_BUG
	DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
#endif

	DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE);
	DEFINE(PTE_SIZE, sizeof(pte_t));

#ifdef CONFIG_KVM
	DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
	DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
	DEFINE(VCPU_GUEST_PID, offsetof(struct kvm_vcpu, arch.pid));
	DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
	DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
	DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fp.fpr));
#ifdef CONFIG_ALTIVEC
	DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr.vr));
#endif
	DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
	DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
#ifdef CONFIG_PPC_BOOK3S
	DEFINE(VCPU_TAR, offsetof(struct kvm_vcpu, arch.tar));
#endif
	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
	DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr));
	DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0));
	DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1));
	DEFINE(VCPU_SPRG0, offsetof(struct kvm_vcpu, arch.shregs.sprg0));
	DEFINE(VCPU_SPRG1, offsetof(struct kvm_vcpu, arch.shregs.sprg1));
	DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2));
	DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3));
#endif
	DEFINE(VCPU_SHARED_SPRG3, offsetof(struct kvm_vcpu_arch_shared, sprg3));
	DEFINE(VCPU_SHARED_SPRG4, offsetof(struct kvm_vcpu_arch_shared, sprg4));
	DEFINE(VCPU_SHARED_SPRG5, offsetof(struct kvm_vcpu_arch_shared, sprg5));
	DEFINE(VCPU_SHARED_SPRG6, offsetof(struct kvm_vcpu_arch_shared, sprg6));
	DEFINE(VCPU_SHARED_SPRG7, offsetof(struct kvm_vcpu_arch_shared, sprg7));
	DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
	DEFINE(VCPU_SHADOW_PID1, offsetof(struct kvm_vcpu, arch.shadow_pid1));
	DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
	DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
	DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	DEFINE(VCPU_SHAREDBE, offsetof(struct kvm_vcpu, arch.shared_big_endian));
#endif

	DEFINE(VCPU_SHARED_MAS0, offsetof(struct kvm_vcpu_arch_shared, mas0));
	DEFINE(VCPU_SHARED_MAS1, offsetof(struct kvm_vcpu_arch_shared, mas1));
	DEFINE(VCPU_SHARED_MAS2, offsetof(struct kvm_vcpu_arch_shared, mas2));
	DEFINE(VCPU_SHARED_MAS7_3, offsetof(struct kvm_vcpu_arch_shared, mas7_3));
	DEFINE(VCPU_SHARED_MAS4, offsetof(struct kvm_vcpu_arch_shared, mas4));
	DEFINE(VCPU_SHARED_MAS6, offsetof(struct kvm_vcpu_arch_shared, mas6));

	DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
	DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));

	/* book3s */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
	DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
	DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
	DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
	DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock));
	DEFINE(KVM_NEED_FLUSH, offsetof(struct kvm, arch.need_tlb_flush.bits));
	DEFINE(KVM_ENABLED_HCALLS, offsetof(struct kvm, arch.enabled_hcalls));
	DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
	DEFINE(KVM_RMOR, offsetof(struct kvm, arch.rmor));
	DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v));
	DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr));
	DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
	DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
	DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
#endif
#ifdef CONFIG_PPC_BOOK3S
	DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
	DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
	DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
	DEFINE(VCPU_IC, offsetof(struct kvm_vcpu, arch.ic));
	DEFINE(VCPU_VTB, offsetof(struct kvm_vcpu, arch.vtb));
	DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr));
	DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr));
	DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor));
	DEFINE(VCPU_IAMR, offsetof(struct kvm_vcpu, arch.iamr));
	DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl));
	DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr));
	DEFINE(VCPU_DABRX, offsetof(struct kvm_vcpu, arch.dabrx));
	DEFINE(VCPU_DAWR, offsetof(struct kvm_vcpu, arch.dawr));
	DEFINE(VCPU_DAWRX, offsetof(struct kvm_vcpu, arch.dawrx));
	DEFINE(VCPU_CIABR, offsetof(struct kvm_vcpu, arch.ciabr));
	DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
	DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec));
	DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires));
	DEFINE(VCPU_PENDING_EXC, offsetof(struct kvm_vcpu, arch.pending_exceptions));
	DEFINE(VCPU_CEDED, offsetof(struct kvm_vcpu, arch.ceded));
	DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
	DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
	DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
	DEFINE(VCPU_SPMC, offsetof(struct kvm_vcpu, arch.spmc));
	DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar));
	DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar));
	DEFINE(VCPU_SIER, offsetof(struct kvm_vcpu, arch.sier));
	DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
	DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
	DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
	DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
	DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
	DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr));
	DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
	DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
	DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
	DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr));
	DEFINE(VCPU_FSCR, offsetof(struct kvm_vcpu, arch.fscr));
	DEFINE(VCPU_SHADOW_FSCR, offsetof(struct kvm_vcpu, arch.shadow_fscr));
	DEFINE(VCPU_PSPB, offsetof(struct kvm_vcpu, arch.pspb));
	DEFINE(VCPU_EBBHR, offsetof(struct kvm_vcpu, arch.ebbhr));
	DEFINE(VCPU_EBBRR, offsetof(struct kvm_vcpu, arch.ebbrr));
	DEFINE(VCPU_BESCR, offsetof(struct kvm_vcpu, arch.bescr));
	DEFINE(VCPU_CSIGR, offsetof(struct kvm_vcpu, arch.csigr));
	DEFINE(VCPU_TACR, offsetof(struct kvm_vcpu, arch.tacr));
	DEFINE(VCPU_TCSCR, offsetof(struct kvm_vcpu, arch.tcscr));
	DEFINE(VCPU_ACOP, offsetof(struct kvm_vcpu, arch.acop));
	DEFINE(VCPU_WORT, offsetof(struct kvm_vcpu, arch.wort));
	DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
	DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
	DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
	DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
	DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
	DEFINE(VCORE_KVM, offsetof(struct kvmppc_vcore, kvm));
	DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset));
	DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr));
	DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr));
	DEFINE(VCORE_DPDES, offsetof(struct kvmppc_vcore, dpdes));
	DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
	DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
	DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	DEFINE(VCPU_TFHAR, offsetof(struct kvm_vcpu, arch.tfhar));
	DEFINE(VCPU_TFIAR, offsetof(struct kvm_vcpu, arch.tfiar));
	DEFINE(VCPU_TEXASR, offsetof(struct kvm_vcpu, arch.texasr));
	DEFINE(VCPU_GPR_TM, offsetof(struct kvm_vcpu, arch.gpr_tm));
	DEFINE(VCPU_FPRS_TM, offsetof(struct kvm_vcpu, arch.fp_tm.fpr));
	DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr));
	DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm));
	DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm));
	DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm));
	DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm));
	DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm));
	DEFINE(VCPU_PPR_TM, offsetof(struct kvm_vcpu, arch.ppr_tm));
	DEFINE(VCPU_DSCR_TM, offsetof(struct kvm_vcpu, arch.dscr_tm));
	DEFINE(VCPU_TAR_TM, offsetof(struct kvm_vcpu, arch.tar_tm));
#endif

#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	DEFINE(PACA_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
# define SVCPU_FIELD(x, f)	DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
#else
# define SVCPU_FIELD(x, f)
#endif
# define HSTATE_FIELD(x, f)	DEFINE(x, offsetof(struct paca_struct, kvm_hstate.f))
#else	/* 32-bit */
# define SVCPU_FIELD(x, f)	DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, f))
# define HSTATE_FIELD(x, f)	DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, hstate.f))
#endif

	SVCPU_FIELD(SVCPU_CR, cr);
	SVCPU_FIELD(SVCPU_XER, xer);
	SVCPU_FIELD(SVCPU_CTR, ctr);
	SVCPU_FIELD(SVCPU_LR, lr);
	SVCPU_FIELD(SVCPU_PC, pc);
	SVCPU_FIELD(SVCPU_R0, gpr[0]);
	SVCPU_FIELD(SVCPU_R1, gpr[1]);
	SVCPU_FIELD(SVCPU_R2, gpr[2]);
	SVCPU_FIELD(SVCPU_R3, gpr[3]);
	SVCPU_FIELD(SVCPU_R4, gpr[4]);
	SVCPU_FIELD(SVCPU_R5, gpr[5]);
	SVCPU_FIELD(SVCPU_R6, gpr[6]);
	SVCPU_FIELD(SVCPU_R7, gpr[7]);
	SVCPU_FIELD(SVCPU_R8, gpr[8]);
	SVCPU_FIELD(SVCPU_R9, gpr[9]);
	SVCPU_FIELD(SVCPU_R10, gpr[10]);
	SVCPU_FIELD(SVCPU_R11, gpr[11]);
	SVCPU_FIELD(SVCPU_R12, gpr[12]);
	SVCPU_FIELD(SVCPU_R13, gpr[13]);
	SVCPU_FIELD(SVCPU_FAULT_DSISR, fault_dsisr);
	SVCPU_FIELD(SVCPU_FAULT_DAR, fault_dar);
	SVCPU_FIELD(SVCPU_LAST_INST, last_inst);
	SVCPU_FIELD(SVCPU_SHADOW_SRR1, shadow_srr1);
#ifdef CONFIG_PPC_BOOK3S_32
	SVCPU_FIELD(SVCPU_SR, sr);
#endif
#ifdef CONFIG_PPC64
	SVCPU_FIELD(SVCPU_SLB, slb);
	SVCPU_FIELD(SVCPU_SLB_MAX, slb_max);
	SVCPU_FIELD(SVCPU_SHADOW_FSCR, shadow_fscr);
#endif

	HSTATE_FIELD(HSTATE_HOST_R1, host_r1);
	HSTATE_FIELD(HSTATE_HOST_R2, host_r2);
	HSTATE_FIELD(HSTATE_HOST_MSR, host_msr);
	HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
	HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
	HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
	HSTATE_FIELD(HSTATE_SCRATCH2, scratch2);
	HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
	HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
	HSTATE_FIELD(HSTATE_NAPPING, napping);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	HSTATE_FIELD(HSTATE_HWTHREAD_REQ, hwthread_req);
	HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state);
	HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
	HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
	HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
	HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr);
	HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
	HSTATE_FIELD(HSTATE_PTID, ptid);
	HSTATE_FIELD(HSTATE_MMCR, host_mmcr);
	HSTATE_FIELD(HSTATE_PMC, host_pmc);
	HSTATE_FIELD(HSTATE_PURR, host_purr);
	HSTATE_FIELD(HSTATE_SPURR, host_spurr);
	HSTATE_FIELD(HSTATE_DSCR, host_dscr);
	HSTATE_FIELD(HSTATE_DABR, dabr);
	HSTATE_FIELD(HSTATE_DECEXP, dec_expires);
	DEFINE(IPI_PRIORITY, IPI_PRIORITY);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#ifdef CONFIG_PPC_BOOK3S_64
	HSTATE_FIELD(HSTATE_CFAR, cfar);
	HSTATE_FIELD(HSTATE_PPR, ppr);
	HSTATE_FIELD(HSTATE_HOST_FSCR, host_fscr);
#endif /* CONFIG_PPC_BOOK3S_64 */

#else /* CONFIG_PPC_BOOK3S */
	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
	DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
	DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
	DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
	DEFINE(VCPU_SPRG9, offsetof(struct kvm_vcpu, arch.sprg9));
	DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
	DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
	DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
	DEFINE(VCPU_CRIT_SAVE, offsetof(struct kvm_vcpu, arch.crit_save));
#endif /* CONFIG_PPC_BOOK3S */
#endif /* CONFIG_KVM */

#ifdef CONFIG_KVM_GUEST
	DEFINE(KVM_MAGIC_SCRATCH1, offsetof(struct kvm_vcpu_arch_shared,
					    scratch1));
	DEFINE(KVM_MAGIC_SCRATCH2, offsetof(struct kvm_vcpu_arch_shared,
					    scratch2));
	DEFINE(KVM_MAGIC_SCRATCH3, offsetof(struct kvm_vcpu_arch_shared,
					    scratch3));
	DEFINE(KVM_MAGIC_INT, offsetof(struct kvm_vcpu_arch_shared,
				       int_pending));
	DEFINE(KVM_MAGIC_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
	DEFINE(KVM_MAGIC_CRITICAL, offsetof(struct kvm_vcpu_arch_shared,
					    critical));
	DEFINE(KVM_MAGIC_SR, offsetof(struct kvm_vcpu_arch_shared, sr));
#endif

#ifdef CONFIG_44x
	DEFINE(PGD_T_LOG2, PGD_T_LOG2);
	DEFINE(PTE_T_LOG2, PTE_T_LOG2);
#endif
#ifdef CONFIG_PPC_FSL_BOOK3E
	DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
	DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0));
	DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1));
	DEFINE(TLBCAM_MAS2, offsetof(struct tlbcam, MAS2));
	DEFINE(TLBCAM_MAS3, offsetof(struct tlbcam, MAS3));
	DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7));
#endif

#if defined(CONFIG_KVM) && defined(CONFIG_SPE)
	DEFINE(VCPU_EVR, offsetof(struct kvm_vcpu, arch.evr[0]));
	DEFINE(VCPU_ACC, offsetof(struct kvm_vcpu, arch.acc));
	DEFINE(VCPU_SPEFSCR, offsetof(struct kvm_vcpu, arch.spefscr));
	DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr));
#endif

#ifdef CONFIG_KVM_BOOKE_HV
	DEFINE(VCPU_HOST_MAS4, offsetof(struct kvm_vcpu, arch.host_mas4));
	DEFINE(VCPU_HOST_MAS6, offsetof(struct kvm_vcpu, arch.host_mas6));
	DEFINE(VCPU_EPLC, offsetof(struct kvm_vcpu, arch.eplc));
#endif

#ifdef CONFIG_KVM_EXIT_TIMING
	DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
						arch.timing_exit.tv32.tbu));
	DEFINE(VCPU_TIMING_EXIT_TBL, offsetof(struct kvm_vcpu,
						arch.timing_exit.tv32.tbl));
	DEFINE(VCPU_TIMING_LAST_ENTER_TBU, offsetof(struct kvm_vcpu,
					arch.timing_last_enter.tv32.tbu));
	DEFINE(VCPU_TIMING_LAST_ENTER_TBL, offsetof(struct kvm_vcpu,
					arch.timing_last_enter.tv32.tbl));
#endif

	return 0;
}
Example #20
File: addRamDisk.c Project: 274914765/C
int main(int argc, char **argv)
{
    char inbuf[4096];
    struct addr_range vmlinux;
    FILE *ramDisk;
    FILE *inputVmlinux;
    FILE *outputVmlinux;

    char *rd_name, *lx_name, *out_name;

    size_t i;
    unsigned long ramFileLen;
    unsigned long ramLen;
    unsigned long roundR;
    unsigned long offset_end;

    unsigned long kernelLen;
    unsigned long actualKernelLen;
    unsigned long round;
    unsigned long roundedKernelLen;
    unsigned long ramStartOffs;
    unsigned long ramPages;
    unsigned long roundedKernelPages;
    unsigned long hvReleaseData;
    u_int32_t eyeCatcher = 0xc8a5d9c4;
    unsigned long naca;
    unsigned long xRamDisk;
    unsigned long xRamDiskSize;
    long padPages;
  
  
    if (argc < 2) {
        fprintf(stderr, "Name of RAM disk file missing.\n");
        exit(1);
    }
    rd_name = argv[1];

    if (argc < 3) {
        fprintf(stderr, "Name of vmlinux file missing.\n");
        exit(1);
    }
    lx_name = argv[2];

    if (argc < 4) {
        fprintf(stderr, "Name of vmlinux output file missing.\n");
        exit(1);
    }
    out_name = argv[3];


    ramDisk = fopen(rd_name, "r");
    if ( ! ramDisk ) {
        fprintf(stderr, "RAM disk file \"%s\" failed to open.\n", rd_name);
        exit(1);
    }

    inputVmlinux = fopen(lx_name, "r");
    if ( ! inputVmlinux ) {
        fprintf(stderr, "vmlinux file \"%s\" failed to open.\n", lx_name);
        exit(1);
    }
  
    outputVmlinux = fopen(out_name, "w+");
    if ( ! outputVmlinux ) {
        fprintf(stderr, "output vmlinux file \"%s\" failed to open.\n", out_name);
        exit(1);
    }

    i = fread(inbuf, 1, sizeof(inbuf), inputVmlinux);
    if (i != sizeof(inbuf)) {
        fprintf(stderr, "can not read vmlinux file %s: %u\n", lx_name, i);
        exit(1);
    }

    i = check_elf64(inbuf, sizeof(inbuf), &vmlinux);
    if (i == 0) {
        fprintf(stderr, "You must have a linux kernel specified as argv[2]\n");
        exit(1);
    }

    /* Input Vmlinux file */
    fseek(inputVmlinux, 0, SEEK_END);
    kernelLen = ftell(inputVmlinux);
    fseek(inputVmlinux, 0, SEEK_SET);
    printf("kernel file size = %lu\n", kernelLen);

    actualKernelLen = kernelLen - ElfHeaderSize;

    printf("actual kernel length (minus ELF header) = %lu\n", actualKernelLen);

    round = actualKernelLen % 4096;
    roundedKernelLen = actualKernelLen;
    if ( round )
        roundedKernelLen += (4096 - round);
    printf("Vmlinux length rounded up to a 4k multiple = %ld/0x%lx \n", roundedKernelLen, roundedKernelLen);
    roundedKernelPages = roundedKernelLen / 4096;
    printf("Vmlinux pages to copy = %ld/0x%lx \n", roundedKernelPages, roundedKernelPages);

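    /* _ALIGN_UP rounds the in-memory kernel image size (_end, as reported
     * by check_elf64) up to the next 4K page boundary; this is normally
     * where the embedded ram disk will be placed. */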
    offset_end = _ALIGN_UP(vmlinux.memsize, 4096);
    /* calc how many pages we need to insert between the vmlinux and the start of the ram disk */
    padPages = offset_end/4096 - roundedKernelPages;

    /* Check whether the vmlinux is already larger than _end in System.map */
    if (padPages < 0) {
        /* vmlinux is larger than _end - adjust the offset to the start of the embedded ram disk */
        offset_end = roundedKernelLen;
        printf("vmlinux is larger than the size _end indicates - offset_end = 0x%lx \n", offset_end);
        padPages = 0;
        printf("will insert %ld pages between the vmlinux and the start of the ram disk \n", padPages);
    }
    else {
        /* _end is larger than vmlinux - use the offset to _end that we calculated from System.map */
        printf("vmlinux is smaller than the size _end indicates - offset_end = 0x%lx \n", offset_end);
        printf("will insert %ld pages between the vmlinux and the start of the ram disk \n", padPages);
    }



    /* Input Ram Disk file */
    /* Set the offset at which the ram disk will start. */
    ramStartOffs = offset_end;  /* determined from the input vmlinux file and the system map */
    printf("Ram Disk will start at offset = 0x%lx \n", ramStartOffs);
  
    fseek(ramDisk, 0, SEEK_END);
    ramFileLen = ftell(ramDisk);
    fseek(ramDisk, 0, SEEK_SET);
    printf("%s file size = %ld/0x%lx \n", rd_name, ramFileLen, ramFileLen);

    ramLen = ramFileLen;

    roundR = ramLen % 4096;
    if ( roundR ) {
        /* Only pad when the length is not already a multiple of 4096. */
        roundR = 4096 - roundR;
        printf("Rounding RAM disk file up to a multiple of 4096, adding %ld/0x%lx \n", roundR, roundR);
        ramLen += roundR;
    }

    printf("Rounded RAM disk size is %ld/0x%lx \n", ramLen, ramLen);
    ramPages = ramLen / 4096;
    printf("RAM disk pages to copy = %ld/0x%lx\n", ramPages, ramPages);



    /* Copy the 64K ELF header (as 4k pages). */
    for (i=0; i<ElfPages; ++i) {
        get4k( inputVmlinux, inbuf );
        put4k( outputVmlinux, inbuf );
    }

    /* Copy the vmlinux (as full pages). */
    fseek(inputVmlinux, ElfHeaderSize, SEEK_SET);
    for ( i=0; i<roundedKernelPages; ++i ) {
        get4k( inputVmlinux, inbuf );
        put4k( outputVmlinux, inbuf );
    }
  
    /* Insert pad pages (if appropriate) that are needed between
     * the end of the vmlinux and the ram disk. */
    for (i=0; i<padPages; ++i) {
        memset(inbuf, 0, 4096);
        put4k(outputVmlinux, inbuf);
    }

    /* Copy the ram disk (as full pages). */
    for ( i=0; i<ramPages; ++i ) {
        get4k( ramDisk, inbuf );
        put4k( outputVmlinux, inbuf );
    }

    /* Close the input files */
    fclose(ramDisk);
    fclose(inputVmlinux);
    /* And flush the written output file */
    fflush(outputVmlinux);



    /* Fixup the new vmlinux to contain the ram disk starting offset (xRamDisk) and the ram disk size (xRamDiskSize) */
    /* fseek to the hvReleaseData pointer */
    fseek(outputVmlinux, ElfHeaderSize + 0x24, SEEK_SET);
    if (fread(&hvReleaseData, 4, 1, outputVmlinux) != 1) {
        death("Could not read hvReleaseData pointer\n", outputVmlinux, out_name);
    }
    hvReleaseData = ntohl(hvReleaseData); /* Convert to native int */
    printf("hvReleaseData is at %08lx\n", hvReleaseData);

    /* fseek to the hvReleaseData */
    fseek(outputVmlinux, ElfHeaderSize + hvReleaseData, SEEK_SET);
    if (fread(inbuf, 0x40, 1, outputVmlinux) != 1) {
        death("Could not read hvReleaseData\n", outputVmlinux, out_name);
    }
    /* Check hvReleaseData sanity */
    if (memcmp(inbuf, &eyeCatcher, 4) != 0) {
        death("hvReleaseData is invalid\n", outputVmlinux, out_name);
    }
    /* Get the naca pointer */
    naca = ntohl(*((u_int32_t*) &inbuf[0x0C])) - KERNELBASE;
    printf("Naca is at offset 0x%lx \n", naca);

    /* fseek to the naca */
    fseek(outputVmlinux, ElfHeaderSize + naca, SEEK_SET);
    if (fread(inbuf, 0x18, 1, outputVmlinux) != 1) {
        death("Could not read naca\n", outputVmlinux, out_name);
    }
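    /* The words at naca+0x0c and naca+0x14 are assumed to hold the ram disk
     * start offset and size in pages; both must still be zero before a new
     * ram disk is attached below. */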
    xRamDisk = ntohl(*((u_int32_t *) &inbuf[0x0c]));
    xRamDiskSize = ntohl(*((u_int32_t *) &inbuf[0x14]));
    /* Make sure a RAM disk isn't already present */
    if ((xRamDisk != 0) || (xRamDiskSize != 0)) {
        death("RAM disk is already attached to this kernel\n", outputVmlinux, out_name);
    }
    /* Fill in the values */
    *((u_int32_t *) &inbuf[0x0c]) = htonl(ramStartOffs);
    *((u_int32_t *) &inbuf[0x14]) = htonl(ramPages);

    /* Write out the new naca */
    fflush(outputVmlinux);
    fseek(outputVmlinux, ElfHeaderSize + naca, SEEK_SET);
    if (fwrite(inbuf, 0x18, 1, outputVmlinux) != 1) {
        death("Could not write naca\n", outputVmlinux, out_name);
    }
    printf("Ram Disk of 0x%lx pages is attached to the kernel at offset 0x%08lx\n",
           ramPages, ramStartOffs);

    /* Done */
    fclose(outputVmlinux);
    /* Set permission to executable */
    chmod(out_name, S_IRUSR|S_IWUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH);

    return 0;
}
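The example above relies on a few helpers (get4k, put4k, death) defined elsewhere in addRamDisk.c and not shown on this page. A minimal sketch of plausible implementations follows, for context only; the bodies are assumptions inferred from how the functions are called, not the original definitions. They assume <stdio.h>, <stdlib.h>, <string.h> and <unistd.h> are already included by the file.

/* Plausible helper implementations (assumed, inferred from call sites). */
static void get4k(FILE *file, char *buf)
{
    size_t nread = fread(buf, 1, 4096, file);

    /* Pad short reads (e.g. at end of file) with zeroes so that whole
     * 4K pages are always written out. */
    if (nread < 4096)
        memset(buf + nread, 0, 4096 - nread);
}

static void put4k(FILE *file, char *buf)
{
    if (fwrite(buf, 1, 4096, file) != 4096) {
        perror("fwrite");
        exit(1);
    }
}

static void death(const char *msg, FILE *fdesc, const char *fname)
{
    /* Report the error, then close and remove the partial output file. */
    fprintf(stderr, "%s", msg);
    fclose(fdesc);
    unlink(fname);
    exit(1);
}

Zero-padding in get4k matters because the copy loops always move whole pages; without it the last page of the kernel or ram disk could carry stale buffer contents into the output image.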