Example #1
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	/*
	 * Set up the executable regions using the existing section mappings
	 * for now. This will get more fine-grained later once all memory
	 * is mapped.
	 */
	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);

	if (end < kernel_x_start) {
		create_mapping(start, __phys_to_virt(start),
			end - start, PAGE_KERNEL);
	} else if (start >= kernel_x_end) {
		create_mapping(start, __phys_to_virt(start),
			end - start, PAGE_KERNEL);
	} else {
		if (start < kernel_x_start)
			create_mapping(start, __phys_to_virt(start),
				kernel_x_start - start,
				PAGE_KERNEL);
		create_mapping(kernel_x_start,
				__phys_to_virt(kernel_x_start),
				kernel_x_end - kernel_x_start,
				PAGE_KERNEL_EXEC);
		if (kernel_x_end < end)
			create_mapping(kernel_x_end,
				__phys_to_virt(kernel_x_end),
				end - kernel_x_end,
				PAGE_KERNEL);
	}
}
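The function above is pure interval arithmetic: the memblock is split into up to three sub-ranges around the section-aligned executable window. Below is a minimal user-space sketch of that split, with create_mapping() replaced by printf() and all addresses assumed example values; an illustration, not kernel code.

/* Three-way split around an executable window [kx_start, kx_end).
 * Mirrors the branches of __map_memblock() above. */
#include <stdio.h>
#include <stdint.h>

static void map(uint64_t s, uint64_t e, const char *prot)
{
	printf("map [%#llx, %#llx) as %s\n",
	       (unsigned long long)s, (unsigned long long)e, prot);
}

static void split(uint64_t start, uint64_t end,
		  uint64_t kx_start, uint64_t kx_end)
{
	if (end < kx_start || start >= kx_end) {	/* no overlap */
		map(start, end, "PAGE_KERNEL");
		return;
	}
	if (start < kx_start)				/* head, non-executable */
		map(start, kx_start, "PAGE_KERNEL");
	map(kx_start, kx_end, "PAGE_KERNEL_EXEC");	/* executable window */
	if (kx_end < end)				/* tail, non-executable */
		map(kx_end, end, "PAGE_KERNEL");
}

int main(void)
{
	split(0x40000000ULL, 0x80000000ULL, 0x40080000ULL, 0x40800000ULL);
	return 0;
}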
Example #2
static void free_owl_reserved_memory(unsigned int free_start, unsigned int free_size)
{
	unsigned long n, start, end;
	/* First PFN above lowmem; needed unconditionally, so it must not
	 * be declared under CONFIG_HIGHMEM. */
	unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;

	/* Free the lowmem portion through the linear mapping. */
	start = free_start;
	end = free_start + free_size;
	if ((start >> PAGE_SHIFT) <= max_low) {
		if ((end >> PAGE_SHIFT) > max_low)
			end = max_low << PAGE_SHIFT;
		n = free_reserved_area((void *)__phys_to_virt(start),
				       (void *)__phys_to_virt(end), 0, NULL);
		printk(KERN_INFO "freed %lu reserved pages to the buddy system\n", n);
	}

#ifdef CONFIG_HIGHMEM
	/* Free any remainder above the lowmem boundary page by page. */
	start = free_start >> PAGE_SHIFT;
	end = (free_start + free_size) >> PAGE_SHIFT;
	if (end > max_low) {
		if (start < max_low)
			start = max_low;
		for (n = start; n < end; n++)
			free_highmem_page(pfn_to_page(n));
		printk(KERN_INFO "freed %lu reserved highmem pages to the buddy system\n",
		       end - start);
	}
#endif
}
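The split above is easiest to see as PFN arithmetic: everything below the max_low boundary is freed through the linear mapping, everything above it page by page as highmem. A small user-space sketch of just that boundary computation (PAGE_SHIFT and the boundary PFN are assumed example values):

/* Count how many pages of a reserved range fall on each side of the
 * lowmem/highmem boundary, as free_owl_reserved_memory() does above. */
#include <stdio.h>

#define PAGE_SHIFT	12

int main(void)
{
	unsigned long max_low = 0x38000;	/* assumed first highmem PFN */
	unsigned long start = 0x30000000UL, size = 0x10000000UL;
	unsigned long spfn = start >> PAGE_SHIFT;
	unsigned long epfn = (start + size) >> PAGE_SHIFT;
	unsigned long low = 0, high = 0;

	if (spfn < max_low)
		low = (epfn < max_low ? epfn : max_low) - spfn;
	if (epfn > max_low)
		high = epfn - (spfn > max_low ? spfn : max_low);

	printf("lowmem pages: %lu, highmem pages: %lu\n", low, high);
	return 0;
}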
Example #3
void __init kasan_init(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start, mod_shadow_end;
	struct memblock_region *reg;
	int i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK;
	kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end));

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

	/*
	 * We are going to perform proper setup of shadow memory.
	 * First we unmap the early shadow (the clear_pgds() call below).
	 * However, instrumented code cannot execute without shadow memory,
	 * so tmp_pg_dir is used to keep the early shadow mapped until the
	 * full shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
			   early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));

	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
				    (void *)mod_shadow_start);
	kasan_populate_early_shadow((void *)kimg_shadow_end,
				    kasan_mem_to_shadow((void *)PAGE_OFFSET));

	if (kimg_shadow_start > mod_shadow_end)
		kasan_populate_early_shadow((void *)mod_shadow_end,
					    (void *)kimg_shadow_start);

	for_each_memblock(memory, reg) {
		void *start = (void *)__phys_to_virt(reg->base);
		void *end = (void *)__phys_to_virt(reg->base + reg->size);

		if (start >= end)
			break;

		kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
				   (unsigned long)kasan_mem_to_shadow(end),
				   early_pfn_to_nid(virt_to_pfn(start)));
	}
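The shadow bounds computed above come from kasan_mem_to_shadow(), which in generic KASAN is simple arithmetic: one shadow byte covers 8 bytes of memory, so the shadow address is the memory address shifted right by 3 plus a fixed offset. A user-space sketch (the offset and the _text address are assumed example values, not arm64's real constants):

/* Generic KASAN address-to-shadow translation: one shadow byte per
 * 8 bytes of memory (KASAN_SHADOW_SCALE_SHIFT == 3). */
#include <stdio.h>
#include <stdint.h>

#define KASAN_SHADOW_SCALE_SHIFT	3
#define KASAN_SHADOW_OFFSET		0xdffffc0000000000ULL	/* assumed */

static uint64_t mem_to_shadow(uint64_t addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
	uint64_t text = 0xffff000010080000ULL;	/* hypothetical _text */

	printf("shadow(_text) = %#llx\n",
	       (unsigned long long)mem_to_shadow(text));
	printf("one shadow byte covers %d bytes\n",
	       1 << KASAN_SHADOW_SCALE_SHIFT);
	return 0;
}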
Example #4
int meson_power_suspend(void)
{
	static int test_flag = 0;
	unsigned addr;
	unsigned p_addr;
	void (*pwrtest_entry)(unsigned, unsigned, unsigned, unsigned);

	check_in_param();
	flush_cache_all();

	addr = 0x04F04400;	/* entry.s start */
	p_addr = (unsigned)__phys_to_virt(addr);
	pwrtest_entry = (void (*)(unsigned, unsigned, unsigned, unsigned))p_addr;
	if (test_flag != 1234) {
		test_flag = 1234;
		printk("initial appf\n");
		pwrtest_entry(APPF_INITIALIZE, 0, 0, IO_PL310_BASE & 0xffff0000);
	}
	if (AML_WDT_ENABLED) {
		disable_watchdog();
		if (awdtv)
			enable_watchdog(awdtv->firmware_timeout * awdtv->one_second);
	}

	printk("power down cpu --\n");
	pwrtest_entry(APPF_POWER_DOWN_CPU, 0, 0,
		      APPF_SAVE_PMU | APPF_SAVE_VFP | APPF_SAVE_L2 | (IO_PL310_BASE & 0xffff0000));
	if (AML_WDT_ENABLED) {
		disable_watchdog();
		if (awdtv)
			enable_watchdog(awdtv->suspend_timeout * awdtv->one_second);
	}
	return 0;
}
Example #5
int meson_power_suspend(void)
{
	static int test_flag = 0;
	unsigned addr;
	unsigned p_addr;
	void (*pwrtest_entry)(unsigned, unsigned, unsigned, unsigned);

	flush_cache_all();

	addr = 0x9FF04400;	/* entry.s start */
	p_addr = (unsigned)__phys_to_virt(addr);
	pwrtest_entry = (void (*)(unsigned, unsigned, unsigned, unsigned))p_addr;
	if (test_flag != 1234) {
		test_flag = 1234;
		printk("initial appf\n");
		pwrtest_entry(APPF_INITIALIZE, 0, 0, 0);
	}
#ifdef CONFIG_SUSPEND_WATCHDOG
	DISABLE_SUSPEND_WATCHDOG;
#endif
	printk("power down cpu --\n");
	pwrtest_entry(APPF_POWER_DOWN_CPU, 0, 0, APPF_SAVE_PMU | APPF_SAVE_VFP | APPF_SAVE_L2);
#ifdef CONFIG_SUSPEND_WATCHDOG
	ENABLE_SUSPEND_WATCHDOG;
#endif
	return 0;
}
Example #6
void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
{
	/* For normal memory we already have a cacheable mapping. */
	if (pfn_valid(__phys_to_pfn(phys_addr)))
		return (void __iomem *)__phys_to_virt(phys_addr);

	return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
				__builtin_return_address(0));
}
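The pfn_valid() check above is the whole trick: physical addresses that are RAM already have a cacheable mapping in the kernel's linear map, so ioremap_cache() can just translate instead of building a second alias. A user-space sketch of that decision (the RAM window and linear-map base are assumed example values):

/* RAM reuses the linear map; anything else would need a fresh mapping. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define PAGE_SHIFT	12
#define RAM_START	0x40000000ULL		/* assumed RAM window */
#define RAM_END		0x80000000ULL
#define LINEAR_BASE	0xffff000000000000ULL	/* assumed linear-map base */

static bool pfn_is_ram(uint64_t pfn)
{
	uint64_t pa = pfn << PAGE_SHIFT;

	return pa >= RAM_START && pa < RAM_END;
}

static uint64_t remap_cache(uint64_t phys)
{
	if (pfn_is_ram(phys >> PAGE_SHIFT))	/* RAM: reuse linear map */
		return LINEAR_BASE + (phys - RAM_START);
	return 0;	/* non-RAM: would build a new mapping instead */
}

int main(void)
{
	printf("RAM:     %#llx\n", (unsigned long long)remap_cache(0x48000000ULL));
	printf("non-RAM: %#llx\n", (unsigned long long)remap_cache(0x10000000ULL));
	return 0;
}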
Example #7
File: mmu.c Project: 1314cc/linux
static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
{
	unsigned long kernel_start = __pa(_text);
	unsigned long kernel_end = __pa(_etext);

	/*
	 * Take care not to create a writable alias for the
	 * read-only text and rodata sections of the kernel image.
	 */

	/* No overlap with the kernel text */
	if (end < kernel_start || start >= kernel_end) {
		__create_pgd_mapping(pgd, start, __phys_to_virt(start),
				     end - start, PAGE_KERNEL,
				     early_pgtable_alloc);
		return;
	}

	/*
	 * This block overlaps the kernel text mapping.
	 * Map the portion(s) which don't overlap.
	 */
	if (start < kernel_start)
		__create_pgd_mapping(pgd, start,
				     __phys_to_virt(start),
				     kernel_start - start, PAGE_KERNEL,
				     early_pgtable_alloc);
	if (kernel_end < end)
		__create_pgd_mapping(pgd, kernel_end,
				     __phys_to_virt(kernel_end),
				     end - kernel_end, PAGE_KERNEL,
				     early_pgtable_alloc);

	/*
	 * Map the linear alias of the [_text, _etext) interval as
	 * read-only/non-executable. This makes the contents of the
	 * region accessible to subsystems such as hibernate, but
	 * protects it from inadvertent modification or execution.
	 */
	__create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
			     kernel_end - kernel_start, PAGE_KERNEL_RO,
			     early_pgtable_alloc);
}
Example #8
static void __init
fixup_csb226(struct machine_desc *desc, struct param_struct *params,
		char **cmdline, struct meminfo *mi)
{
	SET_BANK (0, 0xa0000000, 64*1024*1024);
	mi->nr_banks      = 1;
#if 0
	setup_ramdisk (1, 0, 0, 8192);
	setup_initrd (__phys_to_virt(0xa1000000), 4*1024*1024);
	ROOT_DEV = MKDEV(RAMDISK_MAJOR,0);
#endif
}
Example #9
static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	kernel_code.start  = __virt_to_phys(init_mm.start_code);
	kernel_code.end    = __virt_to_phys(init_mm.end_code - 1);
	kernel_data.start  = __virt_to_phys(init_mm.end_code);
	kernel_data.end    = __virt_to_phys(init_mm.brk - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		unsigned long virt_start, virt_end;

		if (mi->bank[i].size == 0)
			continue;

		virt_start = __phys_to_virt(mi->bank[i].start);
		virt_end   = virt_start + mi->bank[i].size - 1;

		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __virt_to_phys(virt_start);
		res->end   = __virt_to_phys(virt_end);
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines never have lp0, lp1 or lp2.
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
Example #10
static void __init
fixup_graphicsmaster(struct machine_desc *desc, struct param_struct *params,
		     char **cmdline, struct meminfo *mi)
{
	SET_BANK( 0, 0xc0000000, 16*1024*1024 );
	mi->nr_banks = 1;
	SET_BANK( 1, 0xc8000000, 16*1024*1024 );
	mi->nr_banks = 2;

	ROOT_DEV = MKDEV(RAMDISK_MAJOR,0);
	setup_ramdisk( 1, 0, 0, 8192 );
	setup_initrd( __phys_to_virt(0xc0800000), 4*1024*1024 );
}
Example #11
void fixup_lpc(struct machine_desc *desc, struct param_struct *params, char **cmdline, struct meminfo *mi)
{
	mi->bank[0].start = 0x81000000;
	mi->bank[0].size = 8 * 1024 * 1024;
	mi->bank[0].node = 0;
	mi->nr_banks = 1;

#ifdef CONFIG_BLK_DEV_INITRD
	setup_ramdisk(1, 0, 0, CONFIG_BLK_DEV_RAM_SIZE);
	setup_initrd(__phys_to_virt(0x81700000), 1024 * 1024);
	ROOT_DEV = MKDEV(RAMDISK_MAJOR, 0);
#endif
}
Example #12
static void __init
fixup_simpad(struct machine_desc *desc, struct param_struct *params,
		   char **cmdline, struct meminfo *mi)
{
#ifdef CONFIG_SA1100_SIMPAD_DRAM_64MB /* DRAM */
	SET_BANK( 0, 0xc0000000, 64*1024*1024 );
#else
	SET_BANK( 0, 0xc0000000, 32*1024*1024 );
#endif
	mi->nr_banks = 1;
	ROOT_DEV = MKDEV(RAMDISK_MAJOR,0);
	setup_ramdisk( 1, 0, 0, 8192 );
	setup_initrd( __phys_to_virt(0xc0800000), 4*1024*1024 );
}
Example #13
static void __init
fortunet_fixup(struct tag *tags, char **cmdline, struct meminfo *mi)
{
	IMAGE_PARAMS *ip = phys_to_virt(IMAGE_PARAMS_PHYS);
	*cmdline = phys_to_virt(ip->command_line);
#ifdef CONFIG_BLK_DEV_INITRD
	if (ip->ramdisk_ok) {
		initrd_start = __phys_to_virt(ip->ramdisk_address);
		initrd_end = initrd_start + ip->ramdisk_size;
	}
#endif
	memmap.bank[0].size = ip->ram_size;
	*mi = memmap;
}
Example #14
static void ramfile_vma_close(struct vm_area_struct *vma)
{
	struct ramfile_desc *prf;
	unsigned long usize = vma->vm_end - vma->vm_start;

	/* Fill in the ramfile desc (header) */
	prf = (struct ramfile_desc *)__phys_to_virt(__pfn_to_phys(vma->vm_pgoff));
	prf->payload_size = usize;
	prf->flags = RAMFILE_PHYCONT;
	memset((void*)&prf->reserved[0], 0, sizeof(prf->reserved));
	ramdump_attach_ramfile(prf);
#ifdef RAMFILE_DEBUG
	printk(KERN_ERR "ramfile close 0x%x - linked into RDC\n", (unsigned)prf);
#endif
}
Example #15
static inline unsigned long l2_get_va(unsigned long paddr)
{
#ifdef CONFIG_HIGHMEM
	/*
	 * Because range ops can't be done on physical addresses,
	 * we simply install a virtual mapping for it only for the
	 * TLB lookup to occur, hence no need to flush the untouched
	 * memory mapping afterwards (note: a cache flush may happen
	 * in some circumstances depending on the path taken in kunmap_atomic).
	 */
	void *vaddr = kmap_atomic_pfn(paddr >> PAGE_SHIFT);
	return (unsigned long)vaddr + (paddr & ~PAGE_MASK);
#else
	return __phys_to_virt(paddr);
#endif
}
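In both branches the page offset is what matters: a temporary highmem mapping targets a whole page, so the low bits of the physical address must be carried over. A sketch of the vaddr + (paddr & ~PAGE_MASK) arithmetic (all values assumed):

/* Split a physical address into PFN and in-page offset, then rebase
 * the offset onto a (hypothetical) per-CPU mapping slot. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long paddr = 0x4012a34cUL;	/* arbitrary physical address */
	unsigned long vpage = 0xffe00000UL;	/* assumed mapping slot */
	unsigned long vaddr = vpage + (paddr & ~PAGE_MASK);

	printf("pfn=%#lx offset=%#lx vaddr=%#lx\n",
	       paddr >> PAGE_SHIFT, paddr & ~PAGE_MASK, vaddr);
	return 0;
}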
Example #16
0
static inline unsigned long l2_start_va(unsigned long paddr)
{
#ifdef CONFIG_HIGHMEM
	/*
	 * Let's do our own fixmap stuff in a minimal way here.
	 * Because range ops can't be done on physical addresses,
	 * we simply install a virtual mapping for it only for the
	 * TLB lookup to occur, hence no need to flush the untouched
	 * memory mapping.  This is protected with the disabling of
	 * interrupts by the caller.
	 */
	unsigned long idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
	unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte_ext(TOP_PTE(vaddr), pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL), 0);
	local_flush_tlb_kernel_page(vaddr);
	return vaddr + (paddr & ~PAGE_MASK);
#else
	return __phys_to_virt(paddr);
#endif
}
Example #17
static void __init hisi_cma_dev_init(void)
{
	struct cma *cma;
	struct page *page = NULL;
	int i;
#ifdef CONFIG_HISI_KERNELDUMP
	int k;
	struct page *tmp_page = NULL;
#endif

	for (i = 0; i < hisi_cma_area_count; i++) {
		cma = hisi_cma_areas[i].cma_area;
		if (cma == NULL)
			continue;
		dev_set_cma_area(&hisi_cma_dev[i], cma);
		hisi_cma_areas[i].dev = &hisi_cma_dev[i];
		/* dynamic == 0 means the area is static */
		if (hisi_cma_areas[i].dynamic == 0) {
			page = dma_alloc_from_contiguous(&hisi_cma_dev[i],
							 cma->count, SZ_1M);
#ifdef CONFIG_HISI_KERNELDUMP
			if (page != NULL) {
				tmp_page = page;
				for (k = 0; k < cma->count; k++) {
					SetPageMemDump(tmp_page);
					tmp_page++;
				}
			}
#endif
			if (hisi_cma_areas[i].sec_prot) {
				create_mapping_late(__pfn_to_phys(cma->base_pfn),
						    __phys_to_virt(__pfn_to_phys(cma->base_pfn)),
						    cma->count * PAGE_SIZE,
						    __pgprot(PROT_DEVICE_nGnRE));
			}
			if (page != NULL)
				pr_info("%s:%d page addr 0x%llx size %luMB\n",
					__func__, __LINE__, page_to_phys(page),
					(cma->count << PAGE_SHIFT) / SZ_1M);
		}
	}
}
Example #18
static void __init
fixup_l7200(struct machine_desc *desc, struct param_struct *unused,
	    char **cmdline, struct meminfo *mi)
{
	mi->nr_banks      = 1;
	mi->bank[0].start = PHYS_OFFSET;
	mi->bank[0].size  = (32*1024*1024);
	mi->bank[0].node  = 0;

	ROOT_DEV = MKDEV(RAMDISK_MAJOR, 0);
	setup_ramdisk(1, 0, 0, CONFIG_BLK_DEV_RAM_SIZE);
	setup_initrd(__phys_to_virt(0xf1000000), 0x005dac7b);

	/* Serial console on COM2 and LCD */
	strcpy(*cmdline, "console=tty0 console=ttyLU1,115200");

	/* Serial console on COM1 and LCD */
	/* strcpy(*cmdline, "console=tty0 console=ttyLU0,115200"); */

	/* Console on LCD only */
	/* strcpy(*cmdline, "console=tty0"); */
}
Example #19
void __init kasan_init(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start, mod_shadow_end;
	struct memblock_region *reg;
	int i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(_text);
	kimg_shadow_end = (u64)kasan_mem_to_shadow(_end);

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

	/*
	 * We are going to perform proper setup of shadow memory.
	 * First we unmap the early shadow (the clear_pgds() call below).
	 * However, instrumented code cannot execute without shadow memory,
	 * so tmp_pg_dir is used to keep the early shadow mapped until the
	 * full shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
			 pfn_to_nid(virt_to_pfn(_text)));

	/*
	 * vmemmap_populate() has populated the shadow region that covers the
	 * kernel image with SWAPPER_BLOCK_SIZE mappings, so we have to round
	 * the start and end addresses to SWAPPER_BLOCK_SIZE as well, to prevent
	 * kasan_populate_zero_shadow() from replacing the page table entries
	 * (PMD or PTE) at the edges of the shadow region for the kernel
	 * image.
	 */
	kimg_shadow_start = round_down(kimg_shadow_start, SWAPPER_BLOCK_SIZE);
	kimg_shadow_end = round_up(kimg_shadow_end, SWAPPER_BLOCK_SIZE);

	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
				   (void *)mod_shadow_start);
	kasan_populate_zero_shadow((void *)kimg_shadow_end,
				   kasan_mem_to_shadow((void *)PAGE_OFFSET));

	if (kimg_shadow_start > mod_shadow_end)
		kasan_populate_zero_shadow((void *)mod_shadow_end,
					   (void *)kimg_shadow_start);

	for_each_memblock(memory, reg) {
		void *start = (void *)__phys_to_virt(reg->base);
		void *end = (void *)__phys_to_virt(reg->base + reg->size);

		if (start >= end)
			break;

		/*
		 * end + 1 here is intentional. We check several shadow bytes in
		 * advance to slightly speed up fastpath. In some rare cases
		 * we could cross boundary of mapped shadow, so we just map
		 * some more here.
		 */
		vmemmap_populate((unsigned long)kasan_mem_to_shadow(start),
				(unsigned long)kasan_mem_to_shadow(end) + 1,
				pfn_to_nid(virt_to_pfn(start)));
	}
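The rounding described in the comment above is the usual power-of-two mask arithmetic. A sketch of round_down()/round_up() on shadow addresses (the 2 MB block size and the addresses are assumed example values):

/* round_down()/round_up() for a power-of-two alignment, as used to
 * widen the kernel-image shadow region to SWAPPER_BLOCK_SIZE above. */
#include <stdio.h>
#include <stdint.h>

#define BLOCK_SIZE		(2ULL * 1024 * 1024)	/* assumed 2 MB */
#define round_down(x, a)	((x) & ~((a) - 1))
#define round_up(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uint64_t start = 0xffff200002345000ULL;
	uint64_t end   = 0xffff200002567000ULL;

	printf("start %#llx -> %#llx\n", (unsigned long long)start,
	       (unsigned long long)round_down(start, BLOCK_SIZE));
	printf("end   %#llx -> %#llx\n", (unsigned long long)end,
	       (unsigned long long)round_up(end, BLOCK_SIZE));
	return 0;
}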
Example #20
		.virtual	= IO_BOOTROM_BASE,
		.pfn		= __phys_to_pfn(IO_BOOTROM_PHY_BASE),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= IO_SECBUS_BASE,
		.pfn		= __phys_to_pfn(IO_SECBUS_PHY_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= IO_SECURE_BASE,
		.pfn		= __phys_to_pfn(IO_SECURE_PHY_BASE),
		.length		= SZ_16K,
		.type		= MT_DEVICE,
	},
#ifdef CONFIG_MESON_SUSPEND
	{
		.virtual	= PAGE_ALIGN(__phys_to_virt(0x9ff00000)),
		.pfn		= __phys_to_pfn(0x9ff00000),
		.length		= SZ_1M,
		.type		= MT_MEMORY_NONCACHED,
	},
#endif

};

void __init meson6tv_map_default_io(void)
{
	iotable_init(meson6tv_io_desc, ARRAY_SIZE(meson6tv_io_desc));
}
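Each map_desc entry above pairs a fixed virtual address with a physical frame number: .pfn is the physical address shifted right by PAGE_SHIFT, and .virtual must be page aligned (0x9ff00000 already is, so PAGE_ALIGN() is a no-op there). A sketch of that arithmetic (the virtual address is an assumed example value):

/* __phys_to_pfn() and PAGE_ALIGN() as used in the io table above. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long phys = 0x9ff00000UL;
	unsigned long virt = 0xc9f00000UL;	/* assumed __phys_to_virt() result */

	printf(".virtual = %#lx, .pfn = %#lx, .length = %#lx\n",
	       PAGE_ALIGN(virt), phys >> PAGE_SHIFT, 0x100000UL);
	return 0;
}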
Example #21
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	create_mapping(start, __phys_to_virt(start), end - start,
			PAGE_KERNEL_EXEC);
}
Example #22
    {rIDE2_BASE,            rIDE2_BASE_PHY,         rIDE2_SIZE,         DOMAIN_IO, 0, 1 },
    {rNAND_BASE,            rNAND_BASE_PHY,         rNAND_SIZE,         DOMAIN_IO, 0, 1 },
    {rSD_BASE,              rSD_BASE_PHY,           rSD_SIZE,           DOMAIN_IO, 0, 1 },
    {rCF_BASE,              rCF_BASE_PHY,           rCF_SIZE,           DOMAIN_IO, 0, 1 },
    {rMSPRO_BASE,           rMSPRO_BASE_PHY,        rMSPRO_SIZE,        DOMAIN_IO, 0, 1 },
    {rUART_BASE,            rUART_BASE_PHY,         rUART_SIZE,         DOMAIN_IO, 0, 1 },
    {rMMX_LFB2PHB_BASE,     rMMX_LFB2PHB_BASE_PHY,  rMMX_LFB2PHB_SIZE,  DOMAIN_IO, 0, 1 },
    {rMMX_LMAPORT_BASE,     rMMX_LMAPORT_BASE_PHY,  rMMX_LMAPORT_SIZE,  DOMAIN_IO, 0, 1 },
    {rMMX_VPP_BASE,         rMMX_VPP_BASE_PHY,      rMMX_VPP_SIZE,      DOMAIN_IO, 0, 1 },
    {rMMX_PHP2LFB_BASE,     rMMX_PHP2LFB_BASE_PHY,  rMMX_PHP2LFB_SIZE,  DOMAIN_IO, 0, 1 },
    {rMMX_VDE_BASE,         rMMX_VDE_BASE_PHY,      rMMX_VDE_SIZE,      DOMAIN_IO, 0, 1 },
    LAST_DESC,
};
#else
static struct map_desc pl1029_io_desc[] __initdata = {
    { __phys_to_virt(0x18000000), 0x18000000,   (0x20000000 - 0x18000000), DOMAIN_IO, 0, 1 },
    LAST_DESC,
};
#endif

#if 1   /* for low-level debug only; must map eb000000 before invocation */
void ll_puts(const char *s)
{
	for (; *s != '\0'; s++) {
		if (*s == '\n') {
			while (__raw_readb(0xdb000400) & 0x02)
				;	/* wait until the FIFO is not full */
			__raw_writeb('\r', 0xdb000401);
		}
		while (__raw_readb(0xdb000400) & 0x02)
			;	/* wait until the FIFO is not full */
		__raw_writeb(*s, 0xdb000401);
	}
}