/*
 * Copy the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of these pages.
 */
static void v6_copy_user_highpage_nonaliasing(struct page *to,
	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kfrom = kmap_atomic(from, KM_USER0);
	kto = kmap_atomic(to, KM_USER1);
	copy_page(kto, kfrom);
	__cpuc_flush_dcache_area(kto, PAGE_SIZE);
	kunmap_atomic(kto, KM_USER1);
	kunmap_atomic(kfrom, KM_USER0);
}
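For context: which of the v6 copy routines ARM actually uses is selected at boot through the cpu_user_fns function table. A sketch from memory of how arch/arm/mm/copypage-v6.c wires this up (details differ between kernel versions, so treat it as illustrative rather than authoritative):

struct cpu_user_fns v6_user_fns __initdata = {
	.cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
	.cpu_copy_user_highpage	= v6_copy_user_highpage_nonaliasing,
};

static int __init v6_userpage_init(void)
{
	if (cache_is_vipt_aliasing()) {
		/* Aliasing VIPT D-cache: switch to the colour-aware variants. */
		spin_lock_init(&v6_lock);
		cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
		cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
	}
	return 0;
}

core_initcall(v6_userpage_init);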
Example #2
/*
 * Allocate new pmd(s). It is checked whether the old pmd is still in place.
 * If not, nothing is changed. This is okay as the only reason for allocating
 * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by an individual
 * pmd. In case of PAE/x86-32 there are multiple pmds to allocate!
 */
static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg)
{
    pte_t *ptechk;
    pte_t *pte_newpg[PMDS_PER_MID_PAGE];
    pmd_t *pmdp;
    unsigned int level;
    unsigned long flags;
    unsigned long vaddr;
    int i;

    /* Do all allocations first to bail out in error case. */
    for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
        pte_newpg[i] = alloc_p2m_page();
        if (!pte_newpg[i]) {
            for (i--; i >= 0; i--)
                free_p2m_page(pte_newpg[i]);

            return NULL;
        }
    }

    vaddr = addr & ~(PMD_SIZE * PMDS_PER_MID_PAGE - 1);

    for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
        copy_page(pte_newpg[i], pte_pg);
        paravirt_alloc_pte(&init_mm, __pa(pte_newpg[i]) >> PAGE_SHIFT);

        pmdp = lookup_pmd_address(vaddr);
        BUG_ON(!pmdp);

        spin_lock_irqsave(&p2m_update_lock, flags);

        ptechk = lookup_address(vaddr, &level);
        if (ptechk == pte_pg) {
            set_pmd(pmdp,
                    __pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE));
            pte_newpg[i] = NULL;
        }

        spin_unlock_irqrestore(&p2m_update_lock, flags);

        if (pte_newpg[i]) {
            paravirt_release_pte(__pa(pte_newpg[i]) >> PAGE_SHIFT);
            free_p2m_page(pte_newpg[i]);
        }

        vaddr += PMD_SIZE;
    }

    return lookup_address(addr, &level);
}
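The allocate/check/free dance in alloc_p2m_pmd() keeps the allocation out of the spinlock's critical section: allocate optimistically, revalidate under the lock, and discard on a lost race. A minimal sketch of the pattern, using hypothetical stand-ins (alloc_obj(), still_needed(), install() and free_obj() are not kernel APIs):

	struct obj *new_obj = alloc_obj();	/* allocate outside the lock */
	unsigned long flags;

	spin_lock_irqsave(&update_lock, flags);
	if (still_needed()) {			/* revalidate under the lock */
		install(new_obj);
		new_obj = NULL;			/* ownership transferred */
	}
	spin_unlock_irqrestore(&update_lock, flags);

	if (new_obj)				/* lost the race: discard our copy */
		free_obj(new_obj);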
Example #3
/*
 * Restore page contents for physical pages that were in use while the
 * hibernation image was being loaded.  Switch to idmap_pgd so the
 * physical page tables are overwritten with the same contents.
 */
static void notrace arch_restore_image(void *unused)
{
#ifdef CONFIG_MTK_HIBERNATION
	mtk_arch_restore_image();
#else
	struct pbe *pbe;

	cpu_switch_mm(idmap_pgd, &init_mm);
	for (pbe = restore_pblist; pbe; pbe = pbe->next)
		copy_page(pbe->orig_address, pbe->address);

	_soft_restart(virt_to_phys(cpu_resume), false);
#endif
}
Example #4
/* if pagedir_p != NULL it also copies the counted pages */
static int count_and_copy_data_pages(struct pbe *pagedir_p)
{
	int chunk_size;
	int nr_copy_pages = 0;
	int pfn;
	struct page *page;
	
#ifdef CONFIG_DISCONTIGMEM
	panic("Discontingmem not supported");
#else
	BUG_ON (max_pfn != num_physpages);
#endif
	for (pfn = 0; pfn < max_pfn; pfn++) {
		page = pfn_to_page(pfn);
		if (PageHighMem(page))
			panic("Swsusp not supported on highmem boxes. Send 1GB of RAM to <*****@*****.**> and try again ;-).");

		if (!PageReserved(page)) {
			if (PageNosave(page))
				continue;

			if ((chunk_size=is_head_of_free_region(page))!=0) {
				pfn += chunk_size - 1;
				continue;
			}
		} else if (PageReserved(page)) {
			BUG_ON (PageNosave(page));

			/*
			 * Just copy the whole code segment. Hopefully it is not that big.
			 */
			if ((ADDRESS(pfn) >= (unsigned long) ADDRESS2(&__nosave_begin)) && 
			    (ADDRESS(pfn) <  (unsigned long) ADDRESS2(&__nosave_end))) {
				PRINTK("[nosave %lx]", ADDRESS(pfn));
				continue;
			}
			/* Hmm, perhaps copying all reserved pages is not too healthy as they may contain 
			   critical bios data? */
		} else	BUG();

		nr_copy_pages++;
		if (pagedir_p) {
			pagedir_p->orig_address = ADDRESS(pfn);
			copy_page((void *) pagedir_p->address, (void *) pagedir_p->orig_address);
			pagedir_p++;
		}
	}
	return nr_copy_pages;
}
Example #5
static void copy_pages(void)
{
	struct pbe * p = pagedir_save;
	unsigned long pfn;
	int n = 0;

	for (pfn = 0; pfn < max_pfn; pfn++) {
		if (saveable(&pfn)) {
			n++;
			p->orig_address = ADDRESS(pfn);
			copy_page((void *) p->address, 
				  (void *) p->orig_address);
			p++;
		}
	}
	BUG_ON(n != pmdisk_pages);
}
Example #6
static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
{
#if 0
   unsigned int offset = CACHE_COLOUR(vaddr);
   unsigned long from, to;
   struct page *page = virt_to_page(kfrom);

   if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
      __flush_dcache_page(page_mapping(page), page);

   /*
    * Discard data in the kernel mapping for the new page.
    * FIXME: this needs the MCRR instruction to be supported.
    */
   __asm__("mcrr  p15, 0, %1, %0, c6   @ 0xec401f06"
           :
           : "r" (kto),
             "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
           : "cc");
#endif

   /*
    * Now copy the page using the same cache colour as the
    * page's ultimate destination.
    */
   spin_lock(&v6_lock);

#if 0
   set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, PAGE_KERNEL), 0);
   set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, PAGE_KERNEL), 0);

   from = from_address + (offset << PAGE_SHIFT);
   to   = to_address + (offset << PAGE_SHIFT);

   flush_tlb_kernel_page(from);
   flush_tlb_kernel_page(to);
#endif

   cpu_flush_tlb_all();

   copy_page(kto, kfrom);

   spin_unlock(&v6_lock);
}
Example #7
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long u_vaddr, struct vm_area_struct *vma)
{
	void *kfrom = page_address(from);
	void *kto = page_address(to);
	int clean_src_k_mappings = 0;

	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with the K-mapping, sync the former to the physical
	 * page so that the K-mapping used by the copy below sees the right data
	 *
	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
	 * equally valid for SRC page as well
	 */
	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page(kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page(kto, kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). The finalization could have been done here
	 * as well (given that both vaddr/paddr are available), but
	 * update_mmu_cache() already has code to do that for other
	 * non-copied user pages (e.g. read faults which wire in the
	 * pagecache page directly).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * If SRC was already usermapped and non-congruent to the kernel
	 * mapping, sync the kernel mapping back to the physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page(kfrom, kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}
}
Example #8
/*
 * Copy the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of these pages.
 */
static void v6_copy_user_highpage_nonaliasing(struct page *to,
	struct page *from, unsigned long vaddr)
{
	void *kto, *kfrom;

	kfrom = kmap_atomic(from, KM_USER0);
	kto = kmap_atomic(to, KM_USER1);
	copy_page(kto, kfrom);
#ifdef CONFIG_HIGHMEM
	/*
	 * kmap_atomic() doesn't set the page virtual address, and
	 * kunmap_atomic() takes care of cache flushing already.
	 */
	if (page_address(to) != NULL)
#endif
		__cpuc_flush_dcache_page(kto);
	kunmap_atomic(kto, KM_USER1);
	kunmap_atomic(kfrom, KM_USER0);
}
Example #9
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long vto, vfrom, flags, kto, kfrom, pfrom, pto;
	kto = ((unsigned long)page_address(to) & PAGE_MASK);
	kfrom = ((unsigned long)page_address(from) & PAGE_MASK);
	pto = page_to_phys(to);
	pfrom = page_to_phys(from);

	if (aliasing(vaddr, (unsigned long)kfrom))
		cpu_dcache_wb_page((unsigned long)kfrom);
	if (aliasing(vaddr, (unsigned long)kto))
		cpu_dcache_inval_page((unsigned long)kto);
	local_irq_save(flags);
	vto = kremap0(vaddr, pto);
	vfrom = kremap1(vaddr, pfrom);
	copy_page((void *)vto, (void *)vfrom);
	kunmap01(vfrom);
	kunmap01(vto);
	local_irq_restore(flags);
}
Example #10
static int __init init_vdso_vars(void)
{
	int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
	int i;
	char *vbase;

	vdso_size = npages << PAGE_SHIFT;
	vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
	if (!vdso_pages)
		goto oom;
	for (i = 0; i < npages; i++) {
		struct page *p;
		p = alloc_page(GFP_KERNEL);
		if (!p)
			goto oom;
		vdso_pages[i] = p;
		copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
	}

	vbase = vmap(vdso_pages, npages, 0, PAGE_KERNEL);
	if (!vbase)
		goto oom;

	if (memcmp(vbase, ELFMAG, SELFMAG)) {
		printk("VDSO: I'm broken; not ELF\n");
		vdso_enabled = 0;
	}

#define VEXTERN(x) \
	*(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
#include "vextern.h"
#undef VEXTERN
	vunmap(vbase);
	return 0;

 oom:
	printk("Cannot allocate vdso\n");
	vdso_enabled = 0;
	return -ENOMEM;
}
Example #11
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}
Example #12
// Copy from a kernel pointer to a user pointer
void copy_out(int pid, seL4_CPtr reply_cap, copy_out_args* args, int err) {
    if (SOS_DEBUG) printf("copy_out\n");
    if (err || args->count == args->nbyte) {
        args->cb(pid, reply_cap, args, err);
    } else {
        int to_copy = args->nbyte - args->count;
        if ((args->usr_ptr & ~PAGE_MASK) + to_copy > PAGE_SIZE) {
            to_copy = PAGE_SIZE - (args->usr_ptr & ~PAGE_MASK);
        } 
        err = copy_page(args->usr_ptr + args->count
                       ,to_copy
                       ,args->src + args->count
                       ,pid
                       ,copy_out_cb
                       ,args
                       ,reply_cap
                       );
        if (err) {
            args->cb(pid, reply_cap, args, err);
        }
    }
    if (SOS_DEBUG) printf("copy out ended\n");
} 
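The to_copy clamp above guarantees each transfer stays within a single page of the user address space. A quick worked check with hypothetical numbers, assuming the usual PAGE_MASK = ~(PAGE_SIZE - 1) convention with PAGE_SIZE = 0x1000:

	/* usr_ptr = 0x1FF0, nbyte = 0x40:
	 * offset in page = usr_ptr & ~PAGE_MASK = 0xFF0, so the first
	 * chunk is PAGE_SIZE - 0xFF0 = 0x10 bytes; the remaining 0x30
	 * bytes are copied from the next page on the next callback. */
	int first_chunk = PAGE_SIZE - (usr_ptr & ~PAGE_MASK);	/* 0x10 here */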
Example #13
File: p2m.c Project: DenisLug/mptcp
static void __init xen_rebuild_p2m_list(unsigned long *p2m)
{
	unsigned int i, chunk;
	unsigned long pfn;
	unsigned long *mfns;
	pte_t *ptep;
	pmd_t *pmdp;
	int type;

	p2m_missing = alloc_p2m_page();
	p2m_init(p2m_missing);
	p2m_identity = alloc_p2m_page();
	p2m_init(p2m_identity);

	p2m_missing_pte = alloc_p2m_page();
	paravirt_alloc_pte(&init_mm, __pa(p2m_missing_pte) >> PAGE_SHIFT);
	p2m_identity_pte = alloc_p2m_page();
	paravirt_alloc_pte(&init_mm, __pa(p2m_identity_pte) >> PAGE_SHIFT);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		set_pte(p2m_missing_pte + i,
			pfn_pte(PFN_DOWN(__pa(p2m_missing)), PAGE_KERNEL_RO));
		set_pte(p2m_identity_pte + i,
			pfn_pte(PFN_DOWN(__pa(p2m_identity)), PAGE_KERNEL_RO));
	}

	for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += chunk) {
		/*
		 * Try to map missing/identity PMDs or p2m-pages if possible.
		 * We have to respect the structure of the mfn_list_list
		 * which will be built just afterwards.
		 * Chunk size to test is one p2m page if we are in the middle
		 * of a mfn_list_list mid page and the complete mid page area
		 * if we are at index 0 of the mid page. Please note that a
		 * mid page might cover more than one PMD, e.g. on 32 bit PAE
		 * kernels.
		 */
		chunk = (pfn & (P2M_PER_PAGE * P2M_MID_PER_PAGE - 1)) ?
			P2M_PER_PAGE : P2M_PER_PAGE * P2M_MID_PER_PAGE;

		type = xen_p2m_elem_type(pfn);
		i = 0;
		if (type != P2M_TYPE_PFN)
			for (i = 1; i < chunk; i++)
				if (xen_p2m_elem_type(pfn + i) != type)
					break;
		if (i < chunk)
			/* Reset to minimal chunk size. */
			chunk = P2M_PER_PAGE;

		if (type == P2M_TYPE_PFN || i < chunk) {
			/* Use initial p2m page contents. */
#ifdef CONFIG_X86_64
			mfns = alloc_p2m_page();
			copy_page(mfns, xen_p2m_addr + pfn);
#else
			mfns = xen_p2m_addr + pfn;
#endif
			ptep = populate_extra_pte((unsigned long)(p2m + pfn));
			set_pte(ptep,
				pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL));
			continue;
		}

		if (chunk == P2M_PER_PAGE) {
			/* Map complete missing or identity p2m-page. */
			mfns = (type == P2M_TYPE_MISSING) ?
				p2m_missing : p2m_identity;
			ptep = populate_extra_pte((unsigned long)(p2m + pfn));
			set_pte(ptep,
				pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL_RO));
			continue;
		}

		/* Complete missing or identity PMD(s) can be mapped. */
		ptep = (type == P2M_TYPE_MISSING) ?
			p2m_missing_pte : p2m_identity_pte;
		for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
			pmdp = populate_extra_pmd(
				(unsigned long)(p2m + pfn) + i * PMD_SIZE);
			set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE));
		}
	}
}
Example #14
CEvent_editor_dialog::CEvent_editor_dialog(CEvent event,
                                           qint32 ticket_id,
                                           QList<template_xml_element> *elements, //the templates
                                           QHash<quint32, QString> *bool_names_pnt,
                                           QHash<quint32, QString> *variable_names_pnt,
                                           QHash<quint32, QString> *text_names_pnt,
                                           QString projectPath )
{
    // Event Edit Dialog Constructor
    //* 1   Storing Overloaded Values
    //* 1.1 Validating received cacheEvent
    //* 2   Translating Hash to QStringList
    //* 3   Creating GUI
    //* 4   Loading initial Data

    //* 1 Storing Overloaded Values
    this->cacheEvent = event; // store the received event; referenced below for the window title and pages
    this->ticket_id = ticket_id;
    this->elements = elements; // the template elements
    this->previous_listWidgetIndex = 0;
    this->project_path = projectPath;

    //* 2 Translating Hash to QStringList

    //bools:: 1 :: getting biggest index
    quint32 bools_max = 0;
    QHash<quint32, QString>::iterator b_it = bool_names_pnt->begin();
    for (; b_it != bool_names_pnt->end(); ++b_it)
    {
        if ( b_it.key() >= bools_max )
            bools_max = b_it.key();
//        qDebug() << i.key() << ": " << i.value() << endl;
    }
    //bools:: 2 :: filling list
    for ( quint32 j = 0; j <= bools_max; j++ )
        this->names_bool.append( bool_names_pnt->value( j ) );

    //vars:: 1 :: getting biggest index
    quint32 vars_max = 0;
    QHash<quint32, QString>::iterator v_it = variable_names_pnt->begin();
    for (; v_it != variable_names_pnt->end(); ++v_it)
        if ( v_it.key() >= vars_max )
            vars_max = v_it.key();
    //vars:: 2 :: filling list
    for ( quint32 j = 0; j <= vars_max; j++ )
        this->names_variable.append( variable_names_pnt->value( j ) );

    //texts:: 1 :: getting biggest index
    quint32 texts_max = 0;
    QHash<quint32, QString>::iterator t_it = text_names_pnt->begin();
    for (; t_it != text_names_pnt->end(); ++t_it)
        if ( t_it.key() >= texts_max )
            texts_max = t_it.key();
    //texts:: 2 :: filling list
    for ( quint32 j = 0; j <= texts_max; j++ )
        this->names_texts.append( text_names_pnt->value( j ) );



    //* 3 Creating GUI
    this->widget_edit = new Cevent_input_mask( &this->names_bool, &this->names_variable, projectPath );
    QObject::connect( this->widget_edit->gui_enable_add_condition_btn, SIGNAL(clicked()),
                      this, SLOT(re_minHeight_input_mask()) );

    this->widget_templates = new Ctemplatez_page( );
    QObject::connect( this->widget_templates->pickbox, SIGNAL(currentIndexChanged(int)),
                      this, SLOT(switch_behaviour(int)));
    QObject::connect( this->widget_templates, SIGNAL(apply_code()),
                      this, SLOT(confirm_install_template()) );

    this->btn_templates_edit = new QPushButton( QIcon( ":/resources/img/event_editor_code_snippets.png" ), tr( "Templates" ), this );
    this->btn_templates_edit->setIconSize( QSize( 48,48 ) );
    QObject::connect( this->btn_templates_edit, SIGNAL(clicked()),
                      this, SLOT(switch_templates_and_edit()) );


    this->area_edit = new QScrollArea();
    this->area_edit->setAlignment( Qt::AlignCenter );
    this->area_edit->setWidget( this->widget_edit );
    this->area_edit->setBackgroundRole( QPalette::Dark );
    this->area_edit->setHorizontalScrollBarPolicy( Qt::ScrollBarAlwaysOff );
    this->area_template = new QScrollArea();
    this->area_template->setAlignment( Qt::AlignCenter );
    this->area_template->setWidget( this->widget_templates );
    this->area_template->setBackgroundRole( QPalette::Dark );
    this->area_template->setHorizontalScrollBarPolicy( Qt::ScrollBarAlwaysOff );

    this->stackPit = new QStackedWidget( this );
    this->stackPit->setSizePolicy( QSizePolicy::MinimumExpanding, QSizePolicy::Expanding );
    this->stackPit->addWidget( this->area_edit );
    this->stackPit->addWidget( this->area_template );

    this->pages_label = new QLabel( tr( "Pages" ) );
    this->pages_label->setAlignment( Qt::AlignCenter );
    this->pages_listWidget = new QListWidget();
    this->pages_listWidget->setCurrentRow( 0 );
    QObject::connect( this->pages_listWidget, SIGNAL(clicked(QModelIndex)),
                      this, SLOT(switch_page()) );

    QString str = QString( "%1" ).arg( event.tileLocation );
    this->label_event_info = new QLabel( event.title + "@" + str );
    this->label_event_info->setAlignment( Qt::AlignCenter );
    this->btn_cancel = new QPushButton( QIcon( ":/resources/img/cancel.png" ), tr( "Cancel" ) );
    this->btn_save_exit = new QPushButton( QIcon( ":/resources/img/save.png" ), tr( "Save and exit" ) );

    QObject::connect( this->btn_cancel, SIGNAL(clicked()), this, SLOT(close()));
    QObject::connect( this->btn_save_exit, SIGNAL(clicked()),this, SLOT(exit_saving()) );


    this->pages_control_btn = new QToolButton();
    this->pages_control_btn->setIcon( QIcon( ":/resources/img/tool.png" ) );

    QMenu *m = new QMenu( tr( "Pages control" ) );
    m->addAction( QIcon( ":/resources/img/edit-add.png" ), tr( "Add new page" ) );
    m->addAction( QIcon( ":/resources/img/edit-copy.png" ), tr( "Copy current page" ) );
    m->addAction( QIcon( ":/resources/img/edit-paste.png" ), tr( "Paste previous copied or cut page" ) );
    m->addAction( QIcon( ":/resources/img/edit-cut.png" ), tr( "Cut current page (Copy+Delete)" ) );
    m->addAction( QIcon( ":/resources/img/edit-delete.png" ), tr( "Delete current page" ) );
    m->addSeparator();
    m->addAction( QIcon( ":/resources/img/edit-rename.png" ), tr( "Rename Event" ) );
    m->addAction( QIcon( ":/resources/img/edit-rename.png" ), tr( "Relocalte Event" ) );
    this->pages_control_btn->setMenu( m );
    this->pages_control_btn->setPopupMode( QToolButton::InstantPopup );

    QObject::connect( m->actions()[0], SIGNAL(triggered()), this, SLOT(add_page()) );
    QObject::connect( m->actions()[1], SIGNAL(triggered()), this, SLOT(copy_page()) );
    QObject::connect( m->actions()[2], SIGNAL(triggered()), this, SLOT(paste_page()) );
    QObject::connect( m->actions()[3], SIGNAL(triggered()), this, SLOT(cut_page()) );
    QObject::connect( m->actions()[4], SIGNAL(triggered()), this, SLOT(del_page()) );
    //5 is the separator
    QObject::connect( m->actions()[6], SIGNAL(triggered()), this, SLOT(rename_event()) );
    QObject::connect( m->actions()[7], SIGNAL(triggered()), this, SLOT(relocate_event()) );

    //to forbid pasting empty pages at startup
    m->actions()[3]->setEnabled( false );


    QHBoxLayout *h = new QHBoxLayout();

    h->addWidget( this->pages_label );
    h->addWidget( this->pages_control_btn );

    this->vLay = new QVBoxLayout();
    this->vLay->addWidget( this->label_event_info );
    this->vLay->addLayout( h );
    this->vLay->addWidget( this->pages_listWidget );
    this->vLay->addWidget( this->btn_templates_edit );

    this->lay = new QHBoxLayout();
    QWidget *w = new QWidget();
    w->setLayout( this->vLay );
    w->setMaximumWidth( 150 );

    QHBoxLayout *downer_bar = new QHBoxLayout();
    downer_bar->addStretch();
    downer_bar->addWidget( this->btn_cancel );
    downer_bar->addWidget( this->btn_save_exit );

    QVBoxLayout *v = new QVBoxLayout();
    v->addWidget( this->stackPit );
    v->addLayout( downer_bar );

    this->lay->addWidget( w );
    this->lay->addLayout( v );


    this->setLayout( this->lay );

    this->setWindowTitle( this->cacheEvent.title );
    this->setWindowIcon( QIcon( ":/resources/img/tokens_mini_event.png" ) );
//    this->resize( this->area_edit->width() + this->btn_templates_edit->width() + 44 , 400 );


    //* 4 Loading initial Data

    // appending code snippet elements from the pointer
    this->widget_templates->pickbox->clear();
    for ( int i = 0; i < this->elements->count(); i++ )
    {
//        QListWidgetItem *lwi = new QListWidgetItem(
//                QIcon( this->project_path + "/res/code_snippets/" + this->elements[i].iconSmall ),
//                this->elements[i].title );
        this->widget_templates->pickbox->addItem( QIcon( this->elements->at(i).iconSmall ),
                                                   this->elements->at(i).title );
    }
    //setting some first values
    if ( this->elements->count() > 0 )
    {
        this->widget_templates->label_big_symbol->setPixmap( QPixmap( this->elements->at(0).iconBig ) );
        this->widget_templates->label_description_text->setText( this->elements->at(0).description );
    }

    //loading original osps

    if ( this->cacheEvent.pages.count() >= 1 )
        this->widget_edit->gui_behavior_cmd_widget->setOSPs( this->cacheEvent.pages[0].original_commandos_string );
    else
        this->add_page(); // to have at least an empty page

    this->rebuild_listwidget_entries_index();
    this->put_original_strings_into_gui(); // the current page is 0 at this time


    this->resize(840,400);
}
Example #15
static int uts_arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct uts_namespace *uts_ns = current->nsproxy->uts_ns;
	struct ve_struct *ve = get_exec_env();
	int i, n1, n2, n3, new_version;
	struct page **new_pages, **p;

	/*
	 * For the node (host), or in case we've not changed UTS, simply
	 * map the preallocated original vDSO.
	 *
	 * In turn if we already allocated one for this UTS
	 * simply reuse it. It improves speed significantly.
	 */
	if (uts_ns == &init_uts_ns)
		goto map_init_uts;
	/*
	 * Dirty lockless hack. Strictly speaking
	 * we need to return @p here if it's non-nil,
	 * but since there is only one transition possible
	 * { =0 ; !=0 } we simply return @uts_ns->vdso.pages
	 */
	p = ACCESS_ONCE(uts_ns->vdso.pages);
	smp_read_barrier_depends();
	if (p)
		goto map_uts;

	if (sscanf(uts_ns->name.release, "%d.%d.%d", &n1, &n2, &n3) == 3) {
		/*
		 * If there were no changes on version simply reuse
		 * preallocated one.
		 */
		new_version = KERNEL_VERSION(n1, n2, n3);
		if (new_version == LINUX_VERSION_CODE)
			goto map_init_uts;
	} else {
		/*
		 * If the admin passed a malformed string here,
		 * let's warn once but continue working without
		 * vDSO virtualization at all. That's better than
		 * bailing out with an error.
		 */
		pr_warn_once("Wrong release uts name format detected."
			     " Ignoring vDSO virtualization.\n");
		goto map_init_uts;
	}

	mutex_lock(&vdso_mutex);
	if (uts_ns->vdso.pages) {
		mutex_unlock(&vdso_mutex);
		goto map_uts;
	}

	uts_ns->vdso.nr_pages	= init_uts_ns.vdso.nr_pages;
	uts_ns->vdso.size	= init_uts_ns.vdso.size;
	uts_ns->vdso.version_off= init_uts_ns.vdso.version_off;
	new_pages		= kmalloc(sizeof(struct page *) * init_uts_ns.vdso.nr_pages, GFP_KERNEL);
	if (!new_pages) {
		pr_err("Can't allocate vDSO pages array for VE %d\n", ve->veid);
		goto out_unlock;
	}

	for (i = 0; i < uts_ns->vdso.nr_pages; i++) {
		struct page *p = alloc_page(GFP_KERNEL);
		if (!p) {
			pr_err("Can't allocate page for VE %d\n", ve->veid);
			for (; i > 0; i--)
				put_page(new_pages[i - 1]);
			kfree(new_pages);
			goto out_unlock;
		}
		new_pages[i] = p;
		copy_page(page_address(p), page_address(init_uts_ns.vdso.pages[i]));
	}

	uts_ns->vdso.addr = vmap(new_pages, uts_ns->vdso.nr_pages, 0, PAGE_KERNEL);
	if (!uts_ns->vdso.addr) {
		pr_err("Can't map vDSO pages for VE %d\n", ve->veid);
		for (i = 0; i < uts_ns->vdso.nr_pages; i++)
			put_page(new_pages[i]);
		kfree(new_pages);
		goto out_unlock;
	}

	*((int *)(uts_ns->vdso.addr + uts_ns->vdso.version_off)) = new_version;
	smp_wmb();
	uts_ns->vdso.pages = new_pages;
	mutex_unlock(&vdso_mutex);

	pr_debug("vDSO version transition %d -> %d for VE %d\n",
		 LINUX_VERSION_CODE, new_version, ve->veid);

map_uts:
	return setup_additional_pages(bprm, uses_interp, uts_ns->vdso.pages, uts_ns->vdso.size);
map_init_uts:
	return setup_additional_pages(bprm, uses_interp, init_uts_ns.vdso.pages, init_uts_ns.vdso.size);
out_unlock:
	mutex_unlock(&vdso_mutex);
	return -ENOMEM;
}
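The "dirty lockless hack" above relies on publish-after-initialize ordering: the writer fully initializes the new vDSO copy, issues smp_wmb(), and only then stores the pages pointer, while the reader loads the pointer first and uses the data only if it is non-NULL. A minimal sketch of the idiom (init_everything() and use() are placeholders):

	/* writer */
	init_everything(obj);
	smp_wmb();			/* order init before publish */
	shared_ptr = obj;		/* publish last */

	/* reader */
	p = ACCESS_ONCE(shared_ptr);
	smp_read_barrier_depends();	/* pairs with the smp_wmb() */
	if (p)
		use(p);			/* sees fully initialized data */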
Example #16
/*
 * Allocate pages for the vdso and vvar, and copy in the vdso text from the
 * kernel image.
 */
int __init init_vdso_image(const struct vdso_image *image,
		struct vm_special_mapping *vdso_mapping)
{
	int i;
	struct page *dp, **dpp = NULL;
	int dnpages = 0;
	struct page *cp, **cpp = NULL;
	int cnpages = (image->size) / PAGE_SIZE;

	/*
	 * First, the vdso text.  This is initialized data, an integral number of
	 * pages long.
	 */
	if (WARN_ON(image->size % PAGE_SIZE != 0))
		goto oom;

	cpp = kcalloc(cnpages, sizeof(struct page *), GFP_KERNEL);
	vdso_mapping->pages = cpp;

	if (!cpp)
		goto oom;

	if (vdso_fix_stick) {
		/*
		 * If the system uses %tick instead of %stick, patch the VDSO
		 * with instruction reading %tick instead of %stick.
		 */
		unsigned int j, k = SAVE_INSTR_SIZE;
		unsigned char *data = image->data;

		for (j = image->sym_vread_tick_patch_start;
		     j < image->sym_vread_tick_patch_end; j++) {

			data[image->sym_vread_tick + k] = data[j];
			k++;
		}
	}

	for (i = 0; i < cnpages; i++) {
		cp = alloc_page(GFP_KERNEL);
		if (!cp)
			goto oom;
		cpp[i] = cp;
		copy_page(page_address(cp), image->data + i * PAGE_SIZE);
	}

	/*
	 * Now the vvar page.  This is uninitialized data.
	 */

	if (vvar_data == NULL) {
		dnpages = (sizeof(struct vvar_data) / PAGE_SIZE) + 1;
		if (WARN_ON(dnpages != 1))
			goto oom;
		dpp = kcalloc(dnpages, sizeof(struct page *), GFP_KERNEL);
		vvar_mapping.pages = dpp;

		if (!dpp)
			goto oom;

		dp = alloc_page(GFP_KERNEL);
		if (!dp)
			goto oom;

		dpp[0] = dp;
		vvar_data = page_address(dp);
		memset(vvar_data, 0, PAGE_SIZE);

		vvar_data->seq = 0;
	}

	return 0;
 oom:
	if (cpp != NULL) {
		for (i = 0; i < cnpages; i++) {
			if (cpp[i] != NULL)
				__free_page(cpp[i]);
		}
		kfree(cpp);
		vdso_mapping->pages = NULL;
	}

	if (dpp != NULL) {
		for (i = 0; i < dnpages; i++) {
			if (dpp[i] != NULL)
				__free_page(dpp[i]);
		}
		kfree(dpp);
		vvar_mapping.pages = NULL;
	}

	pr_warn("Cannot allocate vdso\n");
	vdso_enabled = 0;
	return -ENOMEM;
}
Example #17
void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
{
	copy_page(kto, kfrom);
	__flush_dcache_area(kto, PAGE_SIZE);
}
Example #18
/*
 * Copy the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of these pages.
 */
void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
{
	copy_page(kto, kfrom);
}
Example #19
/*
 * This routine handles present pages, when users try to write
 * to a shared page. It is done by copying the page to a new address
 * and decrementing the shared-page counter for the old page.
 *
 * Goto-purists beware: the only reason for goto's here is that it results
 * in better assembly code.  The "default" path will see no jumps at all.
 *
 * Note that this routine assumes that the protection checks have been
 * done by the caller (the low-level page fault routine in most cases).
 * Thus we can safely just mark it writable once we've done any necessary
 * COW.
 *
 * We also mark the page dirty at this point even though the page will
 * change only once the write actually happens. This avoids a few races,
 * and potentially makes it more efficient.
 */
void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma,
	unsigned long address, int write_access)
{
	pgd_t *page_dir;
	pmd_t *page_middle;
	pte_t *page_table, pte;
	unsigned long old_page, new_page;

	new_page = __get_free_page(GFP_KERNEL);
	page_dir = pgd_offset(vma->vm_mm, address);
	if (pgd_none(*page_dir))
		goto end_wp_page;
	if (pgd_bad(*page_dir))
		goto bad_wp_pagedir;
	page_middle = pmd_offset(page_dir, address);
	if (pmd_none(*page_middle))
		goto end_wp_page;
	if (pmd_bad(*page_middle))
		goto bad_wp_pagemiddle;
	page_table = pte_offset(page_middle, address);
	pte = *page_table;
	if (!pte_present(pte))
		goto end_wp_page;
	if (pte_write(pte))
		goto end_wp_page;
	old_page = pte_page(pte);
	if (old_page >= high_memory)
		goto bad_wp_page;
	tsk->min_flt++;
	/*
	 * Do we need to copy?
	 */
	if (mem_map[MAP_NR(old_page)].count != 1) {
		if (new_page) {
			if (PageReserved(mem_map + MAP_NR(old_page)))
				++vma->vm_mm->rss;
			copy_page(old_page,new_page);
			flush_page_to_ram(old_page);
			flush_page_to_ram(new_page);
			flush_cache_page(vma, address);
			set_pte(page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
			free_page(old_page);
			flush_tlb_page(vma, address);
			return;
		}
		flush_cache_page(vma, address);
		set_pte(page_table, BAD_PAGE);
		flush_tlb_page(vma, address);
		free_page(old_page);
		oom(tsk);
		return;
	}
	flush_cache_page(vma, address);
	set_pte(page_table, pte_mkdirty(pte_mkwrite(pte)));
	flush_tlb_page(vma, address);
	if (new_page)
		free_page(new_page);
	return;
bad_wp_page:
	printk("do_wp_page: bogus page at address %08lx (%08lx)\n",address,old_page);
	send_sig(SIGKILL, tsk, 1);
	goto end_wp_page;
bad_wp_pagemiddle:
	printk("do_wp_page: bogus page-middle at address %08lx (%08lx)\n", address, pmd_val(*page_middle));
	send_sig(SIGKILL, tsk, 1);
	goto end_wp_page;
bad_wp_pagedir:
	printk("do_wp_page: bogus page-dir entry at address %08lx (%08lx)\n", address, pgd_val(*page_dir));
	send_sig(SIGKILL, tsk, 1);
end_wp_page:
	if (new_page)
		free_page(new_page);
	return;
}
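Condensed, the decision above is the textbook copy-on-write write fault: copy only when the page is shared, otherwise make the existing mapping writable in place. Note this era's copy_page(from, to) argument order, the reverse of the modern copy_page(to, from). A sketch with an illustrative helper (install_writable_pte() is not a real kernel API):

	if (mem_map[MAP_NR(old_page)].count > 1) {	/* shared: copy */
		copy_page(old_page, new_page);		/* (from, to) here! */
		install_writable_pte(vma, address, new_page);
		free_page(old_page);			/* drop our reference */
	} else {					/* sole user: reuse in place */
		set_pte(page_table, pte_mkdirty(pte_mkwrite(pte)));
	}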
Example #20
int main(void)
{
    WORD idx;
    struct user_info *user;
    char line[32];

    UART_DATA = 0x21;
    
    GPIO_SET2   = DRIVE_RESET | C64_RESET;

	memset(&mountfile, 0 ,sizeof(mountfile));

    // no cart
    CART_MODE = 0x00;

    GPIO_CLEAR2 = C64_RESET;
    
    wait2sec();

    // read index
    init_freeze();
    screen_initialized = 1;
    // now we can use the console :)

    // initialize timer IRQ for keyboard scan
    IRQ_TIMER    = 3906; // 50 Hz
    
    if(!iec_test()) {
        printf("IEC cable seems faulty.\n");
        while(1);
    }

/*
    {
        WORD w;
        BYTE b;
        BYTE *mem = (BYTE *)MAP1_ADDR;
        DWORD addr = 0x1400000;
		MAPPER_MAP1 = (addr >> 13);

		w = (addr & 0x1FFF);
		for(b=0;b<32;b++) {
			if((b & 7)==0) {
				printf("\n%07lx: ", addr + (DWORD)b);
                if((w + b) >= 0x2000) {
                    MAPPER_MAP1 = MAPPER_MAP1 + 1;
                    w -= 0x2000;
                }
            }
			printf("%02x ", mem[w + b]);
		}
		printf("\n");
    }

    ethernet_test();
    while(1)
        ;
*/

    // read index
    idx = read_index();
    if(idx == 0xFFFF) {
        idx = 0;
        printf("Couldn't read index file.\n");
    }


/*
    {
        BYTE b;
        
        // dump content of first 144 bytes of one_wire rom
        onewire_reset();
        onewire_sendbyte(0xCC); // skip rom
        onewire_sendbyte(0xF0); // read memory
        onewire_sendbyte(0); // starting address
        onewire_sendbyte(0);
        
        for(b=0;b<144;b++) {
            if((b % 16)==0) {
                printf("\n0%02x: ", b);
            }
            printf("%02x ", onewire_getbyte());
        } printf("\n");
    }
    printf("!Enter your name:\n");
    read(0, line, 32);    
    printf("Your name is: %s.\n", line);
*/
    
    // get user name
    user = get_user(idx);

    printf("The unit should be a %s version.\n", user->type);
    if(!test_sdram()) {
        if(user->type[0] != 'b') {
            printf("SDRAM test failed...\n Are you sure you got a plus version?\nStop.\n");
            while(1);
        }
    } else {
        printf("SDRAM is ok!\n");
        if(user->type[0] == 'b') {
            printf(" User requested a basic version...\n Are you sure you want to continue?\n");
//            scanf("%s", line);
            read(0, line, 32);
//    		uart_read_buffer(line, 32);
    		if(line[0] != 'y') {
    		    printf("Stop.\n");
    		    while(1);
    		}
        }
    }
    
    if(user->type[0] == 'e') { // ethernet version
        if(!ethernet_test()) {
            printf("Ethernet failed.\n");
            while(1);
        }
    }

    if(!flash_roms()) {
        printf("Flashing ROMS Failed.\n");
        while(1);
    }

    if(!personalize(user)) {
        printf("PERSONALIZING FAILED... STOPPED!!\n");
        while(1);
    }
        
    // store where we left off
    if(!write_index(idx+1)) {
        printf("WARNING!! Writing index failed.\n");
    }
    
    printf("Programming cart successful...\n");

    // Init 1541 to enable sound for testing
    copy_page(0x0836, ROM1541_ADDR);
    copy_page(0x0837, ROM1541_ADDR+1);
    GPIO_CLEAR2 = DRIVE_RESET;

    while(1);
}
Example #21
void page_demo ()
{

char	*Error1 = "Failure during SET_VGA_MODEX (Mode_320x200, 384, 224, 3) call";

int		Last_Objects[2], Visible_Objects;

int		Screen_X = 384;
int		Screen_Y = 224;

int		x, y, z;
int		c, dc;
int		x1, y1, x2, y2;

int		Sprite_X, Sprite_Y;
int		Current_Page;
int		New_X, New_Y;

int		View_X,	View_Y,	View_Max, View_Cnt, View_XD, View_YD;
int		Set_Color, Prev_Color, S_Dir, P_Dir;

int		Demo_Running = True;
int		redo, code;

int		pee = set_vga_modex(Mode_320x200, Screen_X, Screen_Y, 3);
	if (pee > 0)
	{
		set_video_mode (3);
		dos_print (Error1);
		fprintf(stdout, "return value is %d\n", pee);
		//error_out (Error1);
		exit (EXIT_SUCCESS);
	}

	set_active_page (0);
	clear_vga_screen (c_BLACK);

   	print_str ("This is a Test of the Following Functions:", 99, 10, 9, c_bWHITE, c_BLACK);

	draw_line (10, 18, 350, 18, c_YELLOW);
	print_str ("SET_ACTIVE_PAGE", 99, 10, 20, c_bBLUE, c_BLACK);
	print_str ("SET_DISPLAY_PAGE", 99, 10, 30, c_GREEN, c_BLACK);
	print_str ("SET_DAC_REGISTER", 99, 10, 40, c_RED, c_BLACK);
	print_str ("CLEAR_VGA_SCREEN", 99, 10, 50, c_CYAN, c_BLACK);

	print_str ("TDRAW_BITMAP", 99, 10, 60, c_PURPLE, c_BLACK);
	print_str ("COPY_PAGE", 99, 10, 70, c_GREEN, c_BLACK);
	print_str ("COPY_BITMAP", 99, 10, 80, c_CYAN, c_BLACK);

	print_str ("GPRINTC", 99, 10, 90, c_BLUE, c_BLACK);
	print_str ("TGPRINTC", 99, 10, 100, c_GREEN, c_BLACK);
	print_str ("SET_WINDOW", 99, 10, 110, c_RED, c_BLACK);

	print_str ("VIRTUAL SCREEN SIZES", 20, 190, 20, c_bBLUE, c_BLACK);
	print_str ("    SMOOTH SCROLLING", 20, 190, 30, c_GREEN, c_BLACK);
	print_str ("    SPRITE ANIMATION", 20, 190, 40, c_CYAN, c_BLACK);
	print_str ("       PAGE FLIPPING", 20, 190, 50, c_RED, c_BLACK);
	print_str ("       COLOR CYCLING", 20, 190, 60, c_PURPLE, c_BLACK);

	for (x = 0; x <=60; x++)
	{
		set_dac_register (50 + x, 3 + x, 0, 60 - x);
		set_dac_register (150 + x, 3 + x, 0, 60 - x);
	}

	c = 0;
	dc = 1;
	for (x = 0; x <= (Screen_X / 2); x++)
	{
		draw_line (Screen_X / 2 - 1, Screen_Y / 4, x, Screen_Y - 1, c + 50);
		draw_line (Screen_X / 2, Screen_Y / 4, Screen_X - x - 1, Screen_Y - 1, c + 50);
		c+= dc;
		if ((c == 0) || (c == 60) ) { dc = -dc;}
	}

	tprint_str ("Press <ANY KEY> to Continue", 99, 72, 190, c_bWHITE);
	tprint_str ("< > = Faster   < > = Slower", 99, 72, 204, c_bGREEN);
	tprint_str ("< > = Fewer Shapes  < > = More Shapes", 99, 32, 218, c_bCYAN);

	tgprintc (43, 80, 204, c_YELLOW);
	tgprintc (45, 200, 204, c_YELLOW);

	tgprintc (25, 40, 218, c_YELLOW);
	tgprintc (24, 200, 218, c_YELLOW);

	copy_page (0, 1);
	copy_page (0, 2);

	for (x = 0; x < MAX_SPRITES; x++)
	{
		do {
			Obj[x].X_Dir = random_int(7) - 3;
			Obj[x].Y_Dir = random_int(7) - 3;
		} while ( (Obj[x].X_Dir == 0) && (Obj[x].Y_Dir == 0) );

		Obj[x].Shape = x % MAX_SHAPES;

		Sprite_X = Img[Obj[x].Shape].X_Width;
		Sprite_Y = Img[Obj[x].Shape].Y_Width;

		Obj[x].X_pos = 1 + random_int(Screen_X - Sprite_X - 2);
		Obj[x].Y_pos = 1 + random_int(Screen_Y - Sprite_Y - 2);

		Obj[x].Last_X[0] = Obj[x].X_pos;
		Obj[x].Last_X[1] = Obj[x].X_pos;
		Obj[x].Last_Y[0] = Obj[x].Y_pos;
		Obj[x].Last_Y[1] = Obj[x].Y_pos;

	}

	Current_Page = 0;

	View_X = 0;
	View_Y = 0;
	View_Max = 3;
	View_Cnt = 0;
	View_XD = 1;
	View_YD = 1;

	Set_Color = 3;
	S_Dir = 1;
	Prev_Color = 0;
	P_Dir = 1;

	Visible_Objects = MAX_SPRITES / 2;
	Last_Objects[0] = 0;
	Last_Objects[1] = 0;

	while (Demo_Running)
	{

		set_active_page (Current_Page);

			/* Erase Old Images */

		for (x = 0; x <= Last_Objects[Current_Page]; x++)
		{
			z = 2;
			y = Obj[x].Shape;
			x1 = Obj[x].Last_X[Current_Page];
			y1 = Obj[x].Last_Y[Current_Page];
			x2 = x1 + Img[y].X_Width -1;
			y2 = y1 + Img[y].Y_Width -1;

			x1 = x1 & 0xfffc;
			x2 = x2 | 0x0003;

			copy_bitmap (z, x1, y1, x2, y2, Current_Page, x1, y1);
		}

			/* Draw new images */

		for (x = 0; x <= Visible_Objects; x++)
		{
			Sprite_X = Img[Obj[x].Shape].X_Width;
			Sprite_Y = Img[Obj[x].Shape].Y_Width;

			/*  Move Sprite */

			do
			{
				redo = False;
				New_X = Obj[x].X_pos + Obj[x].X_Dir;

				if (( New_X < 0 ) || (New_X + Sprite_X > Screen_X) )
				{
					Obj[x].X_Dir = -Obj[x].X_Dir;
					if (random_int(20) == 1)
					{
						do
						{
							Obj[x].X_Dir = random_int(7) - 3;
							Obj[x].Y_Dir = random_int(7) - 3;
						} while ( (Obj[x].X_Dir == 0) && (Obj[x].Y_Dir == 0) );
						redo = True;
					}
				}
			} while (redo);
			Obj[x].X_pos = Obj[x].X_pos + Obj[x].X_Dir;


			do
			{
				redo = False;
				New_Y = Obj[x].Y_pos + Obj[x].Y_Dir;

				if ( (New_Y < 0) || (New_Y + Sprite_Y > Screen_Y) )
				{
					Obj[x].Y_Dir = -Obj[x].Y_Dir;
					if (random_int(20) == 1)
					{
						do
						{
							Obj[x].X_Dir = random_int(7) - 3;
							Obj[x].Y_Dir = random_int(7) - 3;
						} while ( (Obj[x].X_Dir == 0) && (Obj[x].Y_Dir == 0) );
						redo = True;
					}
				}
			} while (redo);

			Obj[x].Y_pos = Obj[x].Y_pos + Obj[x].Y_Dir;

			/* Draw Sprite */

			tdraw_bitmap ((char far*) &Img[Obj[x].Shape], Obj[x].X_pos, Obj[x].Y_pos, Sprite_X, Sprite_Y);

			Obj[x].Last_X[Current_Page] = Obj[x].X_pos;
			Obj[x].Last_Y[Current_Page] = Obj[x].Y_pos;

		}

		Last_Objects[Current_Page] = Visible_Objects;


		/* Pan Screen Back & Forth */

		View_Cnt++;
		if (View_Cnt >= View_Max)
		{
			View_X+= View_XD;
			if ( (View_X == 0) || (View_X == 39) ) {View_XD = -View_XD;}
			if (View_XD < 0)
			{
				View_Y+= View_YD;
				if ( (View_Y == 0) || (View_Y == 39) ) {View_YD = -View_YD;}
			}

			set_window (Current_Page, View_X, View_Y);

			View_Cnt = 0;
		}
		else
		{
			set_display_page (Current_Page);
		}

		/* Cycle Colors */

		set_dac_register (50 + Prev_Color, 3 + Prev_Color, 0, 60 - Prev_Color);
		set_dac_register (50 + Set_Color, Set_Color, 10, 63 - Set_Color);

		set_dac_register (150 + Prev_Color, 3 + Prev_Color, 0, 60 - Prev_Color);
		set_dac_register (150 + Set_Color, 63, 63, Set_Color);

		Set_Color+= S_Dir;
		if ( (Set_Color == 60) || (Set_Color == 0) ) {S_Dir = -S_Dir;}

		Prev_Color+= P_Dir;
		if ( (Prev_Color == 60) || (Prev_Color == 0) ) {P_Dir = -P_Dir;}

		/* Check for Keystroke */

		Current_Page = Current_Page ^ 0x01;

		code = scan_keyboard ();

		if (code == Ky_ESC) {Demo_Running = False;}

		if (code == Ky_Plus)
		{
			if (View_Max < 12) {View_Max++;}
		}

		if (code == Ky_Minus)
		{
			if (View_Max > 1) {View_Max--;}
			if (View_Cnt >= View_Max) {View_Cnt = 0;}
		}

		if (code == Ky_Up)
		{
			if (Visible_Objects < MAX_SPRITES-1) {Visible_Objects++;}
		}

		if (code == Ky_Down)
		{
			if (Visible_Objects > 0) {Visible_Objects--;}
		}

	}

}
Example #22
/*
 * Must not be called with IRQs off.  This should only be used on the
 * slow path.
 *
 * Copy a foreign granted page to local memory.
 */
int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep)
{
	struct gnttab_unmap_and_replace unmap;
	mmu_update_t mmu;
	struct page *page;
	struct page *new_page;
	void *new_addr;
	void *addr;
	paddr_t pfn;
	maddr_t mfn;
	maddr_t new_mfn;
	int err;

	page = *pagep;
	if (!get_page_unless_zero(page))
		return -ENOENT;

	err = -ENOMEM;
	new_page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
	if (!new_page)
		goto out;

	new_addr = page_address(new_page);
	addr = page_address(page);
	copy_page(new_addr, addr);

	pfn = page_to_pfn(page);
	mfn = pfn_to_mfn(pfn);
	new_mfn = virt_to_mfn(new_addr);

	write_seqlock_bh(&gnttab_dma_lock);

	/* Make seq visible before checking page_mapped. */
	smp_mb();

	/* Has the page been DMA-mapped? */
	if (unlikely(page_mapped(page))) {
		write_sequnlock_bh(&gnttab_dma_lock);
		put_page(new_page);
		err = -EBUSY;
		goto out;
	}

	if (!xen_feature(XENFEAT_auto_translated_physmap))
		set_phys_to_machine(pfn, new_mfn);

	gnttab_set_replace_op(&unmap, (unsigned long)addr,
			      (unsigned long)new_addr, ref);

	err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace,
					&unmap, 1);
	BUG_ON(err);
	BUG_ON(unmap.status != GNTST_okay);

	write_sequnlock_bh(&gnttab_dma_lock);

	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY);

		mmu.ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
		mmu.val = pfn;
		err = HYPERVISOR_mmu_update(&mmu, 1, NULL, DOMID_SELF);
		BUG_ON(err);
	}

	new_page->mapping = page->mapping;
	new_page->index = page->index;
	set_bit(PG_foreign, &new_page->flags);
	if (PageReserved(page))
		SetPageReserved(new_page);
	*pagep = new_page;

	SetPageForeign(page, gnttab_page_free);
	page->mapping = NULL;

out:
	put_page(page);
	return err;
}
Example #23
/*
 * This can switch to the next dst_sg element, so to copy strictly into
 * only one dst_sg element, it must either be the last one in the chain,
 * or copy_len == dst_sg->length.
 */
static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
			size_t *pdst_offs, struct scatterlist *src_sg,
			size_t copy_len,
			enum km_type d_km_type, enum km_type s_km_type)
{
	int res = 0;
	struct scatterlist *dst_sg;
	size_t src_len, dst_len, src_offs, dst_offs;
	struct page *src_page, *dst_page;

	dst_sg = *pdst_sg;
	dst_len = *pdst_len;
	dst_offs = *pdst_offs;
	dst_page = sg_page(dst_sg);

	src_page = sg_page(src_sg);
	src_len = src_sg->length;
	src_offs = src_sg->offset;

	do {
		void *saddr, *daddr;
		size_t n;

		saddr = kmap_atomic(src_page +
					 (src_offs >> PAGE_SHIFT), s_km_type) +
				    (src_offs & ~PAGE_MASK);
		daddr = kmap_atomic(dst_page +
					(dst_offs >> PAGE_SHIFT), d_km_type) +
				    (dst_offs & ~PAGE_MASK);

		if (((src_offs & ~PAGE_MASK) == 0) &&
		    ((dst_offs & ~PAGE_MASK) == 0) &&
		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
		    (copy_len >= PAGE_SIZE)) {
			copy_page(daddr, saddr);
			n = PAGE_SIZE;
		} else {
			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
			n = min(n, src_len);
			n = min(n, dst_len);
			n = min_t(size_t, n, copy_len);
			memcpy(daddr, saddr, n);
		}
		dst_offs += n;
		src_offs += n;

		kunmap_atomic(saddr, s_km_type);
		kunmap_atomic(daddr, d_km_type);

		res += n;
		copy_len -= n;
		if (copy_len == 0)
			goto out;

		src_len -= n;
		dst_len -= n;
		if (dst_len == 0) {
			dst_sg = sg_next(dst_sg);
			if (dst_sg == NULL)
				goto out;
			dst_page = sg_page(dst_sg);
			dst_len = dst_sg->length;
			dst_offs = dst_sg->offset;
		}
	} while (src_len > 0);

out:
	*pdst_sg = dst_sg;
	*pdst_len = dst_len;
	*pdst_offs = dst_offs;
	return res;
}
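A hypothetical call copying one full source element into a destination chain, showing the in/out bookkeeping described in the comment above (dst_sg_chain and src_sg are assumed to be valid scatterlists):

	struct scatterlist *dst = dst_sg_chain;		/* chain head */
	size_t dst_len = dst->length, dst_offs = dst->offset;
	int copied = sg_copy_elem(&dst, &dst_len, &dst_offs, src_sg,
				  src_sg->length, KM_USER0, KM_USER1);
	/* 'copied' bytes were transferred; dst/dst_len/dst_offs now
	 * describe the first unwritten byte of the destination chain. */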
Example #24
File: vm.c Project: jimlar/fusion
void do_page_fault (process_t     *proc,
		    unsigned int   linear_addr,
		    long           error_code)
{
  page_frame_t       *new_page, *page_frame;
  unsigned int        phys_frame_addr;
  pagetable_entry_t  *pte;
  int                 pf_ok;


  //Only handle usermode writes to present pages (not present ones later (pagein))
  //if ((error_code & PAGE_FAULT_P) &&
  //    (error_code & PAGE_FAULT_RW) &&
  //    (error_code & PAGE_FAULT_US))
  
  if (error_code & PAGE_FAULT_P) {
    
    //
    // CHECK IF WE REALLY SHOULD COPY THIS
    //  (with vm_blocks)
    //

    pf_ok = 0;

    //Only write pagefaults handled
    if (error_code & PAGE_FAULT_RW) {
      
      if ((linear_addr >= proc->vm_data.vm_start) &&
	  (linear_addr < proc->vm_data.vm_end) &&
	  (proc->vm_data.vm_flags & VM_READWRITE))
	pf_ok = 1;
      
      if ((linear_addr >= proc->vm_stack.vm_start) &&
	  (linear_addr < proc->vm_stack.vm_end) &&
	  (proc->vm_stack.vm_flags & VM_READWRITE))
	pf_ok = 1;
      
      if ((linear_addr >= proc->vm_kernel_stack.vm_start) &&
	  (linear_addr < proc->vm_kernel_stack.vm_end) &&
	  (proc->vm_kernel_stack.vm_flags & VM_READWRITE))
	pf_ok = 1;
    }


    if (!pf_ok)
    {
      printf ("*real* page fault (out of bounds), should terminate task!\n");
      printf ("present=%s, write=%s, usermode=%s, address=0x%x, dir=0x%x\n",
	      ((int) error_code & PAGE_FAULT_P) ? "true" : "false",
	      ((int) error_code & PAGE_FAULT_RW) ? "true" : "false",
	      ((int) error_code & PAGE_FAULT_US) ? "true" : "false",
	      linear_addr,
	      (int) (proc->tss.cr3));
      
      printf ("data start 0x%x, data end 0x%x, flags 0x%x\n",
	      (int) proc->vm_data.vm_start,
	      (int) proc->vm_data.vm_end,
	      (int) proc->vm_data.vm_flags);

      printf ("stack start 0x%x, stack end 0x%x, flags 0x%x\n",
	      (int) proc->vm_stack.vm_start,
	      (int) proc->vm_stack.vm_end,
	      (int) proc->vm_stack.vm_flags);

      while(1);
    }


    //Get dir
    pte = (pagetable_entry_t *) proc->tss.cr3;

    //Get table from dir
    pte = (pagetable_entry_t *) PTE_TO_PHYSICAL(pte[PAGE_DIR_INDEX(linear_addr)]);
    
    //Get page from table
    phys_frame_addr = PTE_TO_PHYSICAL(pte[PAGE_TABLE_INDEX(linear_addr)]);
 
    //Check use count of this page frame
    page_frame = &page_frames[PHYS_TO_FRAME_NR (phys_frame_addr)];
    
    if (page_frame->count > 1)
    {
      //Page in use by others, we need to copy
      new_page = get_free_page();

      if (new_page == NULL)
	panic ("Can't COW, no free pages!");

      //Copy page
      //printf ("COW(copy, 0x%x->0x%x)\n", phys_frame_addr, new_page->page_frame_nr * PAGE_SIZE);
 
      copy_page (phys_frame_addr, new_page->page_frame_nr * PAGE_SIZE);

      //Remap pagetable
      map_page (proc, new_page->page_frame_nr * PAGE_SIZE, 
		linear_addr & PHYS_PAGE_MASK, PAGE_PRESENT | PAGE_USER | PAGE_WRITEABLE);
    
      //Decrease use count
      page_frame->count--;

    } else if (page_frame->count == 1)
    {
      //The page is not in use by others, just remap
      //printf ("COW(remap, 0x%x)\n", linear_addr);
 
      //Remap pagetable
      map_page (proc, phys_frame_addr, 
		linear_addr & PHYS_PAGE_MASK, PAGE_PRESENT | PAGE_USER | PAGE_WRITEABLE);
  
    } else
    {
      printf ("Page frame has invalid use count!\n");
      while (1);
    }

    //Schedule next process
    schedule();

  } else {

    printf ("*real* page fault (page not present), should terminate task!\n");
 
    printf ("present=%s, write=%s, usermode=%s, address=0x%x, dir=0x%x\n",
	    ((int) error_code & PAGE_FAULT_P) ? "true" : "false",
	    ((int) error_code & PAGE_FAULT_RW) ? "true" : "false",
	    ((int) error_code & PAGE_FAULT_US) ? "true" : "false",
	    linear_addr,
	    (int) (proc->tss.cr3));
 
    printf ("data start 0x%x, data end 0x%x, flags 0x%x\n",
	      (int) proc->vm_data.vm_start,
	      (int) proc->vm_data.vm_end,
	      (int) proc->vm_data.vm_flags);

    printf ("stack start 0x%x, stack end 0x%x, flags 0x%x\n",
	    (int) proc->vm_stack.vm_start,
	    (int) proc->vm_stack.vm_end,
	    (int) proc->vm_stack.vm_flags);

    while(1);
  }
}
Example #25
int main(void){

  ogg_stream_init(&os_en,0x04030201);
  ogg_stream_init(&os_de,0x04030201);
  ogg_sync_init(&oy);

  /* Exercise each code path in the framing code.  Also verify that
     the checksums are working.  */

  {
    /* 17 only */
    const int packets[]={17, -1};
    const int *headret[]={head1_0,NULL};
    
    fprintf(stderr,"testing single page encoding... ");
    test_pack(packets,headret);
  }

  {
    /* 17, 254, 255, 256, 500, 510, 600 byte, pad */
    const int packets[]={17, 254, 255, 256, 500, 510, 600, -1};
    const int *headret[]={head1_1,head2_1,NULL};
    
    fprintf(stderr,"testing basic page encoding... ");
    test_pack(packets,headret);
  }

  {
    /* nil packets; beginning,middle,end */
    const int packets[]={0,17, 254, 255, 0, 256, 0, 500, 510, 600, 0, -1};
    const int *headret[]={head1_2,head2_2,NULL};
    
    fprintf(stderr,"testing basic nil packets... ");
    test_pack(packets,headret);
  }

  {
    /* large initial packet */
    const int packets[]={4345,259,255,-1};
    const int *headret[]={head1_3,head2_3,NULL};
    
    fprintf(stderr,"testing initial-packet lacing > 4k... ");
    test_pack(packets,headret);
  }

  {
    /* continuing packet test */
    const int packets[]={0,4345,259,255,-1};
    const int *headret[]={head1_4,head2_4,head3_4,NULL};
    
    fprintf(stderr,"testing single packet page span... ");
    test_pack(packets,headret);
  }

  /* page with the 255 segment limit */
  {

    const int packets[]={0,10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,10,
		   10,10,10,10,10,10,10,50,-1};
    const int *headret[]={head1_5,head2_5,head3_5,NULL};
    
    fprintf(stderr,"testing max packet segments... ");
    test_pack(packets,headret);
  }

  {
    /* packet that overspans over an entire page */
    const int packets[]={0,100,9000,259,255,-1};
    const int *headret[]={head1_6,head2_6,head3_6,head4_6,NULL};
    
    fprintf(stderr,"testing very large packets... ");
    test_pack(packets,headret);
  }

  {
    /* term only page.  why not? */
    const int packets[]={0,100,4080,-1};
    const int *headret[]={head1_7,head2_7,head3_7,NULL};
    
    fprintf(stderr,"testing zero data page (1 nil packet)... ");
    test_pack(packets,headret);
  }



  {
    /* build a bunch of pages for testing */
    unsigned char *data=_ogg_malloc(1024*1024);
    int pl[]={0,100,4079,2956,2057,76,34,912,0,234,1000,1000,1000,300,-1};
    int inptr=0,i,j;
    ogg_page og[5];
    
    ogg_stream_reset(&os_en);

    for(i=0;pl[i]!=-1;i++){
      ogg_packet op;
      int len=pl[i];
      
      op.packet=data+inptr;
      op.bytes=len;
      op.e_o_s=(pl[i+1]<0?1:0);
      op.granulepos=(i+1)*1000;

      for(j=0;j<len;j++)data[inptr++]=i+j;
      ogg_stream_packetin(&os_en,&op);
    }

    _ogg_free(data);

    /* retrieve finished pages */
    for(i=0;i<5;i++){
      if(ogg_stream_pageout(&os_en,&og[i])==0){
	fprintf(stderr,"Too few pages output building sync tests!\n");
	exit(1);
      }
      copy_page(&og[i]);
    }

    /* Test lost pages on pagein/packetout: no rollback */
    {
      ogg_page temp;
      ogg_packet test;

      fprintf(stderr,"Testing loss of pages... ");

      ogg_sync_reset(&oy);
      ogg_stream_reset(&os_de);
      for(i=0;i<5;i++){
	memcpy(ogg_sync_buffer(&oy,og[i].header_len),og[i].header,
	       og[i].header_len);
	ogg_sync_wrote(&oy,og[i].header_len);
	memcpy(ogg_sync_buffer(&oy,og[i].body_len),og[i].body,og[i].body_len);
	ogg_sync_wrote(&oy,og[i].body_len);
      }

      ogg_sync_pageout(&oy,&temp);
      ogg_stream_pagein(&os_de,&temp);
      ogg_sync_pageout(&oy,&temp);
      ogg_stream_pagein(&os_de,&temp);
      ogg_sync_pageout(&oy,&temp);
      /* skip */
      ogg_sync_pageout(&oy,&temp);
      ogg_stream_pagein(&os_de,&temp);

      /* do we get the expected results/packets? */
      
      if(ogg_stream_packetout(&os_de,&test)!=1)error();
      checkpacket(&test,0,0,0);
      if(ogg_stream_packetout(&os_de,&test)!=1)error();
      checkpacket(&test,100,1,-1);
      if(ogg_stream_packetout(&os_de,&test)!=1)error();
      checkpacket(&test,4079,2,3000);
      if(ogg_stream_packetout(&os_de,&test)!=-1){
	fprintf(stderr,"Error: loss of page did not return error\n");
	exit(1);
      }
      if(ogg_stream_packetout(&os_de,&test)!=1)error();
      checkpacket(&test,76,5,-1);
      if(ogg_stream_packetout(&os_de,&test)!=1)error();
      checkpacket(&test,34,6,-1);
      fprintf(stderr,"ok.\n");
    }

    /* Test lost pages on pagein/packetout: rollback with continuation */
    {
      ogg_page temp;
      ogg_packet test;

      fprintf(stderr,"Testing loss of pages (rollback required)... ");

      ogg_sync_reset(&oy);
      ogg_stream_reset(&os_de);
      for(i=0;i<5;i++){
	memcpy(ogg_sync_buffer(&oy,og[i].header_len),og[i].header,
	       og[i].header_len);
	ogg_sync_wrote(&oy,og[i].header_len);
	memcpy(ogg_sync_buffer(&oy,og[i].body_len),og[i].body,og[i].body_len);
	ogg_sync_wrote(&oy,og[i].body_len);
      }

      ogg_sync_pageout(&oy,&temp);
      ogg_stream_pagein(&os_de,&temp);
      ogg_sync_pageout(&oy,&temp);
      ogg_stream_pagein(&os_de,&temp);
      ogg_sync_pageout(&oy,&temp);
      ogg_stream_pagein(&os_de,&temp);
      ogg_sync_pageout(&oy,&temp);
      /* skip */
      ogg_sync_pageout(&oy,&temp);
      ogg_stream_pagein(&os_de,&temp);

      /* do we get the expected results/packets? */
      
      if(ogg_stream_packetout(&os_de,&test)!=1)error();
      checkpacket(&test,0,0,0);
      if(ogg_stream_packetout(&os_de,&test)!=1)error();
      checkpacket(&test,100,1,-1);
      if(ogg_stream_packetout(&os_de,&test)!=1)error();
      checkpacket(&test,4079,2,3000);
      if(ogg_stream_packetout(&os_de,&test)!=1)error();
      checkpacket(&test,2956,3,4000);
      if(ogg_stream_packetout(&os_de,&test)!=-1){
	fprintf(stderr,"Error: loss of page did not return error\n");
	exit(1);
      }
      if(ogg_stream_packetout(&os_de,&test)!=1)error();
      checkpacket(&test,300,13,14000);
      fprintf(stderr,"ok.\n");
    }
    
    /* the rest only test sync */
    {
      ogg_page og_de;
      /* Test fractional page inputs: incomplete capture */
      fprintf(stderr,"Testing sync on partial inputs... ");
      ogg_sync_reset(&oy);
      memcpy(ogg_sync_buffer(&oy,og[1].header_len),og[1].header,
	     3);
      ogg_sync_wrote(&oy,3);
      if(ogg_sync_pageout(&oy,&og_de)>0)error();
      
      /* Test fractional page inputs: incomplete fixed header */
      memcpy(ogg_sync_buffer(&oy,og[1].header_len),og[1].header+3,
	     20);
      ogg_sync_wrote(&oy,20);
      if(ogg_sync_pageout(&oy,&og_de)>0)error();
      
      /* Test fractional page inputs: incomplete header */
      memcpy(ogg_sync_buffer(&oy,og[1].header_len),og[1].header+23,
	     5);
      ogg_sync_wrote(&oy,5);
      if(ogg_sync_pageout(&oy,&og_de)>0)error();
      
      /* Test fractional page inputs: incomplete body */
      
      memcpy(ogg_sync_buffer(&oy,og[1].header_len),og[1].header+28,
	     og[1].header_len-28);
      ogg_sync_wrote(&oy,og[1].header_len-28);
      if(ogg_sync_pageout(&oy,&og_de)>0)error();
      
      memcpy(ogg_sync_buffer(&oy,og[1].body_len),og[1].body,1000);
      ogg_sync_wrote(&oy,1000);
      if(ogg_sync_pageout(&oy,&og_de)>0)error();
      
      memcpy(ogg_sync_buffer(&oy,og[1].body_len),og[1].body+1000,
	     og[1].body_len-1000);
      ogg_sync_wrote(&oy,og[1].body_len-1000);
      if(ogg_sync_pageout(&oy,&og_de)<=0)error();
      
      fprintf(stderr,"ok.\n");
    }

    /* Test fractional page inputs: page + incomplete capture */
    {
      ogg_page og_de;
      fprintf(stderr,"Testing sync on 1+partial inputs... ");
      ogg_sync_reset(&oy); 

      memcpy(ogg_sync_buffer(&oy,og[1].header_len),og[1].header,
	     og[1].header_len);
      ogg_sync_wrote(&oy,og[1].header_len);

      memcpy(ogg_sync_buffer(&oy,og[1].body_len),og[1].body,
	     og[1].body_len);
      ogg_sync_wrote(&oy,og[1].body_len);

      memcpy(ogg_sync_buffer(&oy,og[1].header_len),og[1].header,
	     20);
      ogg_sync_wrote(&oy,20);
      if(ogg_sync_pageout(&oy,&og_de)<=0)error();
      if(ogg_sync_pageout(&oy,&og_de)>0)error();

      memcpy(ogg_sync_buffer(&oy,og[1].header_len),og[1].header+20,
	     og[1].header_len-20);
      ogg_sync_wrote(&oy,og[1].header_len-20);
      memcpy(ogg_sync_buffer(&oy,og[1].body_len),og[1].body,
	     og[1].body_len);
      ogg_sync_wrote(&oy,og[1].body_len);
      if(ogg_sync_pageout(&oy,&og_de)<=0)error();

      fprintf(stderr,"ok.\n");
    }
    
    /* Test recapture: garbage + page */
    {
      ogg_page og_de;
      fprintf(stderr,"Testing search for capture... ");
      ogg_sync_reset(&oy); 
      
      /* 'garbage' */
      memcpy(ogg_sync_buffer(&oy,og[1].body_len),og[1].body,
	     og[1].body_len);
      ogg_sync_wrote(&oy,og[1].body_len);

      memcpy(ogg_sync_buffer(&oy,og[1].header_len),og[1].header,
	     og[1].header_len);
      ogg_sync_wrote(&oy,og[1].header_len);

      memcpy(ogg_sync_buffer(&oy,og[1].body_len),og[1].body,
	     og[1].body_len);
      ogg_sync_wrote(&oy,og[1].body_len);

      memcpy(ogg_sync_buffer(&oy,og[2].header_len),og[2].header,
	     20);
      ogg_sync_wrote(&oy,20);
      if(ogg_sync_pageout(&oy,&og_de)>0)error();
      if(ogg_sync_pageout(&oy,&og_de)<=0)error();
      if(ogg_sync_pageout(&oy,&og_de)>0)error();

      memcpy(ogg_sync_buffer(&oy,og[2].header_len),og[2].header+20,
	     og[2].header_len-20);
      ogg_sync_wrote(&oy,og[2].header_len-20);
      memcpy(ogg_sync_buffer(&oy,og[2].body_len),og[2].body,
	     og[2].body_len);
      ogg_sync_wrote(&oy,og[2].body_len);
      if(ogg_sync_pageout(&oy,&og_de)<=0)error();

      fprintf(stderr,"ok.\n");
    }

    /* Test recapture: page + garbage + page */
    {
      ogg_page og_de;
      fprintf(stderr,"Testing recapture... ");
      ogg_sync_reset(&oy); 

      memcpy(ogg_sync_buffer(&oy,og[1].header_len),og[1].header,
	     og[1].header_len);
      ogg_sync_wrote(&oy,og[1].header_len);

      memcpy(ogg_sync_buffer(&oy,og[1].body_len),og[1].body,
	     og[1].body_len);
      ogg_sync_wrote(&oy,og[1].body_len);

      memcpy(ogg_sync_buffer(&oy,og[2].header_len),og[2].header,
	     og[2].header_len);
      ogg_sync_wrote(&oy,og[2].header_len);

      memcpy(ogg_sync_buffer(&oy,og[2].header_len),og[2].header,
	     og[2].header_len);
      ogg_sync_wrote(&oy,og[2].header_len);

      if(ogg_sync_pageout(&oy,&og_de)<=0)error();

      memcpy(ogg_sync_buffer(&oy,og[2].body_len),og[2].body,
	     og[2].body_len-5);
      ogg_sync_wrote(&oy,og[2].body_len-5);

      memcpy(ogg_sync_buffer(&oy,og[3].header_len),og[3].header,
	     og[3].header_len);
      ogg_sync_wrote(&oy,og[3].header_len);

      memcpy(ogg_sync_buffer(&oy,og[3].body_len),og[3].body,
	     og[3].body_len);
      ogg_sync_wrote(&oy,og[3].body_len);

      if(ogg_sync_pageout(&oy,&og_de)>0)error();
      if(ogg_sync_pageout(&oy,&og_de)<=0)error();

      fprintf(stderr,"ok.\n");
    }
  }    

  return(0);
}
Example #26
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;

	page = bvec->bv_page;
	src = meta->compress_buffer;

	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	/*
	 * System overwrites unused sectors. Free memory associated
	 * with this sector now.
	 */
	if (meta->table[index].handle ||
	    zram_test_flag(meta, index, ZRAM_ZERO))
		zram_free_page(zram, index);

	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem);
		zram->stats.pages_zero++;
		zram_set_flag(meta, index, ZRAM_ZERO);
		ret = 0;
		goto out;
	}

	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
			       meta->compress_workmem);

	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	if (unlikely(clen > max_zpage_size)) {
		zram->stats.bad_compress++;
		clen = PAGE_SIZE;
		src = NULL;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed "
			"page: %u, size=%zu\n", index, clen);
		ret = -ENOMEM;
		goto out;
	}
	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zs_unmap_object(meta->mem_pool, handle);

	meta->table[index].handle = handle;
	meta->table[index].size = clen;

	/* Update stats */
	zram_stat64_add(zram, &zram->stats.compr_size, clen);
	zram->stats.pages_stored++;
	if (clen <= PAGE_SIZE / 2)
		zram->stats.good_compress++;

out:
	if (is_partial_io(bvec))
		kfree(uncmem);

	if (ret)
		zram_stat64_inc(zram, &zram->stats.failed_writes);
	return ret;
}