Example 1
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
				unsigned long address)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	const int field = sizeof(unsigned long) * 2;
	unsigned long flags = 0;
	siginfo_t info;
	int fault;

	info.si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
		goto vmalloc_fault;
#ifdef MODULE_START
	if (unlikely(address >= MODULE_START && address < MODULE_END))
		goto vmalloc_fault;
#endif

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	info.si_code = SEGV_ACCERR;

	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
			goto bad_area;
	}

survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		tsk->thread.cp0_badvaddr = address;
		tsk->thread.error_code = write;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void __user *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs)) {
		current->thread.cp0_baduaddr = address;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
			"virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
			0, field, address, field, regs->cp0_epc,
			field, regs->regs[31]);
	die("Oops", regs);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	else
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.cp0_badvaddr = address;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *) address;
	force_sig_info(SIGBUS, &info, tsk);
	return;
vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = __pgd_offset(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = (pgd_t *) pgd_current + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
}
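For context, a minimal sketch of the exception-table lookup that fixup_exception() is assumed to perform on the no_context path above. The entry layout and helper name are illustrative, not the kernel's actual definitions:

struct exception_table_entry {
	unsigned long insn;	/* address of the faulting instruction */
	unsigned long fixup;	/* address to resume at instead */
};

/* Binary search over a table sorted by insn (hypothetical helper). */
static const struct exception_table_entry *
search_extable_sketch(const struct exception_table_entry *first,
		      const struct exception_table_entry *last,
		      unsigned long pc)
{
	while (first <= last) {
		const struct exception_table_entry *mid =
				first + (last - first) / 2;

		if (mid->insn < pc)
			first = mid + 1;
		else if (mid->insn > pc)
			last = mid - 1;
		else
			return mid;	/* found: caller patches the EPC */
	}
	return NULL;	/* no fixup entry: fall through to die() */
}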
Example 2
File: apic.c Project: HPSI/xen-v4v
void __devinit setup_local_APIC(void)
{
    unsigned long oldvalue, value, ver, maxlvt;
    int i, j;

    /* Pound the ESR really hard over the head with a big hammer - mbligh */
    if (esr_disable) {
        apic_write(APIC_ESR, 0);
        apic_write(APIC_ESR, 0);
        apic_write(APIC_ESR, 0);
        apic_write(APIC_ESR, 0);
    }

    value = apic_read(APIC_LVR);
    ver = GET_APIC_VERSION(value);

    BUILD_BUG_ON((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f);

    /*
     * Double-check whether this APIC is really registered.
     */
    if (!apic_id_registered())
        BUG();

    /*
     * Intel recommends to set DFR, LDR and TPR before enabling
     * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
     * document number 292116).  So here it goes...
     */
    init_apic_ldr();

    /*
     * Set Task Priority to reject any interrupts below FIRST_DYNAMIC_VECTOR.
     */
    apic_write_around(APIC_TASKPRI, (FIRST_DYNAMIC_VECTOR & 0xF0) - 0x10);

    /*
     * After a crash, we no longer service the interrupts and a pending
     * interrupt from previous kernel might still have ISR bit set.
     *
     * Most probably by now CPU has serviced that pending interrupt and
     * it might not have done the ack_APIC_irq() because it thought,
     * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it
     * does not clear the ISR bit and cpu thinks it has already serviced
     * the interrupt. Hence a vector might get locked. It was noticed
     * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
     */
    for (i = APIC_ISR_NR - 1; i >= 0; i--) {
        value = apic_read(APIC_ISR + i*0x10);
        for (j = 31; j >= 0; j--) {
            if (value & (1<<j))
                ack_APIC_irq();
        }
    }

    /*
     * Now that we are all set up, enable the APIC
     */
    value = apic_read(APIC_SPIV);
    value &= ~APIC_VECTOR_MASK;
    /*
     * Enable APIC
     */
    value |= APIC_SPIV_APIC_ENABLED;

    /*
     * Some unknown Intel IO/APIC (or APIC) errata is biting us with
     * certain networking cards. If high frequency interrupts are
     * happening on a particular IOAPIC pin, plus the IOAPIC routing
     * entry is masked/unmasked at a high rate as well then sooner or
     * later IOAPIC line gets 'stuck', no more interrupts are received
     * from the device. If focus CPU is disabled then the hang goes
     * away, oh well :-(
     *
     * [ This bug can be reproduced easily with a level-triggered
     *   PCI Ne2000 networking cards and PII/PIII processors, dual
     *   BX chipset. ]
     */
    /*
     * Actually disabling the focus CPU check just makes the hang less
     * frequent as it makes the interrupt distribution model be more
     * like LRU than MRU (the short-term load is more even across CPUs).
     * See also the comment in end_level_ioapic_irq().  --macro
     */
#if 1
    /* Enable focus processor (bit==0) */
    value &= ~APIC_SPIV_FOCUS_DISABLED;
#else
    /* Disable focus processor (bit==1) */
    value |= APIC_SPIV_FOCUS_DISABLED;
#endif
    /*
     * Set spurious IRQ vector
     */
    value |= SPURIOUS_APIC_VECTOR;

    /*
     * Enable directed EOI
     */
    if ( directed_eoi_enabled )
    {
        value |= APIC_SPIV_DIRECTED_EOI;
        apic_printk(APIC_VERBOSE, "Suppress EOI broadcast on CPU#%d\n",
                    smp_processor_id());
    }

    apic_write_around(APIC_SPIV, value);

    /*
     * Set up LVT0, LVT1:
     *
     * set up through-local-APIC on the BP's LINT0. This is not
     * strictly necessary in pure symmetric-IO mode, but sometimes
     * we delegate interrupts to the 8259A.
     */
    /*
     * TODO: set up through-local-APIC from through-I/O-APIC? --macro
     */
    value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
    if (!smp_processor_id() && (pic_mode || !value)) {
        value = APIC_DM_EXTINT;
        apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n",
                    smp_processor_id());
    } else {
        value = APIC_DM_EXTINT | APIC_LVT_MASKED;
        apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n",
                    smp_processor_id());
    }
    apic_write_around(APIC_LVT0, value);

    /*
     * only the BP should see the LINT1 NMI signal, obviously.
     */
    if (!smp_processor_id())
        value = APIC_DM_NMI;
    else
        value = APIC_DM_NMI | APIC_LVT_MASKED;
    if (!APIC_INTEGRATED(ver))      /* 82489DX */
        value |= APIC_LVT_LEVEL_TRIGGER;
    apic_write_around(APIC_LVT1, value);

    if (APIC_INTEGRATED(ver) && !esr_disable) {        /* !82489DX */
        maxlvt = get_maxlvt();
        if (maxlvt > 3)     /* Due to the Pentium erratum 3AP. */
            apic_write(APIC_ESR, 0);
        oldvalue = apic_read(APIC_ESR);

        value = ERROR_APIC_VECTOR;      /* enables sending errors */
        apic_write_around(APIC_LVTERR, value);
        /*
         * spec says clear errors after enabling vector.
         */
        if (maxlvt > 3)
            apic_write(APIC_ESR, 0);
        value = apic_read(APIC_ESR);
        if (value != oldvalue)
            apic_printk(APIC_VERBOSE, "ESR value before enabling "
                        "vector: %#lx  after: %#lx\n",
                        oldvalue, value);
    } else {
        if (esr_disable)    
            /* 
             * Something untraceable is creating bad interrupts on
             * secondary quads ... for the moment, just leave the
             * ESR disabled - we can't do anything useful with the
             * errors anyway - mbligh
             */
            printk("Leaving ESR disabled.\n");
        else
            printk("No ESR for 82489DX.\n");
    }

    if (nmi_watchdog == NMI_LOCAL_APIC)
        setup_apic_nmi_watchdog();
    apic_pm_activate();
}
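A short sketch of the APIC_LVR decoding that GET_APIC_VERSION(), get_maxlvt() and APIC_INTEGRATED() above are assumed to build on (bits 0-7 carry the version, bits 16-23 the highest LVT entry; an integrated, non-82489DX APIC reports version 0x1X). The macro names here are illustrative:

#define APIC_LVR_VERSION_SKETCH(lvr)   ((lvr) & 0xFF)
#define APIC_LVR_MAXLVT_SKETCH(lvr)    (((lvr) >> 16) & 0xFF)
#define APIC_IS_INTEGRATED_SKETCH(ver) ((ver) & 0xF0)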
Example 3
static inline void play_dead(void)
{
	BUG();
}
Example 4
static int
do_check( PKT_secret_key *sk, const char *tryagain_text, int mode,
          int *canceled )
{
    gpg_error_t err;
    u16 csum=0;
    int i, res;
    size_t nbytes;

    if( sk->is_protected ) { /* remove the protection */
	DEK *dek = NULL;
	u32 keyid[4]; /* 4! because we need two of them */
	gcry_cipher_hd_t cipher_hd=NULL;
	PKT_secret_key *save_sk;

	if( sk->protect.s2k.mode == 1001 ) {
	    log_info(_("secret key parts are not available\n"));
	    return G10ERR_UNU_SECKEY;
	}
	if( sk->protect.algo == CIPHER_ALGO_NONE )
	    BUG();
	if( openpgp_cipher_test_algo( sk->protect.algo ) ) {
	    log_info(_("protection algorithm %d%s is not supported\n"),
			sk->protect.algo,sk->protect.algo==1?" (IDEA)":"" );
	    if (sk->protect.algo==CIPHER_ALGO_IDEA)
              {
                write_status (STATUS_RSA_OR_IDEA);
                idea_cipher_warn (0);
              }
	    return G10ERR_CIPHER_ALGO;
	}
	if(gcry_md_test_algo (sk->protect.s2k.hash_algo))
	  {
	    log_info(_("protection digest %d is not supported\n"),
		     sk->protect.s2k.hash_algo);
	    return G10ERR_DIGEST_ALGO;
	  }
	keyid_from_sk( sk, keyid );
	keyid[2] = keyid[3] = 0;
	if( !sk->is_primary ) {
            keyid[2] = sk->main_keyid[0];
            keyid[3] = sk->main_keyid[1];
	}
	dek = passphrase_to_dek( keyid, sk->pubkey_algo, sk->protect.algo,
				 &sk->protect.s2k, mode,
                                 tryagain_text, canceled );
        if (!dek && canceled && *canceled)
	    return GPG_ERR_CANCELED;


	err = openpgp_cipher_open (&cipher_hd, sk->protect.algo,
				   GCRY_CIPHER_MODE_CFB,
				   (GCRY_CIPHER_SECURE
				    | (sk->protect.algo >= 100 ?
				       0 : GCRY_CIPHER_ENABLE_SYNC)));
        if (err)
          log_fatal ("cipher open failed: %s\n", gpg_strerror (err) );

	err = gcry_cipher_setkey (cipher_hd, dek->key, dek->keylen);
        if (err)
          log_fatal ("set key failed: %s\n", gpg_strerror (err) );

	xfree(dek);
	save_sk = copy_secret_key( NULL, sk );

	gcry_cipher_setiv ( cipher_hd, sk->protect.iv, sk->protect.ivlen );

	csum = 0;
	if( sk->version >= 4 ) {
            int ndata;
	    unsigned int ndatabits;
	    byte *p, *data;
            u16 csumc = 0;

	    i = pubkey_get_npkey(sk->pubkey_algo);

            assert ( gcry_mpi_get_flag (sk->skey[i], GCRYMPI_FLAG_OPAQUE ));
            p = gcry_mpi_get_opaque ( sk->skey[i], &ndatabits );
            ndata = (ndatabits+7)/8;

            if ( ndata > 1 && p )
                csumc = p[ndata-2] << 8 | p[ndata-1];
	    data = xmalloc_secure ( ndata );
            if (p)
              gcry_cipher_decrypt ( cipher_hd, data, ndata, p, ndata );
            else
              memset (data, 0, ndata);
	    gcry_mpi_release (sk->skey[i]); sk->skey[i] = NULL ;

	    p = data;
            if (sk->protect.sha1chk) {
                /* This is the new SHA1 checksum method to detect
                   tampering with the key as used by the Klima/Rosa
                   attack */
                sk->csum = 0;
                csum = 1;
                if( ndata < 20 )
                    log_error("not enough bytes for SHA-1 checksum\n");
                else {
                    gcry_md_hd_t h;

                    if ( gcry_md_open (&h, DIGEST_ALGO_SHA1, 1))
                        BUG(); /* Algo not available. */
                    gcry_md_write (h, data, ndata - 20);
                    gcry_md_final (h);
                    if (!memcmp (gcry_md_read (h, DIGEST_ALGO_SHA1),
                                 data + ndata - 20, 20) )
                      {
                        /* Digest does match.  We have to keep the old
                           style checksum in sk->csum, so that the
                           test used for unprotected keys does work.
                           This test gets used when we are adding new
                           keys. */
                        sk->csum = csum = checksum (data, ndata-20);
                      }
                    gcry_md_close (h);
                }
            }
            else {
                if( ndata < 2 ) {
                    log_error("not enough bytes for checksum\n");
                    sk->csum = 0;
                    csum = 1;
                }
                else {
                    csum = checksum( data, ndata-2);
                    sk->csum = data[ndata-2] << 8 | data[ndata-1];
                    if ( sk->csum != csum ) {
                        /* This is a PGP 7.0.0 workaround */
                        sk->csum = csumc; /* take the encrypted one */
                    }
                }
            }

            /* Must check it here otherwise the mpi_read_xx would fail
               because the length may have an arbitrary value */
            if( sk->csum == csum ) {
                for( ; i < pubkey_get_nskey(sk->pubkey_algo); i++ ) {
                    if ( gcry_mpi_scan( &sk->skey[i], GCRYMPI_FMT_PGP,
                                        p, ndata, &nbytes))
                      {
                        /* Checksum was okay, but not correctly
                           decrypted.  */
                        sk->csum = 0;
                        csum = 1;
                        break;
                      }
                    ndata -= nbytes;
                    p += nbytes;
                }
                /* Note: at this point ndata should be 2 for a simple
                   checksum or 20 for the sha1 digest */
            }
	    xfree(data);
	}
	else {
	    for(i=pubkey_get_npkey(sk->pubkey_algo);
		    i < pubkey_get_nskey(sk->pubkey_algo); i++ ) {
                byte *p;
                size_t ndata;
                unsigned int ndatabits;

                assert (gcry_mpi_get_flag (sk->skey[i], GCRYMPI_FLAG_OPAQUE));
                p = gcry_mpi_get_opaque (sk->skey[i], &ndatabits);
                if (!p)
                  err = -1;
                else
                  {
                    byte *buffer;

                    ndata = (ndatabits+7)/8;
                    assert (ndata >= 2);
                    assert (ndata == ((p[0] << 8 | p[1]) + 7)/8 + 2);
                    buffer = xmalloc_secure (ndata);
                    gcry_cipher_sync (cipher_hd);
                    buffer[0] = p[0];
                    buffer[1] = p[1];
                    gcry_cipher_decrypt (cipher_hd, buffer+2, ndata-2,
                                         p+2, ndata-2);
                    csum += checksum (buffer, ndata);
                    gcry_mpi_release (sk->skey[i]);

                    err = gcry_mpi_scan( &sk->skey[i], GCRYMPI_FMT_PGP,
                                         buffer, ndata, &ndata );
                    xfree (buffer);
                  }
                if (err)
                  {
                    /* Checksum was okay, but not correctly
                       decrypted.  */
                    sk->csum = 0;
                    csum = 1;
                    break;
                  }
/*  		csum += checksum_mpi (sk->skey[i]); */
	    }
	}
	gcry_cipher_close ( cipher_hd );

	/* Now let's see whether we have used the correct passphrase. */
	if( csum != sk->csum ) {
	    copy_secret_key( sk, save_sk );
            passphrase_clear_cache ( keyid, NULL, sk->pubkey_algo );
	    free_secret_key( save_sk );
	    return gpg_error (GPG_ERR_BAD_PASSPHRASE);
	}

	/* The checksum may fail, so we also check the key itself. */
	res = pk_check_secret_key ( sk->pubkey_algo, sk->skey );
	if( res ) {
	    copy_secret_key( sk, save_sk );
            passphrase_clear_cache ( keyid, NULL, sk->pubkey_algo );
	    free_secret_key( save_sk );
	    return gpg_error (GPG_ERR_BAD_PASSPHRASE);
	}
	free_secret_key( save_sk );
	sk->is_protected = 0;
    }
    else { /* not protected, assume it is okay if the checksum is okay */
	csum = 0;
	for(i=pubkey_get_npkey(sk->pubkey_algo);
		i < pubkey_get_nskey(sk->pubkey_algo); i++ ) {
	    csum += checksum_mpi( sk->skey[i] );
	}
	if( csum != sk->csum )
	    return G10ERR_CHECKSUM;
    }

    return 0;
}
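The checksum() and checksum_mpi() calls above compute OpenPGP's simple two-octet checksum: the sum of all octets modulo 65536 (RFC 4880, section 5.5.3). A minimal sketch with a hypothetical name:

static u16
simple_checksum (const byte *p, size_t n)
{
    u16 sum = 0;

    while (n--)
        sum += *p++;  /* u16 overflow gives the mod-65536 behaviour */
    return sum;
}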
Example 5
/**
 * write_mft_record_nolock - write out a mapped (extent) mft record
 * @ni:		ntfs inode describing the mapped (extent) mft record
 * @m:		mapped (extent) mft record to write
 * @sync:	if true, wait for i/o completion
 *
 * Write the mapped (extent) mft record @m described by the (regular or extent)
 * ntfs inode @ni to backing store.  If the mft record @m has a counterpart in
 * the mft mirror, that is also updated.
 *
 * On success, clean the mft record and return 0.  On error, leave the mft
 * record dirty and return -errno.  The caller should call make_bad_inode() on
 * the base inode to ensure no more access happens to this inode.  We do not do
 * it here as the caller may want to finish writing other extent mft records
 * first to minimize on-disk metadata inconsistencies.
 *
 * NOTE:  We always perform synchronous i/o and ignore the @sync parameter.
 * However, if the mft record has a counterpart in the mft mirror and @sync is
 * true, we write the mft record, wait for i/o completion, and only then write
 * the mft mirror copy.  This ensures that if the system crashes either the mft
 * or the mft mirror will contain a self-consistent mft record @m.  If @sync is
 * false on the other hand, we start i/o on both and then wait for completion
 * on them.  This provides a speedup but no longer guarantees that you will end
 * up with a self-consistent mft record in the case of a crash but if you asked
 * for asynchronous writing you probably do not care about that anyway.
 *
 * TODO:  If @sync is false, want to do truly asynchronous i/o, i.e. just
 * schedule i/o via ->writepage or do it via kntfsd or whatever.
 */
int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync)
{
	ntfs_volume *vol = ni->vol;
	struct page *page = ni->page;
	unsigned int blocksize = vol->sb->s_blocksize;
	int max_bhs = vol->mft_record_size / blocksize;
	struct buffer_head *bhs[max_bhs];
	struct buffer_head *bh, *head;
	unsigned int block_start, block_end, m_start, m_end;
	int i_bhs, nr_bhs, err = 0;

	ntfs_debug("Entering for inode 0x%lx.", ni->mft_no);
	BUG_ON(NInoAttr(ni));
	BUG_ON(!max_bhs);
	BUG_ON(!PageLocked(page));
	/*
	 * If the ntfs_inode is clean no need to do anything.  If it is dirty,
	 * mark it as clean now so that it can be redirtied later on if needed.
	 * There is no danger of races since the caller is holding the locks
	 * for the mft record @m and the page it is in.
	 */
	if (!NInoTestClearDirty(ni))
		goto done;
	/* Make sure we have mapped buffers. */
	if (!page_has_buffers(page)) {
no_buffers_err_out:
		ntfs_error(vol->sb, "Writing mft records without existing "
				"buffers is not implemented yet.  %s",
				ntfs_please_email);
		err = -EOPNOTSUPP;
		goto err_out;
	}
	bh = head = page_buffers(page);
	if (!bh)
		goto no_buffers_err_out;
	nr_bhs = 0;
	block_start = 0;
	m_start = ni->page_ofs;
	m_end = m_start + vol->mft_record_size;
	do {
		block_end = block_start + blocksize;
		/*
		 * If the buffer is outside the mft record, just skip it,
		 * clearing it if it is dirty to make sure it is not written
		 * out.  It should never be marked dirty but better be safe.
		 */
		if ((block_end <= m_start) || (block_start >= m_end)) {
			if (buffer_dirty(bh)) {
				ntfs_warning(vol->sb, "Clearing dirty mft "
						"record page buffer.  %s",
						ntfs_please_email);
				clear_buffer_dirty(bh);
			}
			continue;
		}
		if (!buffer_mapped(bh)) {
			ntfs_error(vol->sb, "Writing mft records without "
					"existing mapped buffers is not "
					"implemented yet.  %s",
					ntfs_please_email);
			err = -EOPNOTSUPP;
			continue;
		}
		if (!buffer_uptodate(bh)) {
			ntfs_error(vol->sb, "Writing mft records without "
					"existing uptodate buffers is not "
					"implemented yet.  %s",
					ntfs_please_email);
			err = -EOPNOTSUPP;
			continue;
		}
		BUG_ON(!nr_bhs && (m_start != block_start));
		BUG_ON(nr_bhs >= max_bhs);
		bhs[nr_bhs++] = bh;
		BUG_ON((nr_bhs >= max_bhs) && (m_end != block_end));
	} while (block_start = block_end, (bh = bh->b_this_page) != head);
	if (unlikely(err))
		goto cleanup_out;
	/* Apply the mst protection fixups. */
	err = pre_write_mst_fixup((NTFS_RECORD*)m, vol->mft_record_size);
	if (err) {
		ntfs_error(vol->sb, "Failed to apply mst fixups!");
		goto cleanup_out;
	}
	flush_dcache_mft_record_page(ni);
	/* Lock buffers and start synchronous write i/o on them. */
	for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) {
		struct buffer_head *tbh = bhs[i_bhs];

		if (unlikely(test_set_buffer_locked(tbh)))
			BUG();
		BUG_ON(!buffer_uptodate(tbh));
		if (buffer_dirty(tbh))
			clear_buffer_dirty(tbh);
		get_bh(tbh);
		tbh->b_end_io = end_buffer_write_sync;
		submit_bh(WRITE, tbh);
	}
	/* Synchronize the mft mirror now if not @sync. */
	if (!sync && ni->mft_no < vol->mftmirr_size)
		sync_mft_mirror(ni, m, sync);
	/* Wait on i/o completion of buffers. */
	for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) {
		struct buffer_head *tbh = bhs[i_bhs];

		wait_on_buffer(tbh);
		if (unlikely(!buffer_uptodate(tbh))) {
			err = -EIO;
			/*
			 * Set the buffer uptodate so the page & buffer states
			 * don't become out of sync.
			 */
			if (PageUptodate(page))
				set_buffer_uptodate(tbh);
		}
	}
	/* If @sync, now synchronize the mft mirror. */
	if (sync && ni->mft_no < vol->mftmirr_size)
		sync_mft_mirror(ni, m, sync);
	/* Remove the mst protection fixups again. */
	post_write_mst_fixup((NTFS_RECORD*)m);
	flush_dcache_mft_record_page(ni);
	if (unlikely(err)) {
		/* I/O error during writing.  This is really bad! */
		ntfs_error(vol->sb, "I/O error while writing mft record "
				"0x%lx!  Marking base inode as bad.  You "
				"should unmount the volume and run chkdsk.",
				ni->mft_no);
		goto err_out;
	}
done:
	ntfs_debug("Done.");
	return 0;
cleanup_out:
	/* Clean the buffers. */
	for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++)
		clear_buffer_dirty(bhs[i_bhs]);
err_out:
	/*
	 * Current state: all buffers are clean, unlocked, and uptodate.
	 * The caller should mark the base inode as bad so that no more i/o
	 * happens.  ->clear_inode() will still be invoked so all extent inodes
	 * and other allocated memory will be freed.
	 */
	if (err == -ENOMEM) {
		ntfs_error(vol->sb, "Not enough memory to write mft record.  "
				"Redirtying so the write is retried later.");
		mark_mft_record_dirty(ni);
		err = 0;
	}
	return err;
}
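A hedged sketch of the multi sector transfer (MST) protection that pre_write_mst_fixup()/post_write_mst_fixup() apply above: the last two bytes of each 512-byte sector are replaced by the update sequence number (USN), with the displaced bytes saved in the record's update sequence array (USA), so a torn multi-sector write is detectable on read. The helper name and the fixed 512-byte stride are illustrative:

static void mst_fixup_sketch(u8 *rec, u16 *usa, int usa_count, u16 usn)
{
	int i;

	usa[0] = usn;			/* slot 0 stores the USN itself */
	for (i = 1; i < usa_count; i++) {
		u16 *sector_tail = (u16 *)(rec + i * 512 - 2);

		usa[i] = *sector_tail;	/* preserve the real last two bytes */
		*sector_tail = usn;	/* stamp the sector with the USN */
	}
}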
Example 6
/*
 * receive a message from an RxRPC socket
 * - we need to be careful about two or more threads calling recvmsg
 *   simultaneously
 */
int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
		  struct msghdr *msg, size_t len, int flags)
{
	struct rxrpc_skb_priv *sp;
	struct rxrpc_call *call = NULL, *continue_call = NULL;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct sk_buff *skb;
	long timeo;
	int copy, ret, ullen, offset, copied = 0;
	u32 abort_code;

	DEFINE_WAIT(wait);

	_enter(",,,%zu,%d", len, flags);

	if (flags & (MSG_OOB | MSG_TRUNC))
		return -EOPNOTSUPP;

	ullen = msg->msg_flags & MSG_CMSG_COMPAT ? 4 : sizeof(unsigned long);

	timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
	msg->msg_flags |= MSG_MORE;

	lock_sock(&rx->sk);

	for (;;) {
		/* return immediately if a client socket has no outstanding
		 * calls */
		if (RB_EMPTY_ROOT(&rx->calls)) {
			if (copied)
				goto out;
			if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
				release_sock(&rx->sk);
				if (continue_call)
					rxrpc_put_call(continue_call);
				return -ENODATA;
			}
		}

		/* get the next message on the Rx queue */
		skb = skb_peek(&rx->sk.sk_receive_queue);
		if (!skb) {
			/* nothing remains on the queue */
			if (copied &&
			    (msg->msg_flags & MSG_PEEK || timeo == 0))
				goto out;

			/* wait for a message to turn up */
			release_sock(&rx->sk);
			prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
						  TASK_INTERRUPTIBLE);
			ret = sock_error(&rx->sk);
			if (ret)
				goto wait_error;

			if (skb_queue_empty(&rx->sk.sk_receive_queue)) {
				if (signal_pending(current))
					goto wait_interrupted;
				timeo = schedule_timeout(timeo);
			}
			finish_wait(sk_sleep(&rx->sk), &wait);
			lock_sock(&rx->sk);
			continue;
		}

	peek_next_packet:
		sp = rxrpc_skb(skb);
		call = sp->call;
		ASSERT(call != NULL);

		_debug("next pkt %s", rxrpc_pkts[sp->hdr.type]);

		/* make sure we wait for the state to be updated in this call */
		spin_lock_bh(&call->lock);
		spin_unlock_bh(&call->lock);

		if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
			_debug("packet from released call");
			if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
				BUG();
			rxrpc_free_skb(skb);
			continue;
		}

		/* determine whether to continue last data receive */
		if (continue_call) {
			_debug("maybe cont");
			if (call != continue_call ||
			    skb->mark != RXRPC_SKB_MARK_DATA) {
				release_sock(&rx->sk);
				rxrpc_put_call(continue_call);
				_leave(" = %d [noncont]", copied);
				return copied;
			}
		}

		rxrpc_get_call(call);

		/* copy the peer address and timestamp */
		if (!continue_call) {
			if (msg->msg_name) {
				size_t len =
					sizeof(call->conn->trans->peer->srx);
				memcpy(msg->msg_name,
				       &call->conn->trans->peer->srx, len);
				msg->msg_namelen = len;
			}
			sock_recv_ts_and_drops(msg, &rx->sk, skb);
		}

		/* receive the message */
		if (skb->mark != RXRPC_SKB_MARK_DATA)
			goto receive_non_data_message;

		_debug("recvmsg DATA #%u { %d, %d }",
		       ntohl(sp->hdr.seq), skb->len, sp->offset);

		if (!continue_call) {
			/* only set the control data once per recvmsg() */
			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
				       ullen, &call->user_call_ID);
			if (ret < 0)
				goto copy_error;
			ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		}

		ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv);
		ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1);
		call->rx_data_recv = ntohl(sp->hdr.seq);

		ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten);

		offset = sp->offset;
		copy = skb->len - offset;
		if (copy > len - copied)
			copy = len - copied;

		ret = skb_copy_datagram_msg(skb, offset, msg, copy);

		if (ret < 0)
			goto copy_error;

		/* handle piecemeal consumption of data packets */
		_debug("copied %d+%d", copy, copied);

		offset += copy;
		copied += copy;

		if (!(flags & MSG_PEEK))
			sp->offset = offset;

		if (sp->offset < skb->len) {
			_debug("buffer full");
			ASSERTCMP(copied, ==, len);
			break;
		}

		/* we transferred the whole data packet */
		if (sp->hdr.flags & RXRPC_LAST_PACKET) {
			_debug("last");
			if (call->conn->out_clientflag) {
				 /* last byte of reply received */
				ret = copied;
				goto terminal_message;
			}

			/* last bit of request received */
			if (!(flags & MSG_PEEK)) {
				_debug("eat packet");
				if (skb_dequeue(&rx->sk.sk_receive_queue) !=
				    skb)
					BUG();
				rxrpc_free_skb(skb);
			}
			msg->msg_flags &= ~MSG_MORE;
			break;
		}

		/* move on to the next data message */
		_debug("next");
		if (!continue_call)
			continue_call = sp->call;
		else
			rxrpc_put_call(call);
		call = NULL;

		if (flags & MSG_PEEK) {
			_debug("peek next");
			skb = skb->next;
			if (skb == (struct sk_buff *) &rx->sk.sk_receive_queue)
				break;
			goto peek_next_packet;
		}

		_debug("eat packet");
		if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
			BUG();
		rxrpc_free_skb(skb);
	}
Example 7
File: irq.c Project: 168519/linux
static void bcm63xx_init_irq(void)
{
	int irq_bits;

	irq_stat_addr[0] = bcm63xx_regset_address(RSET_PERF);
	irq_mask_addr[0] = bcm63xx_regset_address(RSET_PERF);
	irq_stat_addr[1] = bcm63xx_regset_address(RSET_PERF);
	irq_mask_addr[1] = bcm63xx_regset_address(RSET_PERF);

	switch (bcm63xx_get_cpu_id()) {
	case BCM3368_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_3368_REG;
		irq_mask_addr[0] += PERF_IRQMASK_3368_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368;
		break;
	case BCM6328_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6328_REG(1);
		irq_bits = 64;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328;
		break;
	case BCM6338_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6338_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6338_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338;
		break;
	case BCM6345_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6345_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6345_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345;
		break;
	case BCM6348_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6348_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6348_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348;
		break;
	case BCM6358_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6358_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6358_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6358_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6358_REG(1);
		irq_bits = 32;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
		break;
	case BCM6362_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6362_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6362_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6362_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6362_REG(1);
		irq_bits = 64;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
		break;
	case BCM6368_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6368_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6368_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6368_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6368_REG(1);
		irq_bits = 64;
		ext_irq_count = 6;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368;
		ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368;
		break;
	default:
		BUG();
	}

	if (irq_bits == 32) {
		dispatch_internal = __dispatch_internal_32;
		internal_irq_mask = __internal_irq_mask_32;
		internal_irq_unmask = __internal_irq_unmask_32;
	} else {
		dispatch_internal = __dispatch_internal_64;
		internal_irq_mask = __internal_irq_mask_64;
		internal_irq_unmask = __internal_irq_unmask_64;
	}
}
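A sketch of what the 32-bit dispatch selected above is assumed to do: read the pending word (status AND mask) and hand every set bit to do_IRQ(). The accessor names follow the bcm63xx convention but are illustrative here:

static void dispatch_internal_32_sketch(void)
{
	u32 pending;

	pending = bcm_readl(irq_stat_addr[0]) & bcm_readl(irq_mask_addr[0]);
	while (pending) {
		int bit = ffs(pending) - 1;	/* lowest pending source */

		pending &= ~(1u << bit);
		do_IRQ(IRQ_INTERNAL_BASE + bit);
	}
}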
Example 8
/*
 * Called with IRQs disabled. IRQs must remain disabled from that point
 * all the way until processing this kprobe is complete.  The current
 * kprobes implementation cannot process more than one nested level of
 * kprobe, and that level is reserved for user kprobe handlers, so we can't
 * risk encountering a new kprobe in an interrupt handler.
 */
void __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur;
	struct kprobe_ctlblk *kcb;

	kcb = get_kprobe_ctlblk();
	cur = kprobe_running();

#ifdef CONFIG_THUMB2_KERNEL
	/*
	 * First look for a probe which was registered using an address with
	 * bit 0 set, this is the usual situation for pointers to Thumb code.
	 * If not found, fallback to looking for one with bit 0 clear.
	 */
	p = get_kprobe((kprobe_opcode_t *)(regs->ARM_pc | 1));
	if (!p)
		p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);

#else /* ! CONFIG_THUMB2_KERNEL */
	p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);
#endif

	if (p) {
		if (cur) {
			/* Kprobe is pending, so we're recursing. */
			switch (kcb->kprobe_status) {
			case KPROBE_HIT_ACTIVE:
			case KPROBE_HIT_SSDONE:
				/* A pre- or post-handler probe got us here. */
				kprobes_inc_nmissed_count(p);
				save_previous_kprobe(kcb);
				set_current_kprobe(p);
				kcb->kprobe_status = KPROBE_REENTER;
				singlestep(p, regs, kcb);
				restore_previous_kprobe(kcb);
				break;
			default:
				/* impossible cases */
				BUG();
			}
		} else if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
			/* Probe hit and conditional execution check ok. */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry,
			 * so get out doing nothing more here.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs)) {
				kcb->kprobe_status = KPROBE_HIT_SS;
				singlestep(p, regs, kcb);
				if (p->post_handler) {
					kcb->kprobe_status = KPROBE_HIT_SSDONE;
					p->post_handler(p, regs, 0);
				}
				reset_current_kprobe();
			}
		} else {
			/*
			 * Probe hit but conditional execution check failed,
			 * so just skip the instruction and continue as if
			 * nothing had happened.
			 */
			singlestep_skip(p, regs);
		}
	} else if (cur) {
		/* We probably hit a jprobe.  Call its break handler. */
		if (cur->break_handler && cur->break_handler(cur, regs)) {
			kcb->kprobe_status = KPROBE_HIT_SS;
			singlestep(cur, regs, kcb);
			if (cur->post_handler) {
				kcb->kprobe_status = KPROBE_HIT_SSDONE;
				cur->post_handler(cur, regs, 0);
			}
		}
		reset_current_kprobe();
	} else {
		/*
		 * The probe was removed and a race is in progress.
		 * There is nothing we can do about it.  Let's restart
		 * the instruction.  By the time we can restart, the
		 * real instruction will be there.
		 */
	}
}
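For orientation, a minimal usage sketch of the kprobes API this handler serves; the probed symbol is hypothetical, and a pre-handler returning 0 takes the normal single-step path above:

static int sketch_pre(struct kprobe *p, struct pt_regs *regs)
{
	return 0;	/* 0: continue with normal processing */
}

static struct kprobe sketch_kp = {
	.symbol_name	= "do_sys_open",	/* hypothetical target */
	.pre_handler	= sketch_pre,
};

/* Calling register_kprobe(&sketch_kp) from module init would arm it. */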
Example 9
/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct mapping function and make sure the TTM pages are
 * visible to the device. Allocate storage for the device mappings.
 * If a mapping has already been performed, indicated by the storage
 * pointer being non NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	struct vmw_piter iter;
	dma_addr_t old;
	int ret = 0;
	static size_t sgl_size;
	static size_t sgt_size;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = &vmw_tt->sgt;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (unlikely(!sgl_size)) {
			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
			sgt_size = ttm_round_pot(sizeof(struct sg_table));
		}
		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
		if (unlikely(ret != 0))
			return ret;

		ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
						vsgt->num_pages, 0,
						(unsigned long)
						vsgt->num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (unlikely(ret != 0))
			goto out_sg_alloc_fail;

		if (vsgt->num_pages > vmw_tt->sgt.nents) {
			uint64_t over_alloc =
				sgl_size * (vsgt->num_pages -
					    vmw_tt->sgt.nents);

			ttm_mem_global_free(glob, over_alloc);
			vmw_tt->sg_alloc_size -= over_alloc;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

	old = ~((dma_addr_t) 0);
	vmw_tt->vsgt.num_regions = 0;
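	/*
	 * Count contiguous DMA regions: each address that does not follow
	 * its predecessor by exactly PAGE_SIZE starts a new region.
	 */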
	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t cur = vmw_piter_dma_addr(&iter);

		if (cur != old + PAGE_SIZE)
			vmw_tt->vsgt.num_regions++;
		old = cur;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
	return ret;
}

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_tt->sg_alloc_size);
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}


/**
 * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_backend as argument.
 * Note that the buffer object must be either pinned or reserved before
 * calling this function.
 */
int vmw_bo_map_dma(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return vmw_ttm_map_dma(vmw_tt);
}


/**
 * vmw_bo_unmap_dma - Tear down any device DMA mappings for a buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_backend as argument.
 */
void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_tt);
}


/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return &vmw_tt->vsgt;
}


static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	int ret;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				    ttm->num_pages, vmw_be->gmr_id);
	case VMW_PL_MOB:
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob =
				vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}

		return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				    &vmw_be->vsgt, ttm->num_pages,
				    vmw_be->gmr_id);
	default:
		BUG();
	}
	return 0;
}

static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	default:
		BUG();
	}

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);

	return 0;
}


static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_be);
	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ttm_dma_tt_fini(&vmw_be->dma_ttm);
	else
		ttm_tt_fini(ttm);

	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}


static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
		ret = ttm_mem_global_alloc(glob, size, ctx);
		if (unlikely(ret != 0))
			return ret;

		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
					ctx);
		if (unlikely(ret != 0))
			ttm_mem_global_free(glob, size);
	} else
		ret = ttm_pool_populate(ttm, ctx);

	return ret;
}

static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);


	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);
	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		ttm_mem_global_free(glob, size);
	} else
		ttm_pool_unpopulate(ttm);
}

static struct ttm_backend_func vmw_ttm_func = {
	.bind = vmw_ttm_bind,
	.unbind = vmw_ttm_unbind,
	.destroy = vmw_ttm_destroy,
};

static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
	vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
	vmw_be->mob = NULL;

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm.ttm;
out_no_init:
	kfree(vmw_be);
	return NULL;
}

static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
		      struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */

		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		/*
		 * "Guest Memory Regions" is an aperture like feature with
		 *  one slot per bo. There is an upper limit of the number of
		 *  slots as well as the bo size.
		 */
		man->func = &vmw_gmrid_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct ttm_object_file *tfile =
		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

	return vmw_user_dmabuf_verify_access(bo, tfile);
}

static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	mem->bus.addr = NULL;
	mem->bus.is_iomem = false;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = dev_priv->vram_start;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	return 0;
}

/**
 * vmw_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 *       region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_mem_reg *mem)
{
	vmw_resource_move_notify(bo, mem);
	vmw_query_move_notify(bo, mem);
}
Example 10
/* This is an inline function, we don't really care about a long
 * list of arguments */
static inline int
__build_packet_message(struct nfulnl_instance *inst,
			const struct sk_buff *skb,
			unsigned int data_len,
			u_int8_t pf,
			unsigned int hooknum,
			const struct net_device *indev,
			const struct net_device *outdev,
			const char *prefix, unsigned int plen)
{
	struct nfulnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	sk_buff_data_t old_tail = inst->skb->tail;

	nlh = NLMSG_PUT(inst->skb, 0, 0,
			NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
			sizeof(struct nfgenmsg));
	nfmsg = NLMSG_DATA(nlh);
	nfmsg->nfgen_family = pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(inst->group_num);

	pmsg.hw_protocol	= skb->protocol;
	pmsg.hook		= hooknum;

	if (nla_put(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg))
		goto nla_put_failure;

	if (prefix &&
	    nla_put(inst->skb, NFULA_PREFIX, plen, prefix))
		goto nla_put_failure;

	if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
				 htonl(indev->ifindex)))
			goto nla_put_failure;
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					 htonl(indev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
			    nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
					 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
					 htonl(indev->ifindex)))
				goto nla_put_failure;
			if (skb->nf_bridge && skb->nf_bridge->physindev &&
			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					 htonl(skb->nf_bridge->physindev->ifindex)))
				goto nla_put_failure;
		}
#endif
	}

	if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
				 htonl(outdev->ifindex)))
			goto nla_put_failure;
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					 htonl(outdev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
			    nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
					 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			/* Case 2: indev is a bridge group, we need to look
			 * for physical device (when called from ipv4) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
					 htonl(outdev->ifindex)))
				goto nla_put_failure;
			if (skb->nf_bridge && skb->nf_bridge->physoutdev &&
			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					 htonl(skb->nf_bridge->physoutdev->ifindex)))
				goto nla_put_failure;
		}
#endif
	}

	if (skb->mark &&
	    nla_put_be32(inst->skb, NFULA_MARK, htonl(skb->mark)))
		goto nla_put_failure;

	if (indev && skb->dev &&
	    skb->mac_header != skb->network_header) {
		struct nfulnl_msg_packet_hw phw;
		int len = dev_parse_header(skb, phw.hw_addr);
		if (len > 0) {
			phw.hw_addrlen = htons(len);
			if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
				goto nla_put_failure;
		}
	}

	if (indev && skb_mac_header_was_set(skb)) {
		if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
		    nla_put_be16(inst->skb, NFULA_HWLEN,
				 htons(skb->dev->hard_header_len)) ||
		    nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
			    skb_mac_header(skb)))
			goto nla_put_failure;
	}

	if (skb->tstamp.tv64) {
		struct nfulnl_msg_packet_timestamp ts;
		struct timeval tv = ktime_to_timeval(skb->tstamp);
		ts.sec = cpu_to_be64(tv.tv_sec);
		ts.usec = cpu_to_be64(tv.tv_usec);

		if (nla_put(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts))
			goto nla_put_failure;
	}

	/* UID */
	if (skb->sk) {
		read_lock_bh(&skb->sk->sk_callback_lock);
		if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
			struct file *file = skb->sk->sk_socket->file;
			__be32 uid = htonl(file->f_cred->fsuid);
			__be32 gid = htonl(file->f_cred->fsgid);
			read_unlock_bh(&skb->sk->sk_callback_lock);
			if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
			    nla_put_be32(inst->skb, NFULA_GID, gid))
				goto nla_put_failure;
		} else
			read_unlock_bh(&skb->sk->sk_callback_lock);
	}

	/* local sequence number */
	if ((inst->flags & NFULNL_CFG_F_SEQ) &&
	    nla_put_be32(inst->skb, NFULA_SEQ, htonl(inst->seq++)))
		goto nla_put_failure;

	/* global sequence number */
	if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
	    nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
			 htonl(atomic_inc_return(&global_seq))))
		goto nla_put_failure;

	if (data_len) {
		struct nlattr *nla;
		int size = nla_attr_size(data_len);

		if (skb_tailroom(inst->skb) < nla_total_size(data_len)) {
			printk(KERN_WARNING "nfnetlink_log: no tailroom!\n");
			goto nlmsg_failure;
		}

		nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len));
		nla->nla_type = NFULA_PAYLOAD;
		nla->nla_len = size;

		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
			BUG();
	}

	nlh->nlmsg_len = inst->skb->tail - old_tail;
	return 0;

nlmsg_failure:
nla_put_failure:
	PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
	return -1;
}
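The payload attribute above is laid out by hand; for reference, a sketch of the two netlink sizing helpers it leans on (mirroring their standard definitions, where NLA_HDRLEN is the aligned attribute header size):

static inline int nla_attr_size_sketch(int payload)
{
	return NLA_HDRLEN + payload;		/* header + payload */
}

static inline int nla_total_size_sketch(int payload)
{
	return NLA_ALIGN(nla_attr_size_sketch(payload));  /* pad to 4 bytes */
}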
Example 11
static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct p9_flock flock;
	struct p9_fid *fid;
	uint8_t status;
	int res = 0;
	unsigned char fl_type;

	fid = filp->private_data;
	BUG_ON(fid == NULL);

	if ((fl->fl_flags & FL_POSIX) != FL_POSIX)
		BUG();

	res = posix_lock_file_wait(filp, fl);
	if (res < 0)
		goto out;

	/* convert posix lock to p9 tlock args */
	memset(&flock, 0, sizeof(flock));
	/* map the lock type */
	switch (fl->fl_type) {
	case F_RDLCK:
		flock.type = P9_LOCK_TYPE_RDLCK;
		break;
	case F_WRLCK:
		flock.type = P9_LOCK_TYPE_WRLCK;
		break;
	case F_UNLCK:
		flock.type = P9_LOCK_TYPE_UNLCK;
		break;
	}
	flock.start = fl->fl_start;
	if (fl->fl_end == OFFSET_MAX)
		flock.length = 0;
	else
		flock.length = fl->fl_end - fl->fl_start + 1;
	flock.proc_id = fl->fl_pid;
	flock.client_id = fid->clnt->name;
	if (IS_SETLKW(cmd))
		flock.flags = P9_LOCK_FLAGS_BLOCK;

	/*
	 * if it's a blocked request and we get P9_LOCK_BLOCKED as the status
	 * for lock request, keep on trying
	 */
	for (;;) {
		res = p9_client_lock_dotl(fid, &flock, &status);
		if (res < 0)
			break;

		if (status != P9_LOCK_BLOCKED)
			break;
		if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
			break;
		if (schedule_timeout_interruptible(P9_LOCK_TIMEOUT) != 0)
			break;
	}

	/* map 9p status to VFS status */
	switch (status) {
	case P9_LOCK_SUCCESS:
		res = 0;
		break;
	case P9_LOCK_BLOCKED:
		res = -EAGAIN;
		break;
	case P9_LOCK_ERROR:
	case P9_LOCK_GRACE:
		res = -ENOLCK;
		break;
	default:
		BUG();
	}

	/*
	 * in case the server returned an error for the lock request, revert
	 * it locally
	 */
	if (res < 0 && fl->fl_type != F_UNLCK) {
		fl_type = fl->fl_type;
		fl->fl_type = F_UNLCK;
		res = posix_lock_file_wait(filp, fl);
		fl->fl_type = fl_type;
	}
out:
	return res;
}
Example 12
/** Initialize <b>crypto</b> from the key material in key_data.
 *
 * If <b>is_hs_v3</b> is set, this cpath will be used for next gen hidden
 * service circuits and <b>key_data</b> must be at least
 * HS_NTOR_KEY_EXPANSION_KDF_OUT_LEN bytes in length.
 *
 * If <b>is_hs_v3</b> is not set, key_data must contain CPATH_KEY_MATERIAL_LEN
 * bytes, which are used as follows:
 *   - 20 to initialize f_digest
 *   - 20 to initialize b_digest
 *   - 16 to key f_crypto
 *   - 16 to key b_crypto
 *
 * (If 'reverse' is true, then f_XX and b_XX are swapped.)
 *
 * Return 0 if init was successful, else -1 if it failed.
 */
int
relay_crypto_init(relay_crypto_t *crypto,
                  const char *key_data, size_t key_data_len,
                  int reverse, int is_hs_v3)
{
  crypto_digest_t *tmp_digest;
  crypto_cipher_t *tmp_crypto;
  size_t digest_len = 0;
  size_t cipher_key_len = 0;

  tor_assert(crypto);
  tor_assert(key_data);
  tor_assert(!(crypto->f_crypto || crypto->b_crypto ||
             crypto->f_digest || crypto->b_digest));

  /* Basic key size validation */
  if (is_hs_v3 && BUG(key_data_len != HS_NTOR_KEY_EXPANSION_KDF_OUT_LEN)) {
    goto err;
  } else if (!is_hs_v3 && BUG(key_data_len != CPATH_KEY_MATERIAL_LEN)) {
    goto err;
  }

  /* If we are using this crypto for next gen onion services use SHA3-256,
     otherwise use good ol' SHA1 */
  if (is_hs_v3) {
    digest_len = DIGEST256_LEN;
    cipher_key_len = CIPHER256_KEY_LEN;
    crypto->f_digest = crypto_digest256_new(DIGEST_SHA3_256);
    crypto->b_digest = crypto_digest256_new(DIGEST_SHA3_256);
  } else {
    digest_len = DIGEST_LEN;
    cipher_key_len = CIPHER_KEY_LEN;
    crypto->f_digest = crypto_digest_new();
    crypto->b_digest = crypto_digest_new();
  }

  tor_assert(digest_len != 0);
  tor_assert(cipher_key_len != 0);
  const int cipher_key_bits = (int) cipher_key_len * 8;

  crypto_digest_add_bytes(crypto->f_digest, key_data, digest_len);
  crypto_digest_add_bytes(crypto->b_digest, key_data+digest_len, digest_len);

  crypto->f_crypto = crypto_cipher_new_with_bits(key_data+(2*digest_len),
                                                cipher_key_bits);
  if (!crypto->f_crypto) {
    log_warn(LD_BUG,"Forward cipher initialization failed.");
    goto err;
  }

  crypto->b_crypto = crypto_cipher_new_with_bits(
                                        key_data+(2*digest_len)+cipher_key_len,
                                        cipher_key_bits);
  if (!crypto->b_crypto) {
    log_warn(LD_BUG,"Backward cipher initialization failed.");
    goto err;
  }

  if (reverse) {
    tmp_digest = crypto->f_digest;
    crypto->f_digest = crypto->b_digest;
    crypto->b_digest = tmp_digest;
    tmp_crypto = crypto->f_crypto;
    crypto->f_crypto = crypto->b_crypto;
    crypto->b_crypto = tmp_crypto;
  }

  return 0;
 err:
  relay_crypto_clear(crypto);
  return -1;
}
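For the non-hs_v3 case, the docstring pins the layout of key_data: 20 + 20 + 16 + 16 bytes, sliced at exactly the offsets the function uses. A small self-contained sketch of that slicing, with the lengths written as plain constants (DIGEST_LEN = 20 and CIPHER_KEY_LEN = 16 match Tor's headers, but treat the values here as restated assumptions):

#include <stdio.h>
#include <string.h>

#define DIGEST_LEN     20   /* SHA-1 output, per the docstring above */
#define CIPHER_KEY_LEN 16   /* AES-128 key */
#define CPATH_KEY_MATERIAL_LEN (2*DIGEST_LEN + 2*CIPHER_KEY_LEN)  /* 72 */

int main(void)
{
	unsigned char key_data[CPATH_KEY_MATERIAL_LEN];
	memset(key_data, 0xAB, sizeof(key_data));

	/* Same offsets relay_crypto_init() uses above. */
	const unsigned char *f_digest_seed = key_data;
	const unsigned char *b_digest_seed = key_data + DIGEST_LEN;
	const unsigned char *f_key = key_data + 2*DIGEST_LEN;
	const unsigned char *b_key = key_data + 2*DIGEST_LEN + CIPHER_KEY_LEN;

	printf("f_digest at %td, b_digest at %td, f_key at %td, b_key at %td\n",
	       f_digest_seed - key_data, b_digest_seed - key_data,
	       f_key - key_data, b_key - key_data);
	return 0;
}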
Example no. 13
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
    BUG();
}
Example no. 14
void __init early_init_dt_scan_chosen_arch(unsigned long node)
{
    BUG();
}
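Examples no. 13 and 14 are deliberate BUG() stubs: the generic device-tree scanning code calls these hooks, and any architecture that actually enables OF support is expected to supply real implementations. A hedged sketch of what an arch override typically looks like on a memblock-based port (the memblock_add() call is the common choice, but treat it as an assumption about the port, not part of these examples):

/* Sketch of an architecture override, assuming a memblock-based port. */
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
	/* Register the RAM range discovered in the flattened device tree. */
	memblock_add(base, size);
}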
Example no. 15
static gpg_error_t
keyserver_get (ctrl_t ctrl, KEYDB_SEARCH_DESC *desc, int ndesc,
               struct keyserver_spec *keyserver)

{
  gpg_error_t err = 0;
  char **pattern;
  int idx, npat;
  estream_t datastream;

  /* Create an array filled with a search pattern for each key.  The
     array is delimited by a NULL entry.  */
  pattern = xtrycalloc (ndesc+1, sizeof *pattern);
  if (!pattern)
    return gpg_error_from_syserror ();
  for (npat=idx=0; idx < ndesc; idx++)
    {
      int quiet = 0;

      if (desc[idx].mode == KEYDB_SEARCH_MODE_FPR20
          || desc[idx].mode == KEYDB_SEARCH_MODE_FPR16)
        {
          pattern[npat] = xtrymalloc (2+2*20+1);
          if (!pattern[npat])
            err = gpg_error_from_syserror ();
          else
            {
              strcpy (pattern[npat], "0x");
              bin2hex (desc[idx].u.fpr,
                       desc[idx].mode == KEYDB_SEARCH_MODE_FPR20? 20 : 16,
                       pattern[npat]+2);
              npat++;
            }
        }
      else if(desc[idx].mode == KEYDB_SEARCH_MODE_LONG_KID)
        {
          pattern[npat] = xtryasprintf ("0x%08lX%08lX",
                                        (ulong)desc[idx].u.kid[0],
                                        (ulong)desc[idx].u.kid[1]);
          if (!pattern[npat])
            err = gpg_error_from_syserror ();
          else
            npat++;
        }
      else if(desc[idx].mode == KEYDB_SEARCH_MODE_SHORT_KID)
        {
          pattern[npat] = xtryasprintf ("0x%08lX", (ulong)desc[idx].u.kid[1]);
          if (!pattern[npat])
            err = gpg_error_from_syserror ();
          else
            npat++;
        }
      else if(desc[idx].mode == KEYDB_SEARCH_MODE_EXACT)
        {
          /* FIXME: We don't need this.  It is used as a dummy by
             keyserver_fetch which passes an entire URL.  Better use a
             separate function here. */
          pattern[npat] = xtrystrdup ("0x0000000000000000");
          if (!pattern[npat])
            err = gpg_error_from_syserror ();
          else
            {
              npat++;
              quiet = 1;
            }
        }
      else if (desc[idx].mode == KEYDB_SEARCH_MODE_NONE)
        continue;
      else
        BUG();

      if (err)
        {
          for (idx=0; idx < npat; idx++)
            xfree (pattern[idx]);
          xfree (pattern);
          return err;
        }

      if (!quiet && keyserver)
        {
          if (keyserver->host)
            log_info (_("requesting key %s from %s server %s\n"),
                      keystr_from_desc (&desc[idx]),
                      keyserver->scheme, keyserver->host);
          else
            log_info (_("requesting key %s from %s\n"),
                      keystr_from_desc (&desc[idx]), keyserver->uri);
        }
    }


  err = gpg_dirmngr_ks_get (ctrl, pattern, &datastream);
  for (idx=0; idx < npat; idx++)
    xfree (pattern[idx]);
  xfree (pattern);
  if (!err)
    {
      void *stats_handle;

      stats_handle = import_new_stats_handle();

      /* FIXME: Check whether this comment should be moved to dirmngr.

         Slurp up all the key data.  In the future, it might be nice
         to look for KEY foo OUTOFBAND and FAILED indicators.  It's
         harmless to ignore them, but ignoring them does make gpg
         complain about "no valid OpenPGP data found".  One way to do
         this could be to continue parsing this line-by-line and make
         a temp iobuf for each key. */

      import_keys_es_stream (ctrl, datastream, stats_handle, NULL, NULL,
                             opt.keyserver_options.import_options);

      import_print_stats (stats_handle);
      import_release_stats_handle (stats_handle);
    }
  es_fclose (datastream);


  return err;
}
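The pattern-building loop above turns each search descriptor into a "0x"-prefixed hex string sized exactly as 2+2*20+1 bytes. A self-contained sketch of the fingerprint branch; bin2hex() is gnupg's helper, written out locally here as to_hex():

#include <stdio.h>

/* Local stand-in for gnupg's bin2hex(): write buf as uppercase hex. */
static void to_hex(const unsigned char *buf, size_t len, char *out)
{
	static const char hexdigits[] = "0123456789ABCDEF";
	size_t i;

	for (i = 0; i < len; i++) {
		out[2*i]   = hexdigits[buf[i] >> 4];
		out[2*i+1] = hexdigits[buf[i] & 0x0f];
	}
	out[2*len] = '\0';
}

int main(void)
{
	unsigned char fpr[20] = { 0xDE, 0xAD, 0xBE, 0xEF };  /* rest zero */
	char pattern[2 + 2*20 + 1] = "0x";  /* same sizing as xtrymalloc above */

	to_hex(fpr, sizeof(fpr), pattern + 2);
	printf("%s\n", pattern);            /* 0xDEADBEEF00...00 */
	return 0;
}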
Example no. 16
/*
 * Insert a prio_tree_node @node into a radix priority search tree @root. The
 * algorithm typically takes O(log n) time where 'log n' is the number of bits
 * required to represent the maximum heap_index. In the worst case, the algo
 * can take O((log n)^2) - check prio_tree_expand.
 *
 * If a prior node with same radix_index and heap_index is already found in
 * the tree, then returns the address of the prior node. Otherwise, inserts
 * @node into the tree and returns @node.
 */
struct prio_tree_node *prio_tree_insert(struct prio_tree_root *root,
		struct prio_tree_node *node)
{
	struct prio_tree_node *cur, *res = node;
	unsigned long radix_index, heap_index;
	unsigned long r_index, h_index, index, mask;
	int size_flag = 0;

	get_index(root, node, &radix_index, &heap_index);

	if (prio_tree_empty(root) ||
			heap_index > prio_tree_maxindex(root->index_bits))
		return prio_tree_expand(root, node, heap_index);

	cur = root->prio_tree_node;
	mask = 1UL << (root->index_bits - 1);

	while (mask) {
		get_index(root, cur, &r_index, &h_index);

		if (r_index == radix_index && h_index == heap_index)
			return cur;

		if (h_index < heap_index ||
		    (h_index == heap_index && r_index > radix_index)) {
			struct prio_tree_node *tmp = node;
			node = prio_tree_replace(root, cur, node);
			cur = tmp;
			/* swap indices */
			index = r_index;
			r_index = radix_index;
			radix_index = index;
			index = h_index;
			h_index = heap_index;
			heap_index = index;
		}

		if (size_flag)
			index = heap_index - radix_index;
		else
			index = radix_index;

		if (index & mask) {
			if (prio_tree_right_empty(cur)) {
				INIT_PRIO_TREE_NODE(node);
				prio_set_parent(cur, node, false);
				return res;
			} else
				cur = cur->right;
		} else {
			if (prio_tree_left_empty(cur)) {
				INIT_PRIO_TREE_NODE(node);
				prio_set_parent(cur, node, true);
				return res;
			} else
				cur = cur->left;
		}

		mask >>= 1;

		if (!mask) {
			mask = 1UL << (BITS_PER_LONG - 1);
			size_flag = 1;
		}
	}
	/* Should not reach here */
	BUG();
	return NULL;
}
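The descent above consumes one index bit per tree level, starting from the most significant of root->index_bits and steering right when the bit is set, left otherwise. A toy sketch of that bit walk on a plain integer, outside any tree:

#include <stdio.h>

int main(void)
{
	unsigned long index = 0x2CUL;      /* 101100 in binary */
	int index_bits = 6;
	unsigned long mask = 1UL << (index_bits - 1);

	/* One decision per level, MSB first, as in prio_tree_insert(). */
	while (mask) {
		printf("%s\n", (index & mask) ? "go right" : "go left");
		mask >>= 1;
	}
	return 0;
}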
Example no. 17
static void
print_keyrec(int number,struct keyrec *keyrec)
{
  int i;

  iobuf_writebyte(keyrec->uidbuf,0);
  iobuf_flush_temp(keyrec->uidbuf);
  es_printf ("(%d)\t%s  ", number, iobuf_get_temp_buffer (keyrec->uidbuf));

  if (keyrec->size>0)
    es_printf ("%d bit ", keyrec->size);

  if(keyrec->type)
    {
      const char *str = gcry_pk_algo_name (keyrec->type);

      if(str)
	es_printf ("%s ",str);
      else
	es_printf ("unknown ");
    }

  switch(keyrec->desc.mode)
    {
      /* If the keyserver helper gave us a short keyid, we have no
	 choice but to use it.  Do check --keyid-format to add a 0x if
	 needed. */
    case KEYDB_SEARCH_MODE_SHORT_KID:
      es_printf ("key %s%08lX",
                 (opt.keyid_format==KF_0xSHORT
                  || opt.keyid_format==KF_0xLONG)?"0x":"",
                 (ulong)keyrec->desc.u.kid[1]);
      break;

      /* However, if it gave us a long keyid, we can honor
	 --keyid-format */
    case KEYDB_SEARCH_MODE_LONG_KID:
      es_printf ("key %s",keystr(keyrec->desc.u.kid));
      break;

    case KEYDB_SEARCH_MODE_FPR16:
      es_printf ("key ");
      for(i=0;i<16;i++)
	es_printf ("%02X",keyrec->desc.u.fpr[i]);
      break;

    case KEYDB_SEARCH_MODE_FPR20:
      es_printf ("key ");
      for(i=0;i<20;i++)
	es_printf ("%02X", keyrec->desc.u.fpr[i]);
      break;

    default:
      BUG();
      break;
    }

  if(keyrec->createtime>0)
    {
      es_printf (", ");
      es_printf (_("created: %s"), strtimestamp(keyrec->createtime));
    }

  if(keyrec->expiretime>0)
    {
      es_printf (", ");
      es_printf (_("expires: %s"), strtimestamp(keyrec->expiretime));
    }

  if (keyrec->flags&1)
    es_printf (" (%s)", _("revoked"));
  if(keyrec->flags&2)
    es_printf (" (%s)", _("disabled"));
  if(keyrec->flags&4)
    es_printf (" (%s)", _("expired"));

  es_printf ("\n");
}
Example no. 18
static void __init gic_dist_init(struct gic_chip_data *gic,
	unsigned int irq_start)
{
	unsigned int gic_irqs, irq_limit, i;
	u32 cpumask;
	void __iomem *base = gic->dist_base;
	u32 cpu = 0;
	u32 nrppis = 0, ppi_base = 0;

#ifdef CONFIG_SMP
	cpu = cpu_logical_map(smp_processor_id());
#endif

	cpumask = 1 << cpu;
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;

	writel_relaxed(0, base + GIC_DIST_CTRL);

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	gic_irqs = readl_relaxed(base + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;

	gic->gic_irqs = gic_irqs;

	/*
	 * Nobody would be insane enough to use PPIs on a secondary
	 * GIC, right?
	 */
	if (gic == &gic_data[0]) {
		nrppis = (32 - irq_start) & 31;

		/* The GIC only supports up to 16 PPIs. */
		if (nrppis > 16)
			BUG();

		ppi_base = gic->irq_offset + 32 - nrppis;
	}

	pr_info("Configuring GIC with %d sources (%d PPIs)\n",
		gic_irqs, (gic == &gic_data[0]) ? nrppis : 0);

	/*
	 * Set all global interrupts to be level triggered, active low.
	 */
	for (i = 32; i < gic_irqs; i += 16)
		writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	/*
	 * Set priority on all global interrupts.
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);

	/*
	 * Disable all interrupts.  Leave the PPI and SGIs alone
	 * as these enables are banked registers.
	 */
	for (i = 32; i < gic_irqs; i += 32)
		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);

	/*
	 * Limit number of interrupts registered to the platform maximum
	 */
	irq_limit = gic->irq_offset + gic_irqs;
	if (WARN_ON(irq_limit > NR_IRQS))
		irq_limit = NR_IRQS;

	/*
	 * Setup the Linux IRQ subsystem.
	 */
	for (i = 0; i < nrppis; i++) {
		int ppi = i + ppi_base;

		irq_set_percpu_devid(ppi);
		irq_set_chip_and_handler(ppi, &gic_chip,
					 handle_percpu_devid_irq);
		irq_set_chip_data(ppi, gic);
		set_irq_flags(ppi, IRQF_VALID | IRQF_NOAUTOEN);
	}

	for (i = irq_start + nrppis; i < irq_limit; i++) {
		irq_set_chip_and_handler(i, &gic_chip, handle_fasteoi_irq);
		irq_set_chip_data(i, gic);
		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
	}

	writel_relaxed(1, base + GIC_DIST_CTRL);
}
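The i * 4 / N expressions in the loops above encode how many interrupts share one 32-bit distributor register: GIC_DIST_CONFIG packs 2 bits per interrupt (16 per word), GIC_DIST_TARGET and GIC_DIST_PRI pack 8 bits (4 per word), and the enable-clear registers pack 1 bit (32 per word). A small sketch of the same offset arithmetic:

#include <stdio.h>

/* Byte offset of the register word covering interrupt i, when each
 * 32-bit register describes per_word interrupts (as in gic_dist_init). */
static unsigned int reg_offset(unsigned int i, unsigned int per_word)
{
	return (i / per_word) * 4;
}

int main(void)
{
	printf("irq 32: config at +%u, target at +%u, enable-clear at +%u\n",
	       reg_offset(32, 16),    /* 2 bits per irq */
	       reg_offset(32, 4),     /* 8 bits per irq */
	       reg_offset(32, 32));   /* 1 bit per irq */
	return 0;
}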
Example no. 19
File: irq.c Project: 168519/linux
static int bcm63xx_external_irq_set_type(struct irq_data *d,
					 unsigned int flow_type)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	int levelsense, sense, bothedge;
	unsigned long flags;

	flow_type &= IRQ_TYPE_SENSE_MASK;

	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

	levelsense = sense = bothedge = 0;
	switch (flow_type) {
	case IRQ_TYPE_EDGE_BOTH:
		bothedge = 1;
		break;

	case IRQ_TYPE_EDGE_RISING:
		sense = 1;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		levelsense = 1;
		sense = 1;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		levelsense = 1;
		break;

	default:
		printk(KERN_ERR "bogus flow type combination given!\n");
		return -EINVAL;
	}

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);
	irq %= 4;

	switch (bcm63xx_get_cpu_id()) {
	case BCM6348_CPU_ID:
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE_6348(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq);
		break;

	case BCM3368_CPU_ID:
	case BCM6328_CPU_ID:
	case BCM6338_CPU_ID:
	case BCM6345_CPU_ID:
	case BCM6358_CPU_ID:
	case BCM6362_CPU_ID:
	case BCM6368_CPU_ID:
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
		break;
	default:
		BUG();
	}

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);

	irqd_set_trigger_type(d, flow_type);
	if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		__irq_set_handler_locked(d->irq, handle_level_irq);
	else
		__irq_set_handler_locked(d->irq, handle_edge_irq);

	return IRQ_SET_MASK_OK_NOCOPY;
}
Example no. 20
static void
write_header( cipher_filter_context_t *cfx, IOBUF a )
{
    gcry_error_t err;
    PACKET pkt;
    PKT_encrypted ed;
    byte temp[18];
    unsigned int blocksize;
    unsigned int nprefix;

    blocksize = openpgp_cipher_get_algo_blklen (cfx->dek->algo);
    if ( blocksize < 8 || blocksize > 16 )
	log_fatal("unsupported blocksize %u\n", blocksize );

    memset( &ed, 0, sizeof ed );
    ed.len = cfx->datalen;
    ed.extralen = blocksize+2;
    ed.new_ctb = !ed.len;
    if( cfx->dek->use_mdc ) {
	ed.mdc_method = DIGEST_ALGO_SHA1;
	gcry_md_open (&cfx->mdc_hash, DIGEST_ALGO_SHA1, 0);
	if ( DBG_HASHING )
	    gcry_md_debug (cfx->mdc_hash, "creatmdc");
    }

    {
        char buf[20];

        sprintf (buf, "%d %d", ed.mdc_method, cfx->dek->algo);
        write_status_text (STATUS_BEGIN_ENCRYPTION, buf);
    }

    init_packet( &pkt );
    pkt.pkttype = cfx->dek->use_mdc? PKT_ENCRYPTED_MDC : PKT_ENCRYPTED;
    pkt.pkt.encrypted = &ed;
    if( build_packet( a, &pkt ))
	log_bug("build_packet(ENCR_DATA) failed\n");
    nprefix = blocksize;
    gcry_randomize (temp, nprefix, GCRY_STRONG_RANDOM );
    temp[nprefix] = temp[nprefix-2];
    temp[nprefix+1] = temp[nprefix-1];
    print_cipher_algo_note( cfx->dek->algo );
    err = openpgp_cipher_open (&cfx->cipher_hd,
			       cfx->dek->algo,
			       GCRY_CIPHER_MODE_CFB,
			       (GCRY_CIPHER_SECURE
				| ((cfx->dek->use_mdc || cfx->dek->algo >= 100)?
				   0 : GCRY_CIPHER_ENABLE_SYNC)));
    if (err) {
	/* We should never get an error here because we already checked
	 * that the algorithm is available.  */
	BUG();
    }


/*   log_hexdump( "thekey", cfx->dek->key, cfx->dek->keylen );*/
    gcry_cipher_setkey( cfx->cipher_hd, cfx->dek->key, cfx->dek->keylen );
    gcry_cipher_setiv( cfx->cipher_hd, NULL, 0 );
/*  log_hexdump( "prefix", temp, nprefix+2 ); */
    if (cfx->mdc_hash) /* Hash the "IV". */
	gcry_md_write (cfx->mdc_hash, temp, nprefix+2 );
    gcry_cipher_encrypt (cfx->cipher_hd, temp, nprefix+2, NULL, 0);
    gcry_cipher_sync (cfx->cipher_hd);
    iobuf_write(a, temp, nprefix+2);
    cfx->header=1;
}
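The two assignments temp[nprefix] = temp[nprefix-2] and temp[nprefix+1] = temp[nprefix-1] above build the OpenPGP "quick check": the random prefix is one cipher block long, and its last two bytes are repeated so a decrypter can cheaply detect a wrong session key. A tiny sketch of constructing such a prefix, with the random bytes faked by a constant:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int nprefix = 16;          /* cipher block size, e.g. AES */
	unsigned char temp[16 + 2];

	memset(temp, 0x42, nprefix);        /* stands in for gcry_randomize */
	temp[nprefix]     = temp[nprefix - 2];  /* repeat the last two bytes */
	temp[nprefix + 1] = temp[nprefix - 1];

	printf("quick-check bytes match: %d\n",
	       temp[nprefix] == temp[nprefix - 2] &&
	       temp[nprefix + 1] == temp[nprefix - 1]);
	return 0;
}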
Example no. 21
/****************
 * Protect the secret key with the passphrase from DEK
 */
int
protect_secret_key( PKT_secret_key *sk, DEK *dek )
{
    int i,j, rc = 0;
    byte *buffer;
    size_t nbytes;
    u16 csum;

    if( !dek )
	return 0;

    if( !sk->is_protected ) { /* okay, apply the protection */
	gcry_cipher_hd_t cipher_hd=NULL;

	if ( openpgp_cipher_test_algo ( sk->protect.algo ) ) {
            /* Unsupported protection algorithm. */
            rc = gpg_error (GPG_ERR_CIPHER_ALGO);
        }
	else {
	    print_cipher_algo_note( sk->protect.algo );

	    if ( openpgp_cipher_open (&cipher_hd, sk->protect.algo,
				      GCRY_CIPHER_MODE_CFB,
				      (GCRY_CIPHER_SECURE
				       | (sk->protect.algo >= 100 ?
					  0 : GCRY_CIPHER_ENABLE_SYNC))) )
              BUG();
	    if ( gcry_cipher_setkey ( cipher_hd, dek->key, dek->keylen ) )
		log_info(_("WARNING: Weak key detected"
			   " - please change passphrase again.\n"));
	    sk->protect.ivlen = openpgp_cipher_get_algo_blklen (sk->protect.algo);
	    assert( sk->protect.ivlen <= DIM(sk->protect.iv) );
	    if( sk->protect.ivlen != 8 && sk->protect.ivlen != 16 )
		BUG(); /* yes, we are very careful */
	    gcry_create_nonce (sk->protect.iv, sk->protect.ivlen);
	    gcry_cipher_setiv (cipher_hd, sk->protect.iv, sk->protect.ivlen);
	    if( sk->version >= 4 ) {
                byte *bufarr[PUBKEY_MAX_NSKEY];
		size_t narr[PUBKEY_MAX_NSKEY];
		unsigned int nbits[PUBKEY_MAX_NSKEY];
		int ndata=0;
		byte *p, *data;

		for (j=0, i = pubkey_get_npkey(sk->pubkey_algo);
			i < pubkey_get_nskey(sk->pubkey_algo); i++, j++ )
                  {
		    assert (!gcry_mpi_get_flag (sk->skey[i],
                                                GCRYMPI_FLAG_OPAQUE));
		    if (gcry_mpi_aprint (GCRYMPI_FMT_USG, bufarr+j,
                                         narr+j, sk->skey[i]))
                      BUG();
		    nbits[j] = gcry_mpi_get_nbits (sk->skey[i]);
		    ndata += narr[j] + 2;
                  }
		for ( ; j < PUBKEY_MAX_NSKEY; j++ )
                  bufarr[j] = NULL;

		ndata += opt.simple_sk_checksum? 2 : 20; /* for checksum */

		data = xmalloc_secure( ndata );
		p = data;
		for(j=0; j < PUBKEY_MAX_NSKEY && bufarr[j]; j++ ) {
		    p[0] = nbits[j] >> 8 ;
		    p[1] = nbits[j];
		    p += 2;
		    memcpy(p, bufarr[j], narr[j] );
		    p += narr[j];
		    xfree(bufarr[j]);
		}

                if (opt.simple_sk_checksum) {
                    log_info (_("generating the deprecated 16-bit checksum"
                              " for secret key protection\n"));
                    csum = checksum( data, ndata-2);
                    sk->csum = csum;
                    *p++ =	csum >> 8;
                    *p++ =	csum;
                    sk->protect.sha1chk = 0;
                }
                else {
Example no. 22
static void proc_read(void)
{
	struct element *e;
	FILE *fd;
	char buf[512], *p, *s, *unused __unused__;
	int w;
	
	if (!(fd = fopen(c_path, "r")))
		quit("Unable to open file %s: %s\n", c_path, strerror(errno));

	/* Ignore header */
	unused = fgets(buf, sizeof(buf), fd);
	unused = fgets(buf, sizeof(buf), fd);
	
	for (; fgets(buf, sizeof(buf), fd);) {
		uint64_t data[NUM_PROC_VALUE][2];
		int i;
		
		if (buf[0] == '\r' || buf[0] == '\n')
			continue;

		if (!(p = strchr(buf, ':')))
			continue;
		*p = '\0';
		s = (p + 1);
		
		for (p = &buf[0]; *p == ' '; p++);

		w = sscanf(s, "%" SCNu64 " %" SCNu64 " %" SCNu64 " %" SCNu64 " "
			      "%" SCNu64 " %" SCNu64 " %" SCNu64 " %" SCNu64 " "
			      "%" SCNu64 " %" SCNu64 " %" SCNu64 " %" SCNu64 " "
			      "%" SCNu64 " %" SCNu64 " %" SCNu64 " %" SCNu64
			      "\n",
			      &data[PROC_BYTES][0],
			      &data[PROC_PACKETS][0],
			      &data[PROC_ERRORS][0],
			      &data[PROC_DROP][0],
			      &data[PROC_FIFO][0],
			      &data[PROC_FRAME][0],
			      &data[PROC_COMPRESSED][0],
			      &data[PROC_MCAST][0],
			      &data[PROC_BYTES][1],
			      &data[PROC_PACKETS][1],
			      &data[PROC_ERRORS][1],
			      &data[PROC_DROP][1],
			      &data[PROC_FIFO][1],
			      &data[PROC_FRAME][1],
			      &data[PROC_COMPRESSED][1],
			      &data[PROC_MCAST][1]);

		if (w != 16)
			continue;

		if (!(e = element_lookup(grp, p, 0, NULL, ELEMENT_CREAT)))
			goto skip;

		if (e->e_flags & ELEMENT_FLAG_CREATED) {
			if (element_set_key_attr(e, "bytes", "packets") ||
			    element_set_usage_attr(e, "bytes"))
				BUG();

			e->e_flags &= ~ELEMENT_FLAG_CREATED;
		}

		for (i = 0; i < ARRAY_SIZE(link_attrs); i++) {
			struct attr_map *m = &link_attrs[i];

			attr_update(e, m->attrid, data[i][0], data[i][1],
				    UPDATE_FLAG_RX | UPDATE_FLAG_TX);
		}

		element_notify_update(e, NULL);
		element_lifesign(e, 1);
	}
skip:
	fclose(fd);
}
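proc_read() above is shaped around the /proc/net/dev format: two header lines, then one "iface: 16 counters" line per device, with the first eight numbers being RX counters and the next eight TX. A minimal user-space sketch of the same split-on-colon-then-sscanf approach, run on a canned line:

#include <stdio.h>
#include <string.h>
#include <inttypes.h>

int main(void)
{
	char buf[] = "  eth0: 1024 8 0 0 0 0 0 0 2048 16 0 0 0 0 0 0";
	char *p, *s;
	uint64_t rx_bytes, rx_packets, tx_bytes, tx_packets;
	uint64_t skip[6], skip2[6];

	if (!(p = strchr(buf, ':')))
		return 1;
	*p = '\0';
	s = p + 1;
	for (p = buf; *p == ' '; p++)   /* trim leading spaces off the name */
		;

	/* Bytes come first in each half, as in the attr_map above. */
	if (sscanf(s,
		   "%" SCNu64 " %" SCNu64 " %" SCNu64 " %" SCNu64
		   " %" SCNu64 " %" SCNu64 " %" SCNu64 " %" SCNu64
		   " %" SCNu64 " %" SCNu64 " %" SCNu64 " %" SCNu64
		   " %" SCNu64 " %" SCNu64 " %" SCNu64 " %" SCNu64,
		   &rx_bytes, &rx_packets, &skip[0], &skip[1], &skip[2],
		   &skip[3], &skip[4], &skip[5],
		   &tx_bytes, &tx_packets, &skip2[0], &skip2[1], &skip2[2],
		   &skip2[3], &skip2[4], &skip2[5]) != 16)
		return 1;

	printf("%s: rx %" PRIu64 " bytes, tx %" PRIu64 " bytes\n",
	       p, rx_bytes, tx_bytes);
	return 0;
}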
Example no. 23
/**
 * sync_mft_mirror - synchronize an mft record to the mft mirror
 * @ni:		ntfs inode whose mft record to synchronize
 * @m:		mapped, mst protected (extent) mft record to synchronize
 * @sync:	if true, wait for i/o completion
 *
 * Write the mapped, mst protected (extent) mft record @m described by the
 * (regular or extent) ntfs inode @ni to the mft mirror ($MFTMirr).
 *
 * On success return 0.  On error return -errno and set the volume errors flag
 * in the ntfs_volume to which @ni belongs.
 *
 * NOTE:  We always perform synchronous i/o and ignore the @sync parameter.
 *
 * TODO:  If @sync is false, want to do truly asynchronous i/o, i.e. just
 * schedule i/o via ->writepage or do it via kntfsd or whatever.
 */
static int sync_mft_mirror(ntfs_inode *ni, MFT_RECORD *m, int sync)
{
	ntfs_volume *vol = ni->vol;
	struct page *page;
	unsigned int blocksize = vol->sb->s_blocksize;
	int max_bhs = vol->mft_record_size / blocksize;
	struct buffer_head *bhs[max_bhs];
	struct buffer_head *bh, *head;
	u8 *kmirr;
	unsigned int block_start, block_end, m_start, m_end;
	int i_bhs, nr_bhs, err = 0;

	ntfs_debug("Entering for inode 0x%lx.", ni->mft_no);
	BUG_ON(!max_bhs);
	if (unlikely(!vol->mftmirr_ino)) {
		/* This could happen during umount... */
		err = sync_mft_mirror_umount(ni, m);
		if (likely(!err))
			return err;
		goto err_out;
	}
	/* Get the page containing the mirror copy of the mft record @m. */
	page = ntfs_map_page(vol->mftmirr_ino->i_mapping, ni->mft_no >>
			(PAGE_CACHE_SHIFT - vol->mft_record_size_bits));
	if (unlikely(IS_ERR(page))) {
		ntfs_error(vol->sb, "Failed to map mft mirror page.");
		err = PTR_ERR(page);
		goto err_out;
	}
	/*
	 * Exclusion against other writers.   This should never be a problem
	 * since the page in which the mft record @m resides is also locked and
	 * hence any other writers would be held up there but it is better to
	 * make sure no one is writing from elsewhere.
	 */
	lock_page(page);
	/* The address in the page of the mirror copy of the mft record @m. */
	kmirr = page_address(page) + ((ni->mft_no << vol->mft_record_size_bits)
			& ~PAGE_CACHE_MASK);
	/* Copy the mst protected mft record to the mirror. */
	memcpy(kmirr, m, vol->mft_record_size);
	/* Make sure we have mapped buffers. */
	if (!page_has_buffers(page)) {
no_buffers_err_out:
		ntfs_error(vol->sb, "Writing mft mirror records without "
				"existing buffers is not implemented yet.  %s",
				ntfs_please_email);
		err = -EOPNOTSUPP;
		goto unlock_err_out;
	}
	bh = head = page_buffers(page);
	if (!bh)
		goto no_buffers_err_out;
	nr_bhs = 0;
	block_start = 0;
	m_start = kmirr - (u8*)page_address(page);
	m_end = m_start + vol->mft_record_size;
	do {
		block_end = block_start + blocksize;
		/*
		 * If the buffer is outside the mft record, just skip it,
		 * clearing it if it is dirty to make sure it is not written
		 * out.  It should never be marked dirty but better be safe.
		 */
		if ((block_end <= m_start) || (block_start >= m_end)) {
			if (buffer_dirty(bh)) {
				ntfs_warning(vol->sb, "Clearing dirty mft "
						"record page buffer.  %s",
						ntfs_please_email);
				clear_buffer_dirty(bh);
			}
			continue;
		}
		if (!buffer_mapped(bh)) {
			ntfs_error(vol->sb, "Writing mft mirror records "
					"without existing mapped buffers is "
					"not implemented yet.  %s",
					ntfs_please_email);
			err = -EOPNOTSUPP;
			continue;
		}
		if (!buffer_uptodate(bh)) {
			ntfs_error(vol->sb, "Writing mft mirror records "
					"without existing uptodate buffers is "
					"not implemented yet.  %s",
					ntfs_please_email);
			err = -EOPNOTSUPP;
			continue;
		}
		BUG_ON(!nr_bhs && (m_start != block_start));
		BUG_ON(nr_bhs >= max_bhs);
		bhs[nr_bhs++] = bh;
		BUG_ON((nr_bhs >= max_bhs) && (m_end != block_end));
	} while (block_start = block_end, (bh = bh->b_this_page) != head);
	if (likely(!err)) {
		/* Lock buffers and start synchronous write i/o on them. */
		for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) {
			struct buffer_head *tbh = bhs[i_bhs];

			if (unlikely(test_set_buffer_locked(tbh)))
				BUG();
			BUG_ON(!buffer_uptodate(tbh));
			if (buffer_dirty(tbh))
				clear_buffer_dirty(tbh);
			get_bh(tbh);
			tbh->b_end_io = end_buffer_write_sync;
			submit_bh(WRITE, tbh);
		}
		/* Wait on i/o completion of buffers. */
		for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) {
			struct buffer_head *tbh = bhs[i_bhs];

			wait_on_buffer(tbh);
			if (unlikely(!buffer_uptodate(tbh))) {
				err = -EIO;
				/*
				 * Set the buffer uptodate so the page & buffer
				 * states don't become out of sync.
				 */
				if (PageUptodate(page))
					set_buffer_uptodate(tbh);
			}
		}
	} else /* if (unlikely(err)) */ {
		/* Clean the buffers. */
		for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++)
			clear_buffer_dirty(bhs[i_bhs]);
	}
unlock_err_out:
	/* Current state: all buffers are clean, unlocked, and uptodate. */
	/* Remove the mst protection fixups again. */
	post_write_mst_fixup((NTFS_RECORD*)kmirr);
	flush_dcache_page(page);
	unlock_page(page);
	ntfs_unmap_page(page);
	if (unlikely(err)) {
		/* I/O error during writing.  This is really bad! */
		ntfs_error(vol->sb, "I/O error while writing mft mirror "
				"record 0x%lx!  You should unmount the volume "
				"and run chkdsk or ntfsfix.", ni->mft_no);
		goto err_out;
	}
	ntfs_debug("Done.");
	return 0;
err_out:
	ntfs_error(vol->sb, "Failed to synchronize $MFTMirr (error code %i).  "
			"Volume will be left marked dirty on umount.  Run "
			"ntfsfix on the partition after umounting to correct "
			"this.", -err);
	/* We don't want to clear the dirty bit on umount. */
	NVolSetErrors(vol);
	return err;
}
Example no. 24
/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * For externally managed @bqt frees the map.  Callers of this
 * function must guarantee to have released all the queues that
 * might have been using this tag map.
 */
void blk_free_tags(struct blk_queue_tag *bqt)
{
	if (unlikely(!__blk_free_tags(bqt)))
		BUG();
}
Example no. 25
/* Send RST reply */
static void send_reset(struct net *net, struct sk_buff *oldskb, int hook)
{
	struct sk_buff *nskb;
	struct tcphdr otcph, *tcph;
	unsigned int otcplen, hh_len;
	int tcphoff, needs_ack;
	const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
	struct ipv6hdr *ip6h;
#define DEFAULT_TOS_VALUE	0x0U
	const __u8 tclass = DEFAULT_TOS_VALUE;
	struct dst_entry *dst = NULL;
	u8 proto;
	struct flowi6 fl6;

	if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
	    (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
		pr_debug("addr is not unicast.\n");
		return;
	}

	proto = oip6h->nexthdr;
	tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto);

	if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
		pr_debug("Cannot get TCP header.\n");
		return;
	}

	otcplen = oldskb->len - tcphoff;

	/* IP header checks: fragment, too short. */
	if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) {
		pr_debug("proto(%d) != IPPROTO_TCP, "
			 "or too short. otcplen = %d\n",
			 proto, otcplen);
		return;
	}

	if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr)))
		BUG();

	/* No RST for RST. */
	if (otcph.rst) {
		pr_debug("RST is set\n");
		return;
	}

	/* Check checksum. */
	if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) {
		pr_debug("TCP checksum is invalid\n");
		return;
	}

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl6.saddr, &oip6h->daddr);
	ipv6_addr_copy(&fl6.daddr, &oip6h->saddr);
	fl6.fl6_sport = otcph.dest;
	fl6.fl6_dport = otcph.source;
	security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
	dst = ip6_route_output(net, NULL, &fl6);
	if (dst == NULL || dst->error) {
		dst_release(dst);
		return;
	}
	dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
	if (IS_ERR(dst))
		return;

	hh_len = (dst->dev->hard_header_len + 15)&~15;
	nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
			 + sizeof(struct tcphdr) + dst->trailer_len,
			 GFP_ATOMIC);

	if (!nskb) {
		if (net_ratelimit())
			pr_debug("cannot alloc skb\n");
		dst_release(dst);
		return;
	}

	skb_dst_set(nskb, dst);

	skb_reserve(nskb, hh_len + dst->header_len);

	skb_put(nskb, sizeof(struct ipv6hdr));
	skb_reset_network_header(nskb);
	ip6h = ipv6_hdr(nskb);
	*(__be32 *)ip6h =  htonl(0x60000000 | (tclass << 20));
	ip6h->hop_limit = ip6_dst_hoplimit(dst);
	ip6h->nexthdr = IPPROTO_TCP;
	ipv6_addr_copy(&ip6h->saddr, &oip6h->daddr);
	ipv6_addr_copy(&ip6h->daddr, &oip6h->saddr);

	skb_reset_transport_header(nskb);
	tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
	/* Truncate to length (no data) */
	tcph->doff = sizeof(struct tcphdr)/4;
	tcph->source = otcph.dest;
	tcph->dest = otcph.source;

	if (otcph.ack) {
		needs_ack = 0;
		tcph->seq = otcph.ack_seq;
		tcph->ack_seq = 0;
	} else {
		needs_ack = 1;
		tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin
				      + otcplen - (otcph.doff<<2));
		tcph->seq = 0;
	}

	/* Reset flags */
	((u_int8_t *)tcph)[13] = 0;
	tcph->rst = 1;
	tcph->ack = needs_ack;
	tcph->window = 0;
	tcph->urg_ptr = 0;
	tcph->check = 0;

	/* Adjust TCP checksum */
	tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr,
				      &ipv6_hdr(nskb)->daddr,
				      sizeof(struct tcphdr), IPPROTO_TCP,
				      csum_partial(tcph,
						   sizeof(struct tcphdr), 0));

	nf_ct_attach(nskb, oldskb);

	ip6_local_out(nskb);
}
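For a reset answering a non-ACK segment, the code above computes ack_seq as seq + SYN + FIN + payload length, where the payload length is otcplen minus the header length (doff << 2 bytes). A worked sketch of that arithmetic with concrete numbers:

#include <stdio.h>

int main(void)
{
	/* Example segment: SYN set, seq 1000, 20-byte header, no payload. */
	unsigned int seq = 1000, syn = 1, fin = 0;
	unsigned int otcplen = 20;      /* total TCP length of the old packet */
	unsigned int doff = 5;          /* header length in 32-bit words */

	/* Same formula as the tcph->ack_seq line in send_reset(). */
	unsigned int ack_seq = seq + syn + fin + otcplen - (doff << 2);
	printf("RST will ack %u\n", ack_seq);   /* 1001 for a bare SYN */
	return 0;
}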
Example no. 26
int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs,
				unsigned long count, uint32_t ofs)
{
	union jffs2_node_union *node;
	struct jffs2_eraseblock *jeb;

	if (c->summary->sum_size == JFFS2_SUMMARY_NOSUM_SIZE) {
		dbg_summary("Summary is disabled for this jeb! Skipping summary info!\n");
		return 0;
	}

	node = invecs[0].iov_base;
	jeb = &c->blocks[ofs / c->sector_size];
	ofs -= jeb->offset;

	switch (je16_to_cpu(node->u.nodetype)) {
		case JFFS2_NODETYPE_INODE: {
			struct jffs2_sum_inode_mem *temp =
				kmalloc(sizeof(struct jffs2_sum_inode_mem), GFP_KERNEL);

			if (!temp)
				goto no_mem;

			temp->nodetype = node->i.nodetype;
			temp->inode = node->i.ino;
			temp->version = node->i.version;
			temp->offset = cpu_to_je32(ofs);
			temp->totlen = node->i.totlen;
			temp->next = NULL;

			return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp);
		}

		case JFFS2_NODETYPE_DIRENT: {
			struct jffs2_sum_dirent_mem *temp =
				kmalloc(sizeof(struct jffs2_sum_dirent_mem) + node->d.nsize, GFP_KERNEL);

			if (!temp)
				goto no_mem;

			temp->nodetype = node->d.nodetype;
			temp->totlen = node->d.totlen;
			temp->offset = cpu_to_je32(ofs);
			temp->pino = node->d.pino;
			temp->version = node->d.version;
			temp->ino = node->d.ino;
			temp->nsize = node->d.nsize;
			temp->type = node->d.type;
			temp->next = NULL;

			switch (count) {
				case 1:
					memcpy(temp->name,node->d.name,node->d.nsize);
					break;

				case 2:
					memcpy(temp->name,invecs[1].iov_base,node->d.nsize);
					break;

				default:
					BUG();	/* impossible count value */
					break;
			}

			return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp);
		}
#ifdef CONFIG_JFFS2_FS_XATTR
		case JFFS2_NODETYPE_XATTR: {
			struct jffs2_sum_xattr_mem *temp;
			temp = kmalloc(sizeof(struct jffs2_sum_xattr_mem), GFP_KERNEL);
			if (!temp)
				goto no_mem;

			temp->nodetype = node->x.nodetype;
			temp->xid = node->x.xid;
			temp->version = node->x.version;
			temp->totlen = node->x.totlen;
			temp->offset = cpu_to_je32(ofs);
			temp->next = NULL;

			return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp);
		}
		case JFFS2_NODETYPE_XREF: {
			struct jffs2_sum_xref_mem *temp;
			temp = kmalloc(sizeof(struct jffs2_sum_xref_mem), GFP_KERNEL);
			if (!temp)
				goto no_mem;
			temp->nodetype = node->r.nodetype;
			temp->offset = cpu_to_je32(ofs);
			temp->next = NULL;

			return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp);
		}
#endif
		case JFFS2_NODETYPE_PADDING:
			dbg_summary("node PADDING\n");
			c->summary->sum_padded += je32_to_cpu(node->u.totlen);
			break;

		case JFFS2_NODETYPE_CLEANMARKER:
			dbg_summary("node CLEANMARKER\n");
			break;

		case JFFS2_NODETYPE_SUMMARY:
			dbg_summary("node SUMMARY\n");
			break;

		default:
			/* If you implement a new node type you should also implement
			   summary support for it or disable summary.
			*/
			BUG();
			break;
	}

	return 0;

no_mem:
	JFFS2_WARNING("MEMORY ALLOCATION ERROR!");
	return -ENOMEM;
}
Example no. 27
/* 
 * Search through the keyring(s), starting at the current position,
 * for a keyblock which contains one of the keys described in the DESC array.
 */
int 
keyring_search (KEYRING_HANDLE hd, KEYDB_SEARCH_DESC *desc,
		size_t ndesc, size_t *descindex)
{
  int rc;
  PACKET pkt;
  int save_mode;
  off_t offset, main_offset;
  size_t n;
  int need_uid, need_words, need_keyid, need_fpr, any_skip;
  int pk_no, uid_no;
  int initial_skip;
  int use_offtbl;
  PKT_user_id *uid = NULL;
  PKT_public_key *pk = NULL;
  PKT_secret_key *sk = NULL;
  u32 aki[2];

  /* figure out what information we need */
  need_uid = need_words = need_keyid = need_fpr = any_skip = 0;
  for (n=0; n < ndesc; n++) 
    {
      switch (desc[n].mode) 
        {
        case KEYDB_SEARCH_MODE_EXACT: 
        case KEYDB_SEARCH_MODE_SUBSTR:
        case KEYDB_SEARCH_MODE_MAIL:
        case KEYDB_SEARCH_MODE_MAILSUB:
        case KEYDB_SEARCH_MODE_MAILEND:
          need_uid = 1;
          break;
        case KEYDB_SEARCH_MODE_WORDS: 
          need_uid = 1;
          need_words = 1;
          break;
        case KEYDB_SEARCH_MODE_SHORT_KID: 
        case KEYDB_SEARCH_MODE_LONG_KID:
          need_keyid = 1;
          break;
        case KEYDB_SEARCH_MODE_FPR16: 
        case KEYDB_SEARCH_MODE_FPR20:
        case KEYDB_SEARCH_MODE_FPR: 
          need_fpr = 1;
          break;
        case KEYDB_SEARCH_MODE_FIRST:
          /* always restart the search in this mode */
          keyring_search_reset (hd);
          break;
        default: break;
	}
      if (desc[n].skipfnc) 
        {
          any_skip = 1;
          need_keyid = 1;
        }
    }

  rc = prepare_search (hd);
  if (rc)
    return rc;

  use_offtbl = !hd->secret && kr_offtbl;
  if (!use_offtbl)
    ;
  else if (!kr_offtbl_ready)
    need_keyid = 1;
  else if (ndesc == 1 && desc[0].mode == KEYDB_SEARCH_MODE_LONG_KID)
    {
      struct off_item *oi;
            
      oi = lookup_offset_hash_table (kr_offtbl, desc[0].u.kid);
      if (!oi)
        { /* We know that we don't have this key */
          hd->found.kr = NULL;
          hd->current.eof = 1;
          return -1;
        }
      /* We could now create a positive search status and return.
       * However the problem is that another instance of gpg may 
       * have changed the keyring so that the offsets are not valid
       * anymore - therefore we don't do it 
       */
    }

  if (need_words)
    {
      const char *name = NULL;

      log_debug ("word search mode does not yet work\n");
      /* FIXME: here is a long standing bug in our function and in addition we
         just use the first search description */
      for (n=0; n < ndesc && !name; n++) 
        {
          if (desc[n].mode == KEYDB_SEARCH_MODE_WORDS) 
            name = desc[n].u.name;
        }
      assert (name);
      if ( !hd->word_match.name || strcmp (hd->word_match.name, name) ) 
        {
          /* name changed */
          xfree (hd->word_match.name);
          xfree (hd->word_match.pattern);
          hd->word_match.name = xstrdup (name);
          hd->word_match.pattern = prepare_word_match (name);
        }
      /*  name = hd->word_match.pattern; */
    }

  init_packet(&pkt);
  save_mode = set_packet_list_mode(0);

  hd->found.kr = NULL;
  main_offset = 0;
  pk_no = uid_no = 0;
  initial_skip = 1; /* skip until we see the start of a keyblock */
  while (!(rc=search_packet (hd->current.iobuf, &pkt, &offset, need_uid))) 
    {
      byte afp[MAX_FINGERPRINT_LEN];
      size_t an;

      if (pkt.pkttype == PKT_PUBLIC_KEY  || pkt.pkttype == PKT_SECRET_KEY) 
        {
          main_offset = offset;
          pk_no = uid_no = 0;
          initial_skip = 0;
        }
      if (initial_skip) 
        {
          free_packet (&pkt);
          continue;
        }
	
      pk = NULL;
      sk = NULL;
      uid = NULL;
      if (   pkt.pkttype == PKT_PUBLIC_KEY
             || pkt.pkttype == PKT_PUBLIC_SUBKEY)
        {
          pk = pkt.pkt.public_key;
          ++pk_no;

          if (need_fpr) {
            fingerprint_from_pk (pk, afp, &an);
            while (an < 20) /* fill up to 20 bytes */
              afp[an++] = 0;
          }
          if (need_keyid)
            keyid_from_pk (pk, aki);

          if (use_offtbl && !kr_offtbl_ready)
            update_offset_hash_table (kr_offtbl, aki, main_offset);
        }
      else if (pkt.pkttype == PKT_USER_ID) 
        {
          uid = pkt.pkt.user_id;
          ++uid_no;
        }
      else if (    pkt.pkttype == PKT_SECRET_KEY
                   || pkt.pkttype == PKT_SECRET_SUBKEY) 
        {
          sk = pkt.pkt.secret_key;
          ++pk_no;

          if (need_fpr) {
            fingerprint_from_sk (sk, afp, &an);
            while (an < 20) /* fill up to 20 bytes */
              afp[an++] = 0;
          }
          if (need_keyid)
            keyid_from_sk (sk, aki);
            
        }

      for (n=0; n < ndesc; n++) 
        {
          switch (desc[n].mode) {
          case KEYDB_SEARCH_MODE_NONE: 
            BUG ();
            break;
          case KEYDB_SEARCH_MODE_EXACT: 
          case KEYDB_SEARCH_MODE_SUBSTR:
          case KEYDB_SEARCH_MODE_MAIL:
          case KEYDB_SEARCH_MODE_MAILSUB:
          case KEYDB_SEARCH_MODE_MAILEND:
          case KEYDB_SEARCH_MODE_WORDS: 
            if ( uid && !compare_name (desc[n].mode,
                                       desc[n].u.name,
                                       uid->name, uid->len)) 
              goto found;
            break;
                
          case KEYDB_SEARCH_MODE_SHORT_KID: 
            if ((pk||sk) && desc[n].u.kid[1] == aki[1])
              goto found;
            break;
          case KEYDB_SEARCH_MODE_LONG_KID:
            if ((pk||sk) && desc[n].u.kid[0] == aki[0]
                && desc[n].u.kid[1] == aki[1])
              goto found;
            break;
          case KEYDB_SEARCH_MODE_FPR16:
            if ((pk||sk) && !memcmp (desc[n].u.fpr, afp, 16))
              goto found;
            break;
          case KEYDB_SEARCH_MODE_FPR20:
          case KEYDB_SEARCH_MODE_FPR: 
            if ((pk||sk) && !memcmp (desc[n].u.fpr, afp, 20))
              goto found;
            break;
          case KEYDB_SEARCH_MODE_FIRST: 
            if (pk||sk)
              goto found;
            break;
          case KEYDB_SEARCH_MODE_NEXT: 
            if (pk||sk)
              goto found;
            break;
          default: 
            rc = G10ERR_INV_ARG;
            goto found;
          }
	}
      free_packet (&pkt);
      continue;
    found:
      /* Record which desc we matched on.  Note this value is only
	 meaningful if this function returns with no errors. */
      if(descindex)
	*descindex=n;
      for (n=any_skip?0:ndesc; n < ndesc; n++) 
        {
          if (desc[n].skipfnc
              && desc[n].skipfnc (desc[n].skipfncvalue, aki, uid))
            break;
        }
      if (n == ndesc)
        goto real_found;
      free_packet (&pkt);
    }
 real_found:
  if (!rc)
    {
      hd->found.offset = main_offset;
      hd->found.kr = hd->current.kr;
      hd->found.pk_no = (pk||sk)? pk_no : 0;
      hd->found.uid_no = uid? uid_no : 0;
    }
  else if (rc == -1)
    {
      hd->current.eof = 1;
      /* if we scanned all keyrings, we are sure that
       * all known key IDs are in our offtbl, mark that. */
      if (use_offtbl && !kr_offtbl_ready)
        {
          KR_NAME kr;
          
          /* First set the did_full_scan flag for this keyring (ignore
             secret keyrings) */
          for (kr=kr_names; kr; kr = kr->next)
            {
              if (!kr->secret && hd->resource == kr) 
                {
                  kr->did_full_scan = 1;
                  break;
                }
            }
          /* Then check whether all flags are set and if so, mark the
             offtbl ready */
          for (kr=kr_names; kr; kr = kr->next)
            {
              if (!kr->secret && !kr->did_full_scan) 
                break;
            }
          if (!kr)
            kr_offtbl_ready = 1;
        }
    }
  else 
    hd->current.error = rc;

  free_packet(&pkt);
  set_packet_list_mode(save_mode);
  return rc;
}
Example no. 28
static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
					uint32_t infosize, uint32_t datasize, int padsize)
{
	struct jffs2_raw_summary isum;
	union jffs2_sum_mem *temp;
	struct jffs2_sum_marker *sm;
	struct kvec vecs[2];
	uint32_t sum_ofs;
	void *wpage;
	int ret;
	size_t retlen;

	memset(c->summary->sum_buf, 0xff, datasize);
	memset(&isum, 0, sizeof(isum));

	isum.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	isum.nodetype = cpu_to_je16(JFFS2_NODETYPE_SUMMARY);
	isum.totlen = cpu_to_je32(infosize);
	isum.hdr_crc = cpu_to_je32(crc32(0, &isum, sizeof(struct jffs2_unknown_node) - 4));
	isum.padded = cpu_to_je32(c->summary->sum_padded);
	isum.cln_mkr = cpu_to_je32(c->cleanmarker_size);
	isum.sum_num = cpu_to_je32(c->summary->sum_num);
	wpage = c->summary->sum_buf;

	while (c->summary->sum_num) {
		temp = c->summary->sum_list_head;

		switch (je16_to_cpu(temp->u.nodetype)) {
			case JFFS2_NODETYPE_INODE: {
				struct jffs2_sum_inode_flash *sino_ptr = wpage;

				sino_ptr->nodetype = temp->i.nodetype;
				sino_ptr->inode = temp->i.inode;
				sino_ptr->version = temp->i.version;
				sino_ptr->offset = temp->i.offset;
				sino_ptr->totlen = temp->i.totlen;

				wpage += JFFS2_SUMMARY_INODE_SIZE;

				break;
			}

			case JFFS2_NODETYPE_DIRENT: {
				struct jffs2_sum_dirent_flash *sdrnt_ptr = wpage;

				sdrnt_ptr->nodetype = temp->d.nodetype;
				sdrnt_ptr->totlen = temp->d.totlen;
				sdrnt_ptr->offset = temp->d.offset;
				sdrnt_ptr->pino = temp->d.pino;
				sdrnt_ptr->version = temp->d.version;
				sdrnt_ptr->ino = temp->d.ino;
				sdrnt_ptr->nsize = temp->d.nsize;
				sdrnt_ptr->type = temp->d.type;

				memcpy(sdrnt_ptr->name, temp->d.name,
							temp->d.nsize);

				wpage += JFFS2_SUMMARY_DIRENT_SIZE(temp->d.nsize);

				break;
			}
#ifdef CONFIG_JFFS2_FS_XATTR
			case JFFS2_NODETYPE_XATTR: {
				struct jffs2_sum_xattr_flash *sxattr_ptr = wpage;

				temp = c->summary->sum_list_head;
				sxattr_ptr->nodetype = temp->x.nodetype;
				sxattr_ptr->xid = temp->x.xid;
				sxattr_ptr->version = temp->x.version;
				sxattr_ptr->offset = temp->x.offset;
				sxattr_ptr->totlen = temp->x.totlen;

				wpage += JFFS2_SUMMARY_XATTR_SIZE;
				break;
			}
			case JFFS2_NODETYPE_XREF: {
				struct jffs2_sum_xref_flash *sxref_ptr = wpage;

				temp = c->summary->sum_list_head;
				sxref_ptr->nodetype = temp->r.nodetype;
				sxref_ptr->offset = temp->r.offset;

				wpage += JFFS2_SUMMARY_XREF_SIZE;
				break;
			}
#endif
			default : {
				if ((je16_to_cpu(temp->u.nodetype) & JFFS2_COMPAT_MASK)
				    == JFFS2_FEATURE_RWCOMPAT_COPY) {
					dbg_summary("Writing unknown RWCOMPAT_COPY node type %x\n",
						    je16_to_cpu(temp->u.nodetype));
					jffs2_sum_disable_collecting(c->summary);
				} else {
					BUG();	/* unknown node in summary information */
				}
			}
		}

		c->summary->sum_list_head = temp->u.next;
		kfree(temp);

		c->summary->sum_num--;
	}

	jffs2_sum_reset_collected(c->summary);

	wpage += padsize;

	sm = wpage;
	sm->offset = cpu_to_je32(c->sector_size - jeb->free_size);
	sm->magic = cpu_to_je32(JFFS2_SUM_MAGIC);

	isum.sum_crc = cpu_to_je32(crc32(0, c->summary->sum_buf, datasize));
	isum.node_crc = cpu_to_je32(crc32(0, &isum, sizeof(isum) - 8));

	vecs[0].iov_base = &isum;
	vecs[0].iov_len = sizeof(isum);
	vecs[1].iov_base = c->summary->sum_buf;
	vecs[1].iov_len = datasize;

	sum_ofs = jeb->offset + c->sector_size - jeb->free_size;

	dbg_summary("JFFS2: writing out data to flash to pos : 0x%08x\n",
		    sum_ofs);

	ret = jffs2_flash_writev(c, vecs, 2, sum_ofs, &retlen, 0);

	if (ret || (retlen != infosize)) {

		JFFS2_WARNING("Write of %u bytes at 0x%08x failed. returned %d, retlen %zd\n",
			      infosize, sum_ofs, ret, retlen);

		if (retlen) {
			/* Waste remaining space */
			spin_lock(&c->erase_completion_lock);
			jffs2_link_node_ref(c, jeb, sum_ofs | REF_OBSOLETE, infosize, NULL);
			spin_unlock(&c->erase_completion_lock);
		}

		c->summary->sum_size = JFFS2_SUMMARY_NOSUM_SIZE;

		return 0;
	}

	spin_lock(&c->erase_completion_lock);
	jffs2_link_node_ref(c, jeb, sum_ofs | REF_NORMAL, infosize, NULL);
	spin_unlock(&c->erase_completion_lock);

	return 0;
}
Example no. 29
/**
 * Main entrypoint for syncpoint value waits.
 */
int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
			u32 thresh, u32 timeout, u32 *value)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	void *ref;
	void *waiter;
	int err = 0, check_count = 0, low_timeout = 0;

	if (value)
		*value = 0;

	BUG_ON(!syncpt_op(sp).update_min);
	if (!nvhost_syncpt_check_max(sp, id, thresh)) {
		dev_warn(&syncpt_to_dev(sp)->pdev->dev,
			"wait %d (%s) for (%d) wouldn't be met (max %d)\n",
			id, syncpt_op(sp).name(sp, id), thresh,
			nvhost_syncpt_read_max(sp, id));
		nvhost_debug_dump(syncpt_to_dev(sp));
		return -EINVAL;
	}

	/* first check cache */
	if (nvhost_syncpt_min_cmp(sp, id, thresh)) {
		if (value)
			*value = nvhost_syncpt_read_min(sp, id);
		return 0;
	}

	/* keep host alive */
	nvhost_module_busy(&syncpt_to_dev(sp)->mod);

	if (client_managed(id) || !nvhost_syncpt_min_eq_max(sp, id)) {
		/* try to read from register */
		u32 val = syncpt_op(sp).update_min(sp, id);
		if ((s32)(val - thresh) >= 0) {
			if (value)
				*value = val;
			goto done;
		}
	}

	if (!timeout) {
		err = -EAGAIN;
		goto done;
	}

	/* schedule a wakeup when the syncpoint value is reached */
	waiter = nvhost_intr_alloc_waiter();
	if (!waiter) {
		err = -ENOMEM;
		goto done;
	}

	err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr), id, thresh,
				NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE, &wq,
				waiter,
				&ref);
	if (err)
		goto done;

	err = -EAGAIN;
	/* wait for the syncpoint, or timeout, or signal */
	while (timeout) {
		u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
		int remain = wait_event_interruptible_timeout(wq,
						nvhost_syncpt_min_cmp(sp, id, thresh),
						check);
		if (remain > 0 || nvhost_syncpt_min_cmp(sp, id, thresh)) {
			if (value)
				*value = nvhost_syncpt_read_min(sp, id);
			err = 0;
			break;
		}
		if (remain < 0) {
			err = remain;
			break;
		}
		if (timeout != NVHOST_NO_TIMEOUT) {
			if (timeout < SYNCPT_CHECK_PERIOD) {
				/* Caller-specified timeout may be impractically low */
				low_timeout = timeout;
			}
			timeout -= check;
		}
		if (timeout) {
			dev_warn(&syncpt_to_dev(sp)->pdev->dev,
				"%s: syncpoint id %d (%s) stuck waiting %d, timeout=%d\n",
				 current->comm, id, syncpt_op(sp).name(sp, id),
				 thresh, timeout);

			syncpt_op(sp).debug(sp);
			if (check_count > MAX_STUCK_CHECK_COUNT) {
				if (low_timeout) {
					dev_warn(&syncpt_to_dev(sp)->pdev->dev,
						"is timeout %d too low?\n",
						low_timeout);
				}
				nvhost_debug_dump(syncpt_to_dev(sp));
				BUG();
			}
			check_count++;
		}
	}
	nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), ref);

done:
	nvhost_module_idle(&syncpt_to_dev(sp)->mod);
	return err;
}
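The wait loop above never sleeps longer than SYNCPT_CHECK_PERIOD at a time, so a stuck syncpoint gets detected and logged even when the caller passed a huge or infinite timeout. A hedged sketch of that chunked-wait shape, with the hardware wait replaced by a stub condition:

#include <stdio.h>

#define CHECK_PERIOD 100   /* stand-in for SYNCPT_CHECK_PERIOD */

/* Stub: pretend the condition becomes true after 350 time units. */
static int condition_met(unsigned int elapsed) { return elapsed >= 350; }

int main(void)
{
	unsigned int timeout = 1000, elapsed = 0;
	int err = -1;   /* -EAGAIN stand-in */

	while (timeout) {
		unsigned int check = timeout < CHECK_PERIOD ?
					timeout : CHECK_PERIOD;
		elapsed += check;               /* "wait" one chunk */
		if (condition_met(elapsed)) {
			err = 0;
			break;
		}
		timeout -= check;
		if (timeout)                    /* the stuck-check hook */
			printf("still waiting after %u units\n", elapsed);
	}
	printf("result: %d\n", err);
	return 0;
}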
Example no. 30
/*
 * Instantiate the generic non-control parts of the device.
 */
static __devinit int wm8994_device_init(struct wm8994 *wm8994, int irq)
{
	struct wm8994_pdata *pdata = wm8994->dev->platform_data;
	struct regmap_config *regmap_config;
	const struct reg_default *regmap_patch = NULL;
	const char *devname;
	int ret, i, patch_regs = 0;
	int pulls = 0;

	dev_set_drvdata(wm8994->dev, wm8994);

	/* Add the on-chip regulators first for bootstrapping */
	ret = mfd_add_devices(wm8994->dev, -1,
			      wm8994_regulator_devs,
			      ARRAY_SIZE(wm8994_regulator_devs),
			      NULL, 0);
	if (ret != 0) {
		dev_err(wm8994->dev, "Failed to add children: %d\n", ret);
		goto err;
	}

	switch (wm8994->type) {
	case WM1811:
		wm8994->num_supplies = ARRAY_SIZE(wm1811_main_supplies);
		break;
	case WM8994:
		wm8994->num_supplies = ARRAY_SIZE(wm8994_main_supplies);
		break;
	case WM8958:
		wm8994->num_supplies = ARRAY_SIZE(wm8958_main_supplies);
		break;
	default:
		BUG();
		goto err;
	}

	wm8994->supplies = devm_kzalloc(wm8994->dev,
					sizeof(struct regulator_bulk_data) *
					wm8994->num_supplies, GFP_KERNEL);
	if (!wm8994->supplies) {
		ret = -ENOMEM;
		goto err;
	}

	switch (wm8994->type) {
	case WM1811:
		for (i = 0; i < ARRAY_SIZE(wm1811_main_supplies); i++)
			wm8994->supplies[i].supply = wm1811_main_supplies[i];
		break;
	case WM8994:
		for (i = 0; i < ARRAY_SIZE(wm8994_main_supplies); i++)
			wm8994->supplies[i].supply = wm8994_main_supplies[i];
		break;
	case WM8958:
		for (i = 0; i < ARRAY_SIZE(wm8958_main_supplies); i++)
			wm8994->supplies[i].supply = wm8958_main_supplies[i];
		break;
	default:
		BUG();
		goto err;
	}
		
	ret = regulator_bulk_get(wm8994->dev, wm8994->num_supplies,
				 wm8994->supplies);
	if (ret != 0) {
		dev_err(wm8994->dev, "Failed to get supplies: %d\n", ret);
		goto err;
	}

	ret = regulator_bulk_enable(wm8994->num_supplies,
				    wm8994->supplies);
	if (ret != 0) {
		dev_err(wm8994->dev, "Failed to enable supplies: %d\n", ret);
		goto err_get;
	}

	ret = wm8994_reg_read(wm8994, WM8994_SOFTWARE_RESET);
	if (ret < 0) {
		dev_err(wm8994->dev, "Failed to read ID register\n");
		goto err_enable;
	}
	switch (ret) {
	case 0x1811:
		devname = "WM1811";
		if (wm8994->type != WM1811)
			dev_warn(wm8994->dev, "Device registered as type %d\n",
				 wm8994->type);
		wm8994->type = WM1811;
		break;
	case 0x8994:
		devname = "WM8994";
		if (wm8994->type != WM8994)
			dev_warn(wm8994->dev, "Device registered as type %d\n",
				 wm8994->type);
		wm8994->type = WM8994;
		break;
	case 0x8958:
		devname = "WM8958";
		if (wm8994->type != WM8958)
			dev_warn(wm8994->dev, "Device registered as type %d\n",
				 wm8994->type);
		wm8994->type = WM8958;
		break;
	default:
		dev_err(wm8994->dev, "Device is not a WM8994, ID is %x\n",
			ret);
		ret = -EINVAL;
		goto err_enable;
	}

	ret = wm8994_reg_read(wm8994, WM8994_CHIP_REVISION);
	if (ret < 0) {
		dev_err(wm8994->dev, "Failed to read revision register: %d\n",
			ret);
		goto err_enable;
	}
	wm8994->revision = ret;

	switch (wm8994->type) {
	case WM8994:
		switch (wm8994->revision) {
		case 0:
		case 1:
			dev_warn(wm8994->dev,
				 "revision %c not fully supported\n",
				 'A' + wm8994->revision);
			break;
		case 2:
		case 3:
			regmap_patch = wm8994_revc_patch;
			patch_regs = ARRAY_SIZE(wm8994_revc_patch);
			break;
		default:
			break;
		}
		break;

	case WM8958:
		switch (wm8994->revision) {
		case 0:
			regmap_patch = wm8958_reva_patch;
			patch_regs = ARRAY_SIZE(wm8958_reva_patch);
			break;
		default:
			break;
		}
		break;

	case WM1811:
		/* Revision C did not change the relevant layer */
		if (wm8994->revision > 1)
			wm8994->revision++;
		switch (wm8994->revision) {
		case 0:
		case 1:
		case 2:
		case 3:
			regmap_patch = wm1811_reva_patch;
			patch_regs = ARRAY_SIZE(wm1811_reva_patch);
			break;
		default:
			break;
		}
		break;

	default:
		break;
	}

	dev_info(wm8994->dev, "%s revision %c\n", devname,
		 'A' + wm8994->revision);

	switch (wm8994->type) {
	case WM1811:
		regmap_config = &wm1811_regmap_config;
		break;
	case WM8994:
		regmap_config = &wm8994_regmap_config;
		break;
	case WM8958:
		regmap_config = &wm8958_regmap_config;
		break;
	default:
		dev_err(wm8994->dev, "Unknown device type %d\n", wm8994->type);
		return -EINVAL;
	}

	ret = regmap_reinit_cache(wm8994->regmap, regmap_config);
	if (ret != 0) {
		dev_err(wm8994->dev, "Failed to reinit register cache: %d\n",
			ret);
		return ret;
	}

	if (regmap_patch) {
		ret = regmap_register_patch(wm8994->regmap, regmap_patch,
					    patch_regs);
		if (ret != 0) {
			dev_err(wm8994->dev, "Failed to register patch: %d\n",
				ret);
			goto err;
		}
	}

	if (pdata) {
		wm8994->irq_base = pdata->irq_base;
		wm8994->gpio_base = pdata->gpio_base;

		/* GPIO configuration is only applied if it's non-zero */
		for (i = 0; i < ARRAY_SIZE(pdata->gpio_defaults); i++) {
			if (pdata->gpio_defaults[i]) {
				wm8994_set_bits(wm8994, WM8994_GPIO_1 + i,
						0xffff,
						pdata->gpio_defaults[i]);
			}
		}

		wm8994->ldo_ena_always_driven = pdata->ldo_ena_always_driven;

		if (pdata->spkmode_pu)
			pulls |= WM8994_SPKMODE_PU;
	}

	/* Disable unneeded pulls */
	wm8994_set_bits(wm8994, WM8994_PULL_CONTROL_2,
			WM8994_LDO1ENA_PD | WM8994_LDO2ENA_PD |
			WM8994_SPKMODE_PU | WM8994_CSNADDR_PD,
			pulls);

	/* In some system designs where the regulators are not in use,
	 * we can achieve a small reduction in leakage currents by
	 * floating LDO outputs.  This bit makes no difference if the
	 * LDOs are enabled, it only affects cases where the LDOs were
	 * in operation and are then disabled.
	 */
	for (i = 0; i < WM8994_NUM_LDO_REGS; i++) {
		if (wm8994_ldo_in_use(pdata, i))
			wm8994_set_bits(wm8994, WM8994_LDO_1 + i,
					WM8994_LDO1_DISCH, WM8994_LDO1_DISCH);
		else
			wm8994_set_bits(wm8994, WM8994_LDO_1 + i,
					WM8994_LDO1_DISCH, 0);
	}

	wm8994_irq_init(wm8994);

	ret = mfd_add_devices(wm8994->dev, -1,
			      wm8994_devs, ARRAY_SIZE(wm8994_devs),
			      NULL, 0);
	if (ret != 0) {
		dev_err(wm8994->dev, "Failed to add children: %d\n", ret);
		goto err_irq;
	}

	pm_runtime_enable(wm8994->dev);
	pm_runtime_idle(wm8994->dev);

	return 0;

err_irq:
	wm8994_irq_exit(wm8994);
err_enable:
	regulator_bulk_disable(wm8994->num_supplies,
			       wm8994->supplies);
err_get:
	regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
err:
	mfd_remove_devices(wm8994->dev);
	return ret;
}