Example 1: Floating_hash, an MPFR-value hash memoized through CACHE()
uint64_t
Floating_hash (Floating* self)
{
	if (CACHE(self)->hash) {
		return CACHE(self)->hash;
	}

	mpfr_exp_t exp;
	char*      string = mpfr_get_str(NULL, &exp, 32, 0, *self->value, MPFR_RNDN);
	size_t     size   = strlen(string);

	CACHE(self)->hash = SIPHASH(RUNTIME_FOR(self), string, size + 1) ^ ((VALUE_TYPE_FLOATING << 4) ^ exp);

	mpfr_free_str(string); /* mpfr_get_str() allocated the buffer */

	return CACHE(self)->hash;
}
Example 2: invalidating the memoized hash
static inline Floating*
invalidate_cache (Floating* self)
{
	CACHE(self)->hash = 0;

	return self;
}
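
Neither snippet shows what CACHE() expands to. A minimal sketch of how the pieces plausibly fit together, assuming the cache lives inside the value itself (all names below are hypothetical, not the project's actual definitions):

#include <stdint.h>

/* Hypothetical layout: a per-value cache holding the memoized hash.
 * hash == 0 doubles as the "not computed yet" sentinel, which is why
 * invalidate_cache() above only has to zero the field. */
typedef struct {
	uint64_t hash;
} ValueCache;

typedef struct Floating {
	ValueCache cache;
	/* ... the MPFR value would live here ... */
} Floating;

#define CACHE(self) (&(self)->cache)

Any mutator of the value would be expected to finish with invalidate_cache(self) so that the next Floating_hash() call recomputes. One caveat of the zero sentinel: a value whose true hash is 0 gets recomputed on every call, which is harmless but easy to overlook.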
Example 3: configuring Cherokee's I/O cache from a config tree
ret_t
cherokee_iocache_configure (cherokee_iocache_t     *iocache,
			    cherokee_config_node_t *conf)
{
	ret_t            ret;
	cherokee_list_t *i;

	/* Configure parent class
	 */
	ret = cherokee_cache_configure (CACHE(iocache), conf);
	if (ret != ret_ok)
		return ret;

	/* Configure its own properties
	 */
	cherokee_config_node_foreach (i, conf) {
		cherokee_config_node_t *subconf = CONFIG_NODE(i);

		if (equal_buf_str (&subconf->key, "max_file_size")) {
			iocache->max_file_size = atoi(subconf->val.buf);
		} else if (equal_buf_str (&subconf->key, "min_file_size")) {
			iocache->min_file_size = atoi(subconf->val.buf);
		} else if (equal_buf_str (&subconf->key, "lasting_stat")) {
			iocache->lasting_stat = atoi(subconf->val.buf);
		} else if (equal_buf_str (&subconf->key, "lasting_mmap")) {
			iocache->lasting_mmap = atoi(subconf->val.buf);
		}
	}

	return ret_ok;
}
Example 4: revoking grant-table foreign access (XenBus driver)
static NTSTATUS
GnttabRevokeForeignAccess(
    IN  PXENBUS_GNTTAB_CONTEXT      Context,
    IN  PXENBUS_GNTTAB_CACHE        Cache,
    IN  BOOLEAN                     Locked,
    IN  PXENBUS_GNTTAB_DESCRIPTOR   Descriptor
)
{
    grant_entry_v1_t                *Entry;
    volatile SHORT                  *Flags;
    ULONG                           Attempt;
    NTSTATUS                        status;

    ASSERT3U(Descriptor->Magic, ==, GNTTAB_DESCRIPTOR_MAGIC);
    ASSERT3U(Descriptor->Reference, >=, GNTTAB_RESERVED_ENTRY_COUNT);
    ASSERT3U(Descriptor->Reference, <, (Context->FrameIndex + 1) * GNTTAB_ENTRY_PER_FRAME);

    Entry = &Context->Entry[Descriptor->Reference];
    Flags = (volatile SHORT *)&Entry->flags;

    Attempt = 0;
    while (Attempt++ < 100) {
        uint16_t    Old;
        uint16_t    New;

        Old = *Flags;
        Old &= ~(GTF_reading | GTF_writing);

        New = Old & ~GTF_permit_access;

        if (InterlockedCompareExchange16(Flags, New, Old) == Old)
            break;

        SchedYield();
    }

    status = STATUS_UNSUCCESSFUL;
    if (Attempt > 100) /* loop exhausted: the post-increment leaves Attempt at 101 */
        goto fail1;

    RtlZeroMemory(Entry, sizeof (grant_entry_v1_t));
    RtlZeroMemory(&Descriptor->Entry, sizeof (grant_entry_v1_t));

    CACHE(Put,
          Context->CacheInterface,
          Cache->Cache,
          Descriptor,
          Locked);

    return STATUS_SUCCESS;

fail1:
    Error("fail1 (%08x)\n", status);

    return status;
}
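
The loop above is the bounded compare-and-swap retry idiom: snapshot the word, compute the desired value, publish it only if the word has not changed in the meantime, and yield between attempts. A portable C11 sketch of the same shape, with nothing Xen-specific (names are illustrative only):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Clear a permission bit in a flags word shared with another agent,
 * retrying a bounded number of times. Clearing the busy bits in the
 * expected value makes the CAS fail while the peer is mid-access. */
static bool
clear_permit(_Atomic uint16_t *flags, uint16_t permit_bit, uint16_t busy_mask)
{
    for (int attempt = 0; attempt < 100; attempt++) {
        uint16_t old = atomic_load(flags);
        old &= (uint16_t)~busy_mask;

        uint16_t new = old & (uint16_t)~permit_bit;

        if (atomic_compare_exchange_strong(flags, &old, new))
            return true;    /* published: nobody was reading or writing */
    }

    return false;           /* contended for too long; caller reports failure */
}

That is exactly why the driver masks out GTF_reading | GTF_writing before the exchange: revocation can never succeed while a transfer is in flight.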
Example 5: reading a ReiserFS tree node into the node cache
/* Read the node at the current path and depth into the node cache.
 * The caller must set INFO->blocks[depth] beforehand.
 */
static char *
read_tree_node( __u32 blockNr, __u16 depth )
{
     char *cache = CACHE(depth);
     int num_cached = INFO->cached_slots;
     errnum = 0;

     if ( depth < num_cached )
     {
	  /* This is the cached part of the path.
	     Check whether the same block is needed. */
	  if ( blockNr == INFO->blocks[depth] )
	       return cache;
     }
     else
	  cache = CACHE(num_cached);

     DEBUG_F( "  next read_in: block=%u (depth=%u)\n", blockNr, depth );

     if ( !block_read( blockNr, 0, INFO->blocksize, cache ) )
     {
	  DEBUG_F( "block_read failed\n" );
	  return 0;
     }

     DEBUG_F( "FOUND: blk_level=%u, blk_nr_item=%u, blk_free_space=%u\n",
	      blkh_level(BLOCKHEAD(cache)),
	      blkh_nr_item(BLOCKHEAD(cache)),
	      le16_to_cpu(BLOCKHEAD(cache)->blk_free_space) );

     /* Make sure it has the right node level */
     if ( blkh_level(BLOCKHEAD(cache)) != depth )
     {
	  DEBUG_F( "depth = %u != %u\n", blkh_level(BLOCKHEAD(cache)), depth );
	  DEBUG_LEAVE(FILE_ERR_BAD_FSYS);
	  errnum = FILE_ERR_BAD_FSYS;
	  return 0;
     }

     INFO->blocks[depth] = blockNr;
     return cache;
}
Example 6: Floating_hash again, built on MurmurHash3
hash_t
Floating_hash (Floating* self)
{
	if (CACHE(self)->hash) {
		return CACHE(self)->hash;
	}

	murmur3_t* state = MURMUR3_INIT(RUNTIME_FOR(self));

	MURMUR3_UPDATE_WITH(state, VALUE_TYPE_FLOATING);

	mpfr_exp_t exp;
	char*      string = mpfr_get_str(NULL, &exp, 32, 0, *self->value, MPFR_RNDN);
	size_t     size   = strlen(string);

	MURMUR3_UPDATE_WITH(state, exp);
	MURMUR3_UPDATE(state, string, size);

	CACHE(self)->hash = MURMUR3_FINAL(state);

	mpfr_free_str(string); /* buffer came from mpfr_get_str(); plain free() is only safe with default allocators */

	return CACHE(self)->hash;
}
Example 7: permitting grant-table foreign access (XenBus driver)
static NTSTATUS
GnttabPermitForeignAccess(
    IN  PXENBUS_GNTTAB_CONTEXT      Context,
    IN  PXENBUS_GNTTAB_CACHE        Cache,
    IN  BOOLEAN                     Locked,
    IN  USHORT                      Domain,
    IN  PFN_NUMBER                  Pfn,
    IN  BOOLEAN                     ReadOnly,
    OUT PXENBUS_GNTTAB_DESCRIPTOR   *Descriptor
)
{
    grant_entry_v1_t                *Entry;
    NTSTATUS                        status;

    *Descriptor = CACHE(Get,
                        Context->CacheInterface,
                        Cache->Cache,
                        Locked);

    status = STATUS_INSUFFICIENT_RESOURCES;
    if (*Descriptor == NULL)
        goto fail1;

    (*Descriptor)->Entry.flags = (ReadOnly) ? GTF_readonly : 0;
    (*Descriptor)->Entry.domid = Domain;

    (*Descriptor)->Entry.frame = (uint32_t)Pfn;
    ASSERT3U((*Descriptor)->Entry.frame, ==, Pfn);

    Entry = &Context->Entry[(*Descriptor)->Reference];

    *Entry = (*Descriptor)->Entry;
    KeMemoryBarrier();

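    /* The barrier above makes the fully-populated entry visible before
       the flag is set below, so no observer can see a grant marked
       GTF_permit_access while its frame/domid are still stale. */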
    Entry->flags |= GTF_permit_access;
    KeMemoryBarrier();

    return STATUS_SUCCESS;

fail1:
    Error("fail1 (%08x)\n", status);

    return status;
}
Example 8: tearing down a XenBus grant-table descriptor cache
static VOID
GnttabDestroyCache(
    IN  PXENBUS_GNTTAB_CONTEXT  Context,
    IN  PXENBUS_GNTTAB_CACHE    Cache
)
{
    CACHE(Destroy,
          Context->CacheInterface,
          Cache->Cache);
    Cache->Cache = NULL;

    Cache->Argument = NULL;
    Cache->ReleaseLock = NULL;
    Cache->AcquireLock = NULL;

    RtlZeroMemory(Cache->Name, sizeof (Cache->Name));

    Cache->Context = NULL;

    ASSERT(IsZeroMemory(Cache, sizeof (XENBUS_GNTTAB_CACHE)));
    __GnttabFree(Cache);
}
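
In the XenBus examples (4, 7, 8 and 13), CACHE is evidently not a field accessor but a method-dispatch macro over Context->CacheInterface. A plausible shape of the convention, offered as a sketch only and not as the driver's actual definition:

/* Hypothetical: route a named method through the interface's vtable,
 * passing the interface context as the implicit first argument. */
#define CACHE(_Method, _Interface, ...) \
        (_Interface)->Methods._Method((_Interface)->Context, __VA_ARGS__)

Under a definition of this shape, the call above expands to a direct call on the interface object, which lines up with the Create/Get/Put/Destroy call sites in the other examples.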
Example 9: a table of slab caches for malloc size classes
#include <core/const.h>
#include <core/list.h>
#include <memory/slab.h>
#include <memory/buddy.h>
#include <video/console.h>

typedef struct MallocSize{
   u32 size;
   SlabCache *cache;
} MallocSize;

static MallocSize mallocSizes[] = {
#define CACHE(x) { .size = (x), .cache = 0 }
   CACHE(32),
   CACHE(64),
   CACHE(128),
   CACHE(256),
   CACHE(512),
   CACHE(1024),
   CACHE(2048),
   CACHE(4096),
   CACHE(8192),
   CACHE(16384),
   CACHE(32768),
   CACHE(65536),
   CACHE(131072),
   CACHE(262144),
   CACHE(524288),
   CACHE(1048576) /*1MB*/
#undef CACHE
};
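
The consumer of the table is not shown; presumably a kmalloc-style entry point picks the first size class that fits, and anything beyond the largest class goes to the buddy allocator included above. A minimal sketch (the helper name is hypothetical):

/* Hypothetical: map a requested size to its slab cache. */
static SlabCache *cacheForMallocSize(u32 size)
{
   for (u32 i = 0; i < sizeof(mallocSizes) / sizeof(mallocSizes[0]); ++i) {
      if (mallocSizes[i].size >= size)
         return mallocSizes[i].cache; /* 0 until created lazily elsewhere */
   }
   return 0; /* above 1MB: too big for any size class */
}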
Example 10: kmalloc size and name tables stamped out with an X-macro
/*******************
 ** Configuration **
 *******************/

#define DEBUG_MALLOC 0

/********************
 ** Implementation **
 ********************/

/*
 * These are the default caches for kmalloc. Custom caches can have other sizes.
 */
static struct cache_sizes malloc_sizes[] = {
#define CACHE(x) { .cs_size = (x) },
#include <linux/kmalloc_sizes.h>
	CACHE(ULONG_MAX)
#undef CACHE
};


/*
 * kmalloc() cache names
 */
static const char *malloc_names[] = {
#define CACHE(x) "size-" #x,
#include <linux/kmalloc_sizes.h>
	NULL
#undef CACHE
};
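
Both tables are stamped out of one list: kmalloc_sizes.h is nothing but a sequence of CACHE(size) invocations, and each includer redefines CACHE to expand that list into a differently-typed table. A self-contained miniature of the trick, with a stand-in macro in place of the real header:

/* Stand-in for the contents of <linux/kmalloc_sizes.h>. */
#define KMALLOC_SIZES_DEMO CACHE(32) CACHE(64) CACHE(128)

struct cache_sizes_demo { unsigned long cs_size; };

/* First expansion: a table of sizes... */
#define CACHE(x) { .cs_size = (x) },
static struct cache_sizes_demo demo_sizes[] = { KMALLOC_SIZES_DEMO };
#undef CACHE

/* ...second expansion: matching names, from the very same list. */
#define CACHE(x) "size-" #x,
static const char *demo_names[] = { KMALLOC_SIZES_DEMO NULL };
#undef CACHE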

Example 11: Camellia CBC-with-CTS encryption (krb5), key schedules cached on the key
static krb5_error_code
krb5int_camellia_encrypt(krb5_key key, const krb5_data *ivec,
                         krb5_crypto_iov *data, size_t num_data)
{
    unsigned char tmp[BLOCK_SIZE], tmp2[BLOCK_SIZE];
    int nblocks = 0, blockno;
    size_t input_length, i;
    struct iov_block_state input_pos, output_pos;

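    /* key->cache memoizes the expanded key schedules across calls;
       CACHE(key) presumably casts it to struct camellia_key_info_cache *
       (the macro's definition is not shown in this excerpt). */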
    if (key->cache == NULL) {
        key->cache = malloc(sizeof(struct camellia_key_info_cache));
        if (key->cache == NULL)
            return ENOMEM;
        CACHE(key)->enc_ctx.keybitlen = CACHE(key)->dec_ctx.keybitlen = 0;
    }
    if (CACHE(key)->enc_ctx.keybitlen == 0) {
        if (camellia_enc_key(key->keyblock.contents, key->keyblock.length,
                             &CACHE(key)->enc_ctx) != camellia_good)
            abort();
    }
    if (ivec != NULL)
        memcpy(tmp, ivec->data, BLOCK_SIZE);
    else
        memset(tmp, 0, BLOCK_SIZE);

    for (i = 0, input_length = 0; i < num_data; i++) {
        krb5_crypto_iov *iov = &data[i];

        if (ENCRYPT_IOV(iov))
            input_length += iov->data.length;
    }

    IOV_BLOCK_STATE_INIT(&input_pos);
    IOV_BLOCK_STATE_INIT(&output_pos);

    nblocks = (input_length + BLOCK_SIZE - 1) / BLOCK_SIZE;
    if (nblocks == 1) {
        krb5int_c_iov_get_block(tmp, BLOCK_SIZE, data, num_data, &input_pos);
        enc(tmp2, tmp, &CACHE(key)->enc_ctx);
        krb5int_c_iov_put_block(data, num_data, tmp2, BLOCK_SIZE, &output_pos);
    } else if (nblocks > 1) {
        unsigned char blockN2[BLOCK_SIZE];   /* second last */
        unsigned char blockN1[BLOCK_SIZE];   /* last block */

        for (blockno = 0; blockno < nblocks - 2; blockno++) {
            unsigned char blockN[BLOCK_SIZE], *block;

            krb5int_c_iov_get_block_nocopy(blockN, BLOCK_SIZE,
                                           data, num_data, &input_pos, &block);
            xorblock(tmp, block);
            enc(block, tmp, &CACHE(key)->enc_ctx);
            krb5int_c_iov_put_block_nocopy(data, num_data, blockN, BLOCK_SIZE,
                                           &output_pos, block);

            /* Set up for next block.  */
            memcpy(tmp, block, BLOCK_SIZE);
        }

        /* Do final CTS step for last two blocks (the second of which
           may or may not be incomplete).  */

        /* First, get the last two blocks */
        memset(blockN1, 0, sizeof(blockN1)); /* pad last block with zeros */
        krb5int_c_iov_get_block(blockN2, BLOCK_SIZE, data, num_data,
                                &input_pos);
        krb5int_c_iov_get_block(blockN1, BLOCK_SIZE, data, num_data,
                                &input_pos);

        /* Encrypt second last block */
        xorblock(tmp, blockN2);
        enc(tmp2, tmp, &CACHE(key)->enc_ctx);
        memcpy(blockN2, tmp2, BLOCK_SIZE); /* blockN2 now contains first block */
        memcpy(tmp, tmp2, BLOCK_SIZE);

        /* Encrypt last block */
        xorblock(tmp, blockN1);
        enc(tmp2, tmp, &CACHE(key)->enc_ctx);
        memcpy(blockN1, tmp2, BLOCK_SIZE);

        /* Put the last two blocks back into the iovec (reverse order) */
        krb5int_c_iov_put_block(data, num_data, blockN1, BLOCK_SIZE,
                                &output_pos);
        krb5int_c_iov_put_block(data, num_data, blockN2, BLOCK_SIZE,
                                &output_pos);

        if (ivec != NULL)
            memcpy(ivec->data, blockN1, BLOCK_SIZE);
    }

    return 0;
}
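
A worked instance of the tail handling: with input_length = 21 and BLOCK_SIZE = 16, nblocks = 2 and the main loop runs zero times; blockN2 receives the first 16 input bytes and blockN1 the remaining 5, zero-padded. After the two enc() calls, blockN1 holds the full final ciphertext block and is written out first, while blockN2 is presumably truncated to the 5 remaining output bytes by the iov writer: exactly the ciphertext-stealing swap described in the comments.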
Example 12: the matching Camellia decryption path
static krb5_error_code
krb5int_camellia_decrypt(krb5_key key, const krb5_data *ivec,
                         krb5_crypto_iov *data, size_t num_data)
{
    unsigned char tmp[BLOCK_SIZE], tmp2[BLOCK_SIZE], tmp3[BLOCK_SIZE];
    int nblocks = 0, blockno;
    unsigned int i;
    size_t input_length;
    struct iov_block_state input_pos, output_pos;

    if (key->cache == NULL) {
        key->cache = malloc(sizeof(struct camellia_key_info_cache));
        if (key->cache == NULL)
            return ENOMEM;
        CACHE(key)->enc_ctx.keybitlen = CACHE(key)->dec_ctx.keybitlen = 0;
    }
    if (CACHE(key)->dec_ctx.keybitlen == 0) {
        if (camellia_dec_key(key->keyblock.contents, key->keyblock.length,
                             &CACHE(key)->dec_ctx) != camellia_good)
            abort();
    }

    if (ivec != NULL)
        memcpy(tmp, ivec->data, BLOCK_SIZE);
    else
        memset(tmp, 0, BLOCK_SIZE);

    for (i = 0, input_length = 0; i < num_data; i++) {
        krb5_crypto_iov *iov = &data[i];

        if (ENCRYPT_IOV(iov))
            input_length += iov->data.length;
    }

    IOV_BLOCK_STATE_INIT(&input_pos);
    IOV_BLOCK_STATE_INIT(&output_pos);

    nblocks = (input_length + BLOCK_SIZE - 1) / BLOCK_SIZE;
    if (nblocks == 1) {
        krb5int_c_iov_get_block(tmp, BLOCK_SIZE, data, num_data, &input_pos);
        dec(tmp2, tmp, &CACHE(key)->dec_ctx);
        krb5int_c_iov_put_block(data, num_data, tmp2, BLOCK_SIZE, &output_pos);
    } else if (nblocks > 1) {
        unsigned char blockN2[BLOCK_SIZE];   /* second last */
        unsigned char blockN1[BLOCK_SIZE];   /* last block */

        for (blockno = 0; blockno < nblocks - 2; blockno++) {
            unsigned char blockN[BLOCK_SIZE], *block;

            krb5int_c_iov_get_block_nocopy(blockN, BLOCK_SIZE,
                                           data, num_data, &input_pos, &block);
            memcpy(tmp2, block, BLOCK_SIZE);
            dec(block, block, &CACHE(key)->dec_ctx);
            xorblock(block, tmp);
            memcpy(tmp, tmp2, BLOCK_SIZE);
            krb5int_c_iov_put_block_nocopy(data, num_data, blockN, BLOCK_SIZE,
                                           &output_pos, block);
        }

        /* Do last two blocks, the second of which (next-to-last block
           of plaintext) may be incomplete.  */

        /* First, get the last two encrypted blocks */
        memset(blockN1, 0, sizeof(blockN1)); /* pad last block with zeros */
        krb5int_c_iov_get_block(blockN2, BLOCK_SIZE, data, num_data,
                                &input_pos);
        krb5int_c_iov_get_block(blockN1, BLOCK_SIZE, data, num_data,
                                &input_pos);

        if (ivec != NULL)
            memcpy(ivec->data, blockN2, BLOCK_SIZE);

        /* Decrypt second last block */
        dec(tmp2, blockN2, &CACHE(key)->dec_ctx);
        /* Set tmp2 to last (possibly partial) plaintext block, and
           save it.  */
        xorblock(tmp2, blockN1);
        memcpy(blockN2, tmp2, BLOCK_SIZE);

        /* Maybe keep the trailing part, and copy in the last
           ciphertext block.  */
        input_length %= BLOCK_SIZE;
        memcpy(tmp2, blockN1, input_length ? input_length : BLOCK_SIZE);
        dec(tmp3, tmp2, &CACHE(key)->dec_ctx);
        xorblock(tmp3, tmp);
        memcpy(blockN1, tmp3, BLOCK_SIZE);

        /* Put the last two blocks back into the iovec */
        krb5int_c_iov_put_block(data, num_data, blockN1, BLOCK_SIZE,
                                &output_pos);
        krb5int_c_iov_put_block(data, num_data, blockN2, BLOCK_SIZE,
                                &output_pos);
    }

    return 0;
}
Example 13: creating a XenBus grant-table descriptor cache
static NTSTATUS
GnttabCreateCache(
    IN  PXENBUS_GNTTAB_CONTEXT  Context,
    IN  const CHAR              *Name,
    IN  ULONG                   Reservation,
    IN  VOID                    (*AcquireLock)(PVOID),
    IN  VOID                    (*ReleaseLock)(PVOID),
    IN  PVOID                   Argument,
    OUT PXENBUS_GNTTAB_CACHE    *Cache
)
{
    NTSTATUS                    status;

    *Cache = __GnttabAllocate(sizeof (XENBUS_GNTTAB_CACHE));

    status = STATUS_NO_MEMORY;
    if (*Cache == NULL)
        goto fail1;

    (*Cache)->Context = Context;

    status = RtlStringCbPrintfA((*Cache)->Name,
                                sizeof ((*Cache)->Name),
                                "%s_gnttab",
                                Name);
    if (!NT_SUCCESS(status))
        goto fail2;

    (*Cache)->AcquireLock = AcquireLock;
    (*Cache)->ReleaseLock = ReleaseLock;
    (*Cache)->Argument = Argument;

    status = CACHE(Create,
                   Context->CacheInterface,
                   (*Cache)->Name,
                   sizeof (XENBUS_GNTTAB_DESCRIPTOR),
                   Reservation,
                   GnttabDescriptorCtor,
                   GnttabDescriptorDtor,
                   GnttabAcquireLock,
                   GnttabReleaseLock,
                   *Cache,
                   &(*Cache)->Cache);
    if (!NT_SUCCESS(status))
        goto fail3;

    return STATUS_SUCCESS;

fail3:
    Error("fail3\n");

    (*Cache)->Argument = NULL;
    (*Cache)->ReleaseLock = NULL;
    (*Cache)->AcquireLock = NULL;

    RtlZeroMemory((*Cache)->Name, sizeof ((*Cache)->Name));

fail2:
    Error("fail2\n");

    (*Cache)->Context = NULL;

    ASSERT(IsZeroMemory(*Cache, sizeof (XENBUS_GNTTAB_CACHE)));
    __GnttabFree(*Cache);

fail1:
    Error("fail1 (%08x)\n", status);

    return status;
}
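
Examples 4, 7 and 13 share the numbered-label unwind idiom: each failN label undoes only the state established since the previous goto check, then falls through into the earlier labels, so every exit path releases exactly what it owns and the object returns to zeroed memory before being freed. Stripped to a skeleton (all helper and type names hypothetical):

static NTSTATUS
CreateThing(PTHING *Thing)
{
    NTSTATUS status;

    *Thing = AllocateZeroed(sizeof (THING));

    status = STATUS_NO_MEMORY;
    if (*Thing == NULL)
        goto fail1;

    status = AcquireResource(*Thing);
    if (!NT_SUCCESS(status))
        goto fail2;

    return STATUS_SUCCESS;

fail2:
    Error("fail2\n");

    /* AcquireResource() failed, so only the earlier allocation needs
       undoing; then fall through to the final label for logging. */
    FreeZeroed(*Thing);

fail1:
    Error("fail1 (%08x)\n", status);

    return status;
}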
Example 14: decoding CPUID.2 cache and TLB descriptors
namespace CPUID2 {

typedef u8 Descriptor;
typedef std::vector<Descriptor> Descriptors;

static void AppendDescriptors(u32 reg, Descriptors& descriptors)
{
	if(IsBitSet(reg, 31))	// register contents are reserved
		return;
	for(int pos = 24; pos >= 0; pos -= 8)
	{
		const u8 descriptor = (u8)bits(reg, pos, pos+7);
		if(descriptor != 0)
			descriptors.push_back(descriptor);
	}
}


static Descriptors GetDescriptors()
{
	// ensure consistency by pinning to a CPU.
	// (don't use a hard-coded mask because process affinity may be restricted)
	const uintptr_t allProcessors = os_cpu_ProcessorMask();
	const uintptr_t firstProcessor = allProcessors & -intptr_t(allProcessors);
	const uintptr_t prevAffinityMask = os_cpu_SetThreadAffinityMask(firstProcessor);

	x86_x64::CpuidRegs regs = { 0 };
	regs.eax = 2;
	if(!x86_x64::cpuid(&regs))
	{
		os_cpu_SetThreadAffinityMask(prevAffinityMask);	// don't leak the pinning
		return Descriptors();
	}

	Descriptors descriptors;
	size_t iterations = bits(regs.eax, 0, 7);
	for(;;)	// abort mid-loop (invoke CPUID exactly <iterations> times)
	{
		AppendDescriptors(bits(regs.eax, 8, 31), descriptors);
		AppendDescriptors(regs.ebx, descriptors);
		AppendDescriptors(regs.ecx, descriptors);
		AppendDescriptors(regs.edx, descriptors);
		if(--iterations == 0)
			break;
		regs.eax = 2;
		const bool ok = x86_x64::cpuid(&regs);
		ENSURE(ok);
	}

	os_cpu_SetThreadAffinityMask(prevAffinityMask);

	return descriptors;
}


// note: the following cannot be moved into a function because
// ARRAY_SIZE's template argument must not reference a local type.

enum Flags
{
	// level (bits 0..1)
	L1 = 1,
	L2,
	L3,

	// type (bits 2..3)
	I   = 0x04,	// instruction
	D   = 0x08,	// data
	U   = I|D	// unified

	// largeSize (bits 4..31 with bits 0..3 zeroed): TLB entrySize or cache numEntries
};

// (there are > 100 descriptors, so we squeeze all fields into 8 bytes.)
struct Characteristics	// POD
{
	x86_x64::Cache::Type Type() const
	{
		switch(flags & U)
		{
		case D:
			return x86_x64::Cache::kData;
		case I:
			return x86_x64::Cache::kInstruction;
		case U:
			return x86_x64::Cache::kUnified;
		default:
			DEBUG_WARN_ERR(ERR::LOGIC);
			return x86_x64::Cache::kNull;
		}
	}

	size_t Level() const
	{
		const size_t level = flags & 3;
		ENSURE(level != 0);
		return level;
	}

	bool IsTLB() const
	{
		return smallSize >= 0;
	}

	size_t NumEntries() const
	{
		return IsTLB()? (size_t)smallSize : (flags & ~0xF);
	}

	size_t EntrySize() const
	{
		return IsTLB()? (flags & ~0xF) : (size_t)(-smallSize);
	}

	u8 descriptor;
	u8 associativity;
	i16 smallSize;	// negative cache entrySize or TLB numEntries
	u32 flags;	// level, type, largeSize
};

static const u8 F = x86_x64::Cache::fullyAssociative;

#define CACHE(descriptor, flags, totalSize, assoc, entrySize)  { descriptor, assoc, -entrySize, flags | ((totalSize)/(entrySize)) }
#define TLB(descriptor, flags, entrySize, assoc, numEntries) { descriptor, assoc, numEntries, flags | (entrySize) }
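
A worked instance of the packing, using the field comments in Characteristics above: CACHE(0x2c, L1|D, 32*KiB, 8, 64) expands to { 0x2c, 8, -64, (L1|D) | 512 }, since (32*KiB)/64 = 512. The negative smallSize marks it as a cache rather than a TLB, EntrySize() recovers the 64-byte line from -smallSize, NumEntries() masks the low nibble off flags to recover 512, and Level()/Type() read "L1 data" from the low bits: 512 lines of 64 bytes, i.e. the 32 KiB L1D listed for descriptor 0x2c in the table below.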

// (we need to include cache descriptors because early Pentium4 don't implement CPUID.4)
// references: [accessed 2011-02-26]
// AP485 http://www.intel.com/Assets/PDF/appnote/241618.pdf
// sdman http://www.intel.com/Assets/PDF/manual/253666.pdf
// sandp http://www.sandpile.org/ia32/cpuid.htm
// opsol http://src.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/uts/i86pc/os/cpuid.c
static const Characteristics characteristicsTable[] =
{
	TLB  (0x01, L1|I,   4*KiB,  4,  32),
	TLB  (0x02, L1|I,   4*MiB,  F,   2),
	TLB  (0x03, L1|D,   4*KiB,  4,  64),
	TLB  (0x04, L1|D,   4*MiB,  4,   8),
	TLB  (0x05, L1|D,   4*MiB,  4,  32),

	CACHE(0x06, L1|I,   8*KiB,  4,  32),
	CACHE(0x08, L1|I,  16*KiB,  4,  32),
	CACHE(0x09, L1|I,  32*KiB,  4,  64),
	CACHE(0x0A, L1|I,   8*KiB,  2,  32),

	TLB  (0x0B, L1|I,   4*MiB,  4,   4),

	CACHE(0x0C, L1|D,  16*KiB,  4,  32),
	CACHE(0x0D, L1|D,  16*KiB,  4,  64),	// opsol: 32B (would be redundant with 0x0C), AP485: 64B, sdman: 64B
	CACHE(0x0E, L1|D,  24*KiB,  6,  64),

	CACHE(0x21, L2|U, 256*KiB,  8,  64),

	CACHE(0x22, L3|U, 512*KiB,  4,  64),
	CACHE(0x23, L3|U,   1*MiB,  8,  64),
	CACHE(0x25, L3|U,   2*MiB,  8,  64),
	CACHE(0x29, L3|U,   4*MiB,  8,  64),

	CACHE(0x2c, L1|D,  32*KiB,  8,  64),

	CACHE(0x30, L1|I,  32*KiB,  8,  64),

	CACHE(0x39, L2|U, 128*KiB,  4,  64),
	CACHE(0x3A, L2|U, 192*KiB,  6,  64),
	CACHE(0x3B, L2|U, 128*KiB,  2,  64),
	CACHE(0x3C, L2|U, 256*KiB,  4,  64),
	CACHE(0x3D, L2|U, 384*KiB,  6,  64),
	CACHE(0x3E, L2|U, 512*KiB,  4,  64),
	CACHE(0x41, L2|U, 128*KiB,  4,  32),
	CACHE(0x42, L2|U, 256*KiB,  4,  32),
	CACHE(0x43, L2|U, 512*KiB,  4,  32),
	CACHE(0x44, L2|U,   1*MiB,  4,  32),
	CACHE(0x45, L2|U,   2*MiB,  4,  32),

	CACHE(0x46, L3|U,   4*MiB,  4,  64),
	CACHE(0x47, L3|U,   8*MiB,  8,  64),
	CACHE(0x48, L2|U,   3*MiB, 12,  64),
	CACHE(0x49, L2|U,   4*MiB, 16,  64),
	CACHE(0x49, L3|U,   4*MiB, 16,  64),
	CACHE(0x4A, L3|U,   6*MiB, 12,  64),
	CACHE(0x4B, L3|U,   8*MiB, 16,  64),
	CACHE(0x4C, L3|U,  12*MiB, 12,  64),
	CACHE(0x4D, L3|U,  16*MiB, 16,  64),
	CACHE(0x4E, L2|U,   6*MiB, 24,  64),

	TLB  (0x4F, L1|I,   4*KiB,  F,  32),	// sandp: unknown assoc, opsol: full, AP485: unspecified
	TLB  (0x50, L1|I,   4*KiB,  F,  64),
	TLB  (0x50, L1|I,   4*MiB,  F,  64),
	TLB  (0x50, L1|I,   2*MiB,  F,  64),
	TLB  (0x51, L1|I,   4*KiB,  F, 128),
	TLB  (0x51, L1|I,   4*MiB,  F, 128),
	TLB  (0x51, L1|I,   2*MiB,  F, 128),
	TLB  (0x52, L1|I,   4*KiB,  F, 256),
	TLB  (0x52, L1|I,   4*MiB,  F, 256),
	TLB  (0x52, L1|I,   2*MiB,  F, 256),
	TLB  (0x55, L1|I,   4*MiB,  F,   7),
	TLB  (0x55, L1|I,   2*MiB,  F,   7),

	TLB  (0x56, L1|D,   4*MiB,  4,  16),
	TLB  (0x57, L1|D,   4*KiB,  4,  16),
	TLB  (0x59, L1|D,   4*KiB,  F,  16),
	TLB  (0x5A, L1|D,   4*MiB,  4,  32),
	TLB  (0x5A, L1|D,   2*MiB,  4,  32),
	TLB  (0x5B, L1|D,   4*KiB,  F,  64),
	TLB  (0x5B, L1|D,   4*MiB,  F,  64),
	TLB  (0x5C, L1|D,   4*KiB,  F, 128),
	TLB  (0x5C, L1|D,   4*MiB,  F, 128),
	TLB  (0x5D, L1|D,   4*KiB,  F, 256),
	TLB  (0x5D, L1|D,   4*MiB,  F, 256),

	CACHE(0x60, L1|D,  16*KiB,  8,  64),
	TLB  (0x63, L1|D,   1*GiB,  4,   4),    // speculation
	CACHE(0x66, L1|D,   8*KiB,  4,  64),
	CACHE(0x67, L1|D,  16*KiB,  4,  64),
	CACHE(0x68, L1|D,  32*KiB,  4,  64),

	CACHE(0x70, L1|I,  12*KiB,  8,   1),
	CACHE(0x71, L1|I,  16*KiB,  8,   1),
	CACHE(0x72, L1|I,  32*KiB,  8,   1),
	CACHE(0x73, L1|I,  64*KiB,  8,   1),

	TLB  (0x76, L1|I,   4*MiB,  F,   8),	// AP485: internally inconsistent, sdman: TLB
	TLB  (0x76, L1|I,   2*MiB,  F,   8),

	CACHE(0x78, L2|U,   1*MiB,  4,  64),
	CACHE(0x79, L2|U, 128*KiB,  8,  64),
	CACHE(0x7A, L2|U, 256*KiB,  8,  64),
	CACHE(0x7B, L2|U, 512*KiB,  8,  64),
	CACHE(0x7C, L2|U,   1*MiB,  8,  64),
	CACHE(0x7D, L2|U,   2*MiB,  8,  64),
	CACHE(0x7F, L2|U, 512*KiB,  2,  64),

	CACHE(0x80, L2|U, 512*KiB,  8,  64),
	CACHE(0x82, L2|U, 256*KiB,  8,  32),
	CACHE(0x83, L2|U, 512*KiB,  8,  32),
	CACHE(0x84, L2|U,   1*MiB,  8,  32),
	CACHE(0x85, L2|U,   2*MiB,  8,  32),
	CACHE(0x86, L2|U, 512*KiB,  4,  64),
	CACHE(0x87, L2|U,   1*MiB,  8,  64),

	TLB  (0xB0, L1|I,   4*KiB,  4, 128),
	TLB  (0xB1, L1|I,   2*MiB,  4,   8),
	TLB  (0xB1, L1|I,   4*MiB,  4,   4),
	TLB  (0xB2, L1|I,   4*KiB,  4,  64),

	TLB  (0xB3, L1|D,   4*KiB,  4, 128),
	TLB  (0xB3, L1|D,   4*MiB,  4, 128),
	TLB  (0xB4, L1|D,   4*KiB,  4, 256),
	TLB  (0xB4, L1|D,   4*MiB,  4, 256),
	TLB  (0xB5, L1|I,   4*KiB,  4, 128),    // speculation
	TLB  (0xB6, L1|I,   4*KiB,  8, 128),    // http://software.intel.com/en-us/forums/topic/401012

	TLB  (0xBA, L1|D,   4*KiB,  4,  64),
	TLB  (0xC0, L1|D,   4*KiB,  4,   8),
	TLB  (0xC0, L1|D,   4*MiB,  4,   8),
	TLB  (0xC1, L2|U,   4*KiB,  8, 1024),   // http://software.intel.com/en-us/forums/topic/401012
	TLB  (0xC1, L2|U,   4*MiB,  8, 1024),
	TLB  (0xC1, L2|U,   2*MiB,  8, 1024),
	TLB  (0xCA, L2|U,   4*KiB,  4, 512),

	CACHE(0xD0, L3|U, 512*KiB,  4,  64),
	CACHE(0xD1, L3|U,   1*MiB,  4,  64),
	CACHE(0xD2, L3|U,   2*MiB,  4,  64),
	CACHE(0xD6, L3|U,   1*MiB,  8,  64),
	CACHE(0xD7, L3|U,   2*MiB,  8,  64),
	CACHE(0xD8, L3|U,   4*MiB,  8,  64),
	CACHE(0xDC, L3|U, 3*MiB/2, 12,  64),
	CACHE(0xDD, L3|U,   3*MiB, 12,  64),
	CACHE(0xDE, L3|U,   6*MiB, 12,  64),
	CACHE(0xE2, L3|U,   2*MiB, 16,  64),
	CACHE(0xE3, L3|U,   4*MiB, 16,  64),
	CACHE(0xE4, L3|U,   8*MiB, 16,  64),
	CACHE(0xEA, L3|U,  12*MiB, 24,  64),
	CACHE(0xEB, L3|U,  18*MiB, 24,  64),
	CACHE(0xEC, L3|U,  24*MiB, 24,  64),
};
#undef CACHE
#undef TLB

static const Characteristics* CharacteristicsFromDescriptor(Descriptor descriptor)
{
	// note: we can't use bsearch because characteristicsTable contains multiple
	// entries with the same descriptor.
	for(size_t i = 0; i < ARRAY_SIZE(characteristicsTable); i++)
	{
		const Characteristics& characteristics = characteristicsTable[i];
		if(characteristics.descriptor == descriptor)
			return &characteristics;
	}

	debug_printf(L"Unknown cache/TLB descriptor 0x%x\n", (unsigned int)descriptor);
	return 0;
}


enum DescriptorFlags
{
	SKIP_CACHE_DESCRIPTORS = 1,
	NO_LAST_LEVEL_CACHE    = 2,
	PREFETCH64             = 64,
	PREFETCH128            = 128
};

static bool HandleSpecialDescriptor(Descriptor descriptor, size_t& descriptorFlags)
{
	switch(descriptor)
	{
	case 0:	// carries no information
		return true;

	case 0x40:
		descriptorFlags |= NO_LAST_LEVEL_CACHE;
		return true;

	case 0xF0:
		descriptorFlags |= PREFETCH64;
		return true;

	case 0xF1:
		descriptorFlags |= PREFETCH128;
		return true;

	case 0xFF:	// descriptors don't include caches (use CPUID.4 instead)
		descriptorFlags |= SKIP_CACHE_DESCRIPTORS;
		return true;

	default:
		return false;
	}
}


static void DetectCacheAndTLB(size_t& descriptorFlags)
{
	const Descriptors descriptors = GetDescriptors();
	for(Descriptors::const_iterator it = descriptors.begin(); it != descriptors.end(); ++it)
	{
		const Descriptor descriptor = *it;
		if(HandleSpecialDescriptor(descriptor, descriptorFlags))
			continue;

		const Characteristics* characteristics = CharacteristicsFromDescriptor(*it);
		if(!characteristics)
			continue;

		if((descriptorFlags & SKIP_CACHE_DESCRIPTORS) && !characteristics->IsTLB())
			continue;

		x86_x64::Cache cache;
		cache.Initialize(characteristics->Level(), characteristics->Type());
		cache.numEntries    = characteristics->NumEntries();
		cache.entrySize     = characteristics->EntrySize();
		cache.associativity = characteristics->associativity;
		cache.sharedBy      = 1;	// (safe default)
		if(characteristics->IsTLB())
			AddTLB(cache);
		else
			AddCache(cache);
	}
}

}	// namespace CPUID2
Example 15: ReiserFS next-key traversal through cached tree nodes
/* Get the next key, i.e. the key following the last retrieved key in
 * tree order.  INFO->current_ih and
 * INFO->current_item are adapted accordingly.  */
static int
next_key( void )
{
     __u16 depth;
     struct item_head *ih = INFO->current_ih + 1;
     char *cache;


     DEBUG_F( "next_key:\n  old ih: key %u:%u:%u:%u version:%u\n",
	      le32_to_cpu(INFO->current_ih->ih_key.k_dir_id),
	      le32_to_cpu(INFO->current_ih->ih_key.k_objectid),
	      le32_to_cpu(INFO->current_ih->ih_key.u.k_offset_v1.k_offset),
	      le32_to_cpu(INFO->current_ih->ih_key.u.k_offset_v1.k_uniqueness),
	      ih_version(INFO->current_ih) );


     if ( ih == &ITEMHEAD[blkh_nr_item(BLOCKHEAD( LEAF ))] )
     {
	  depth = BLKH_LEVEL_LEAF;
	  /* The last item was the last in the leaf node.
	   * Read in the next block. */
	  do
	  {
	       if ( depth == INFO->tree_depth )
	       {
		    /* There are no more keys at all.
		     * Return a dummy item with MAX_KEY. */
		    ih =
			 ( struct item_head * )
			 &BLOCKHEAD( LEAF )->blk_right_delim_key;
		    goto found;
	       }
	       depth++;

	       DEBUG_F( "  depth=%u, i=%u\n", depth, INFO->next_key_nr[depth] );

	  }
	  while ( INFO->next_key_nr[depth] == 0 );

	  if ( depth == INFO->tree_depth )
	       cache = ROOT;
	  else if ( depth <= INFO->cached_slots )
	       cache = CACHE( depth );
	  else
	  {
	       cache = read_tree_node( INFO->blocks[depth], --depth );
	       if ( !cache )
		    return 0;
	  }

	  do
	  {
	       __u16 nr_item = blkh_nr_item(BLOCKHEAD( cache ));
	       int key_nr = INFO->next_key_nr[depth]++;


	       DEBUG_F( "  depth=%u, i=%u/%u\n", depth, key_nr, nr_item );

	       if ( key_nr == nr_item )
		    /* This is the last item in this block;
		     * set next_key_nr to 0. */
		    INFO->next_key_nr[depth] = 0;

	       cache =
		    read_tree_node( dc_block_number( &(DC( cache )[key_nr])),
				    --depth );
	       if ( !cache )
		    return 0;
	  }
	  while ( depth > BLKH_LEVEL_LEAF );

	  ih = ITEMHEAD;
     }
found:
     INFO->current_ih = ih;
     INFO->current_item = &LEAF[ih_location(ih)];

     DEBUG_F( "  new ih: key %u:%u:%u:%u version:%u\n",
	      le32_to_cpu(INFO->current_ih->ih_key.k_dir_id),
	      le32_to_cpu(INFO->current_ih->ih_key.k_objectid),
	      le32_to_cpu(INFO->current_ih->ih_key.u.k_offset_v1.k_offset),
	      le32_to_cpu(INFO->current_ih->ih_key.u.k_offset_v1.k_uniqueness),
	      ih_version(INFO->current_ih) );

     return 1;
}