Example #1
static void burn_credits(struct csched_vcpu *svc, s_time_t now)
{
    s_time_t delta;
    unsigned int credits;

    /* Assert svc is current */
    ASSERT(svc==CSCHED_VCPU(per_cpu(schedule_data, svc->vcpu->processor).curr));

    if ( (delta = now - svc->start_time) <= 0 )
        return;

    credits = (delta*CSCHED_CREDITS_PER_MSEC + MILLISECS(1)/2) / MILLISECS(1);
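    /* Deduct the burned credits atomically, then advance start_time by the
     * time those credits actually represent, so rounding error carries over
     * to the next call instead of being lost. */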
    atomic_sub(credits, &svc->credit);
    svc->start_time += (credits * MILLISECS(1)) / CSCHED_CREDITS_PER_MSEC;
}
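For a concrete feel of the rounding above: with CSCHED_CREDITS_PER_MSEC = 10 (its usual value in Xen's credit scheduler) and delta = 2.4 ms of run time, credits = (24,000,000 + 500,000) / 1,000,000 = 24, and start_time then advances by 24 * MILLISECS(1) / 10 = exactly 2.4 ms, so no run time is lost or double-counted across calls.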
Example #2
/* Blocks the thread until a message arrives in the mailbox, but does
 * not block the thread longer than "timeout" milliseconds (similar to
 * the sys_arch_sem_wait() function). The "msg" argument is a result
 * parameter that is set by the function (i.e., by doing "*msg =
 * ptr"). The "msg" parameter maybe NULL to indicate that the message
 * should be dropped.
 *
 * The return values are the same as for the sys_arch_sem_wait() function:
 * Number of milliseconds spent waiting or SYS_ARCH_TIMEOUT if there was a
 * timeout. */
u32_t sys_arch_mbox_fetch(sys_mbox_t *mbox, void **msg, u32_t timeout)
{
    int flags;
    int64_t then = NOW();
    int64_t deadline;

    /* A timeout of zero means block indefinitely; encode it as deadline 0 */
    if (timeout == 0)
        deadline = 0;
    else
        deadline = then + MILLISECS(timeout);

    while (1) {
        wait_event_deadline(mbox->read_sem.wait, (mbox->read_sem.count > 0), deadline);

        local_irq_save(flags);
        /* Atomically check that we can proceed */
        if (mbox->read_sem.count > 0 || (deadline && NOW() >= deadline))
            break;
        local_irq_restore(flags);
    }

    if (mbox->read_sem.count <= 0) {
        local_irq_restore(flags);
        return SYS_ARCH_TIMEOUT;
    }

    mbox->read_sem.count--;
    local_irq_restore(flags);
    do_mbox_fetch(mbox, msg);
    return 0;
}
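A hedged usage sketch, not part of the original source: how a caller might use the contract above (note that this port returns 0 on success rather than the elapsed time). "process_packet" is a hypothetical handler; sys_mbox_t and SYS_ARCH_TIMEOUT come from the lwIP port.

static void poll_mailbox_once(sys_mbox_t *mbox)
{
    void *msg;

    /* Wait at most 100 ms for a message */
    if (sys_arch_mbox_fetch(mbox, &msg, 100) == SYS_ARCH_TIMEOUT)
        return;              /* nothing arrived in time */

    process_packet(msg);     /* hypothetical consumer */
}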
Example #3
int xencons_ring_send_no_notify(struct consfront_dev *dev, const char *data, unsigned len)
{
    int sent = 0;
    struct xencons_interface *intf;
    XENCONS_RING_IDX cons, prod;

    if (!dev)
        intf = xencons_interface();
    else
        intf = dev->ring;
    if (!intf)
        return sent;

    while (sent < len) {
        cons = intf->out_cons;
        prod = intf->out_prod;
        mb();                   /* read the indexes before touching ring data */
        BUG_ON((prod - cons) > sizeof(intf->out));

        while ((sent < len) && ((prod - cons) < sizeof(intf->out)))
            intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];

        wmb();                  /* make the data visible before publishing prod */
        intf->out_prod = prod;

        if (sent < len)
            block_domain(MILLISECS(10));    /* ring full: let the consumer drain it */
    }

    return sent;
}
Example #4
/* This function should be called soon after each time the MSB of the
 * pmtimer register rolls over, to make sure we update the status
 * registers and SCI at least once per rollover */
static void pmt_timer_callback(void *opaque)
{
    PMTState *s = opaque;
    uint32_t pmt_cycles_until_flip;
    uint64_t time_until_flip;

    spin_lock(&s->lock);

    /* Recalculate the timer and make sure we get an SCI if we need one */
    pmt_update_time(s);

    /* How close are we to the next MSB flip? */
    pmt_cycles_until_flip = TMR_VAL_MSB - (s->pm.tmr_val & (TMR_VAL_MSB - 1));

    /* Overall time between MSB flips */
    time_until_flip = (1000000000ULL << 23) / FREQUENCE_PMTIMER;

    /* Reduced appropriately */
    time_until_flip = (time_until_flip * pmt_cycles_until_flip) >> 23;

    /* Wake up again near the next bit-flip */
    set_timer(&s->timer, NOW() + time_until_flip + MILLISECS(1));

    spin_unlock(&s->lock);
}
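For reference, the arithmetic above: the ACPI PM timer ticks at FREQUENCE_PMTIMER = 3,579,545 Hz, so (1000000000ULL << 23) / FREQUENCE_PMTIMER is the number of nanoseconds in 2^23 ticks, i.e. the full period between flips of TMR_VAL_MSB (about 2.343 s); scaling by pmt_cycles_until_flip / 2^23 then yields the time until the next flip.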
Example #5
/* Blocks the thread while waiting for the semaphore to be
 * signaled. If the "timeout" argument is non-zero, the thread should
 * only be blocked for the specified time (measured in
 * milliseconds).
 *
 * If the timeout argument is non-zero, the return value is the number of
 * milliseconds spent waiting for the semaphore to be signaled. If the
 * semaphore wasn't signaled within the specified time, the return value is
 * SYS_ARCH_TIMEOUT. If the thread didn't have to wait for the semaphore
 * (i.e., it was already signaled), the function may return zero. */
u32_t sys_arch_sem_wait(sys_sem_t *sem, u32_t timeout)
{
    /* Slightly more complicated than the normal minios semaphore:
     * need to wake on timeout *or* signal */
    int flags;
    int64_t then = NOW();
    int64_t deadline;

    if (timeout == 0)
        deadline = 0;
    else
        deadline = then + MILLISECS(timeout);

    while (1) {
        wait_event_deadline(sem->sem.wait, (sem->sem.count > 0), deadline);

        local_irq_save(flags);
        /* Atomically check that we can proceed */
        if (sem->sem.count > 0 || (deadline && NOW() >= deadline))
            break;
        local_irq_restore(flags);
    }

    if (sem->sem.count > 0) {
        sem->sem.count--;
        local_irq_restore(flags);
        return NSEC_TO_MSEC(NOW() - then);
    }

    local_irq_restore(flags);
    return SYS_ARCH_TIMEOUT;
}
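A hedged sketch, not from the original source, of spending a wait budget across calls as the contract above permits ("op_done" and "handle_timeout" are hypothetical):

static void wait_with_budget(sys_sem_t *op_done)
{
    u32_t budget = 500;      /* total milliseconds we are willing to wait */
    u32_t waited = sys_arch_sem_wait(op_done, budget);

    if (waited == SYS_ARCH_TIMEOUT)
        handle_timeout();    /* hypothetical */
    else
        budget -= waited;    /* remainder available for a later wait */
}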
Example #6
static bool_t create_client(ext2fs_st            *st, 
			    Client_st            *c,
			    IDCServerBinding_clp  binding)
{
    int          i;
    USD_QoS      qos;

    c->fs=st;
    CL_INIT(c->cl, &fs_ms, c);
    c->binding=binding;

    /* Default QoS? Hmm. */
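    /* Assumed field semantics (Nemesis USD QoS): p = period, s = slice of
       disk time guaranteed per period, l = latency hint, x = whether the
       client may use slack time beyond its guarantee. */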
    c->qos.p = MILLISECS(200);
    c->qos.s = MILLISECS(10);
    c->qos.l = MILLISECS(5);
    c->qos.x = True;

    qos.p = c->qos.p;
    qos.s = c->qos.s;
    qos.x = c->qos.x;

    for (i=0; i<MAX_CLIENT_HANDLES; i++) {
	c->handles[i].used=False;
    }

    /* Create a stream for the client. This involves calling the USD domain;
       must be careful we don't deadlock. */
    if(USDCtl$CreateStream(st->disk.usdctl,
			   (USD_ClientID)(word_t)c,
			   &(c->usd_stream),
			   &(c->usd))
       != USDCtl_Error_None)
    {
	return False;
    }
    if(USDCtl$AdjustQoS(st->disk.usdctl,
			c->usd_stream,
			&qos)
       != USDCtl_Error_None)
    {
	(void)USDCtl$DestroyStream(st->disk.usdctl,
				   c->usd_stream);
	return False;
    }

    return True;
}
Example #7
static void __init parse_snb_timeout(const char *s)
{
    int t;

    t = parse_bool(s);
    if ( t < 0 )
    {
        if ( *s == '\0' )
            t = SNB_IGD_TIMEOUT_LEGACY;
        else if ( strcmp(s, "cap") == 0 )
            t = SNB_IGD_TIMEOUT;
        else
            t = strtoul(s, NULL, 0);
    }
    else
        t = t ? SNB_IGD_TIMEOUT_LEGACY : 0;
    snb_igd_timeout = MILLISECS(t);
}
Example #8
static void intel_thermal_interrupt(struct cpu_user_regs *regs)
{
    uint64_t msr_content;
    unsigned int cpu = smp_processor_id();
    static DEFINE_PER_CPU(s_time_t, next);

    ack_APIC_irq();

    /* Rate-limit the report: at most one message per CPU every five seconds */
    if (NOW() < per_cpu(next, cpu))
        return;

    per_cpu(next, cpu) = NOW() + MILLISECS(5000);
    rdmsrl(MSR_IA32_THERM_STATUS, msr_content);
    if (msr_content & 0x1) {
        printk(KERN_EMERG "CPU%d: Temperature above threshold\n", cpu);
        printk(KERN_EMERG "CPU%d: Running in modulated clock mode\n",
                cpu);
        add_taint(TAINT_MACHINE_CHECK);
    } else {
        printk(KERN_INFO "CPU%d: Temperature/speed normal\n", cpu);
    }
}
Example #9
/* P4/Xeon Thermal transition interrupt handler */
static void intel_thermal_interrupt(struct cpu_user_regs *regs)
{
	u32 l, h;
	unsigned int cpu = smp_processor_id();
	static s_time_t next[NR_CPUS];

	ack_APIC_irq();

	if (NOW() < next[cpu])
		return;

	next[cpu] = NOW() + MILLISECS(5000);
	rdmsr(MSR_IA32_THERM_STATUS, l, h);
	if (l & 0x1) {
		printk(KERN_EMERG "CPU%d: Temperature above threshold\n", cpu);
		printk(KERN_EMERG "CPU%d: Running in modulated clock mode\n",
				cpu);
		add_taint(TAINT_MACHINE_CHECK);
	} else {
		printk(KERN_INFO "CPU%d: Temperature/speed normal\n", cpu);
	}
}
Example #10
static void __ns16550_poll(struct cpu_user_regs *regs)
{
    struct serial_port *port = this_cpu(poll_port);
    struct ns16550 *uart = port->uart;

    if ( uart->intr_works )
        return;     /* Interrupts work - no more polling */

    if ( uart->probing ) {
        uart->probing = 0;
        if ( (ns_read_reg(uart, LSR) & 0xff) == 0xff )
            return;     /* All bits set - probably no UART present */
    }

    while ( ns_read_reg(uart, LSR) & LSR_DR )
        serial_rx_interrupt(port, regs);

    if ( ns_read_reg(uart, LSR) & LSR_THRE )
        serial_tx_interrupt(port, regs);

    /* Interrupts still not working: re-arm the polling timer */
    set_timer(&uart->timer, NOW() + MILLISECS(uart->timeout_ms));
}
Example #11
static void entry(void *data)
{
    struct cd_init *init = data;

    kernel_st *kst = init->kst;
    Wr_clp output = init->output;
    
    uint8_t c;
    uint8_t linebuf[512];
    uint32_t index;

    Heap$Free(Pvs(heap),init);

    index=0;
    for (;;) {
	/* Wait for some data */
	while(kst->console.head == kst->console.tail) {
	    PAUSE(MILLISECS(500));
	}
	/* Fetch characters */
	while(kst->console.head != kst->console.tail) {
	    c=kst->console.buf[kst->console.tail++];
	    if (kst->console.tail==CONSOLEBUF_SIZE) kst->console.tail=0;
	    if (c=='\n') {
		linebuf[index++]='\n';
		linebuf[index++]=0;

		Wr$PutStr(output,linebuf);

		index=0;
	    } else {
		linebuf[index++]=c;
		/* Flush early rather than overrun linebuf on a very long line */
		if (index >= sizeof(linebuf)-1) {
		    linebuf[index]=0;
		    Wr$PutStr(output,linebuf);
		    index=0;
		}
	    }
	}
    }
    /* NOTREACHED */
}
Example #12
struct tpm_chip* init_tpm_tis(unsigned long baseaddr, int localities, unsigned int irq)
{
   int i;
   unsigned long addr;
   struct tpm_chip* tpm = NULL;
   uint32_t didvid;
   uint32_t intfcaps;
   uint32_t intmask;

   printk("============= Init TPM TIS Driver ==============\n");

   /* Sanity check the localities input */
   if(localities & ~TPM_TIS_EN_LOCLALL) {
      printk("init_tpm_tis() Invalid locality specification! %X\n", localities);
      goto abort_egress;
   }

   printk("IOMEM Machine Base Address: %lX\n", baseaddr);

   /* Create the tpm data structure */
   tpm = malloc(sizeof(struct tpm_chip));
   if(tpm == NULL)
      goto abort_egress;
   __init_tpm_chip(tpm);

   /* Set the enabled localities - if 0 we leave default as all enabled */
   if(localities != 0) {
      tpm->enabled_localities = localities;
   }
   printk("Enabled Localities: ");
   for(i = 0; i < 5; ++i) {
      if(locality_enabled(tpm, i)) {
	 printk("%d ", i);
      }
   }
   printk("\n");

   /* Set the base machine address */
   tpm->baseaddr = baseaddr;

   /* Set default timeouts */
   tpm->timeout_a = MILLISECS(TIS_SHORT_TIMEOUT);
   tpm->timeout_b = MILLISECS(TIS_LONG_TIMEOUT);
   tpm->timeout_c = MILLISECS(TIS_SHORT_TIMEOUT);
   tpm->timeout_d = MILLISECS(TIS_SHORT_TIMEOUT);

   /* Map the MMIO pages */
   addr = tpm->baseaddr;
   for(i = 0; i < 5; ++i) {
      if(locality_enabled(tpm, i)) {
	 /* Map the page in now */
	 if((tpm->pages[i] = ioremap_nocache(addr, PAGE_SIZE)) == NULL) {
	    printk("Unable to map iomem page a address %p\n", addr);
	    goto abort_egress;
	 }

	 /* Set default locality to the first enabled one */
	 if (tpm->locality < 0) {
	    if(tpm_tis_request_locality(tpm, i) < 0) {
	       printk("Unable to request locality %d??\n", i);
	       goto abort_egress;
	    }
	 }
      }
      addr += PAGE_SIZE;
   }


   /* Get the vendor and device ids */
   didvid = ioread32(TPM_DID_VID(tpm, tpm->locality));
   tpm->did = didvid >> 16;
   tpm->vid = didvid & 0xFFFF;


   /* Get the revision id */
   tpm->rid = ioread8(TPM_RID(tpm, tpm->locality));

   printk("1.2 TPM (device-id=0x%X vendor-id = %X rev-id = %X)\n", tpm->did, tpm->vid, tpm->rid);

   intfcaps = ioread32(TPM_INTF_CAPS(tpm, tpm->locality));
   printk("TPM interface capabilities (0x%x):\n", intfcaps);
   if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
      printk("\tBurst Count Static\n");
   if (intfcaps & TPM_INTF_CMD_READY_INT)
      printk("\tCommand Ready Int Support\n");
   if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
      printk("\tInterrupt Edge Falling\n");
   if (intfcaps & TPM_INTF_INT_EDGE_RISING)
      printk("\tInterrupt Edge Rising\n");
   if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
      printk("\tInterrupt Level Low\n");
   if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
      printk("\tInterrupt Level High\n");
   if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
      printk("\tLocality Change Int Support\n");
   if (intfcaps & TPM_INTF_STS_VALID_INT)
      printk("\tSts Valid Int Support\n");
   if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
      printk("\tData Avail Int Support\n");

   /* Interrupt setup */
   intmask = ioread32(TPM_INT_ENABLE(tpm, tpm->locality));

   intmask |= TPM_INTF_CMD_READY_INT
      | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
      | TPM_INTF_STS_VALID_INT;

   iowrite32(TPM_INT_ENABLE(tpm, tpm->locality), intmask);

   /* If an IRQ was requested, set it up */
   if(irq) {
      if(irq != TPM_PROBE_IRQ) {
	 tpm->irq = irq;
      } else {
	 /*FIXME add irq probing feature later */
	 printk("IRQ probing not implemented\n");
      }
   }

   if(tpm->irq) {
      iowrite8(TPM_INT_VECTOR(tpm, tpm->locality), tpm->irq);

      if(bind_pirq(tpm->irq, 1, tpm_tis_irq_handler, tpm) != 0) {
	 printk("Unabled to request irq: %u for use\n", tpm->irq);
	 printk("Will use polling mode\n");
	 tpm->irq = 0;
      } else {
	 /* Clear all existing */
	 iowrite32(TPM_INT_STATUS(tpm, tpm->locality), ioread32(TPM_INT_STATUS(tpm, tpm->locality)));

	 /* Turn on interrupts */
	 iowrite32(TPM_INT_ENABLE(tpm, tpm->locality), intmask | TPM_GLOBAL_INT_ENABLE);
      }
   }

   if(tpm_get_timeouts(tpm)) {
      printk("Could not get TPM timeouts and durations\n");
      goto abort_egress;
   }
   tpm_continue_selftest(tpm);


   return tpm;
abort_egress:
   if(tpm != NULL) {
      shutdown_tpm_tis(tpm);
   }
   return NULL;
}
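A hedged usage sketch, not from the original source: bringing the driver up at the conventional TIS MMIO base with every locality enabled and no IRQ (polling mode).

static struct tpm_chip *tpm_setup(void)
{
    /* 0xFED40000 is the TIS-specified MMIO base address */
    struct tpm_chip *tpm = init_tpm_tis(0xFED40000UL, TPM_TIS_EN_LOCLALL, 0);

    if (tpm == NULL)
        printk("TPM TIS initialization failed\n");
    return tpm;
}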
Example #13
/* Read a block from the fs, returning a pointer to a struct buffer_head. The
   block is locked as appropriate. YOU MUST CALL brelse() once you are
   finished with the block, otherwise it will sit around for ages. */
struct buffer_head *bread(ext2fs_st *st, uint32_t block, bool_t rw)
{
    struct buffer_head *buf;

    TRC(printf("bread %d\n", block));
    MU_LOCK(&st->cache.mu);

    buf = bfind(st, block);

    if (buf) {

	LINK_REMOVE(buf);
	MU_LOCK(&buf->mu);	/* Lock the buffer */
	LINK_ADD_TO_HEAD(&st->cache.bufs, buf);	/* Pull to front */
	MU_RELEASE(&st->cache.mu); /* Don't need the cache locked any more */

	if (rw) {
	    /* We're going for exclusive access. Anything else just
	       isn't good enough. */
	retry:
	    if (buf->state == buf_unlocked) {
		buf->state = buf_locked_readwrite;
		TRC(printf("ext2fs: cached block %d now locked readwrite\n",
			   block));
		/* ASSERT b_count==0 */
		MU_RELEASE(&buf->mu);
		return buf;
	    }
	    
	    TRC(printf("Correct block cached, but already inuse -> retry\n"));
	    
	    /* We can't get it now; wait for a signal and try again */
	    WAIT(&buf->mu, &buf->cv);
	    goto retry;
	} else {
	    buf->state = buf_locked_readonly;
	}
	buf->b_count++;

	buf->lastused = NOW();
	buf->refd++;

	MU_RELEASE(&buf->mu);
	return buf;

    } else {
	/* Wait on the LRU buffer */
	while (! (buf = balloc(st))) {
	    TRC(printf("waiting...\n"));
	    WAIT(&st->cache.mu, &st->cache.freebufs);
	    TRC(printf("retry...\n"));
	}
	LINK_REMOVE(buf);

	if (buf->state) {
	    /* Eviction trace: block number, times referenced,
	       active lifetime (ms), time since last use (ms) */
	    TRC(printf("E %d %d %d %d\n", 
		   buf->b_blocknr, buf->refd,
		   (buf->lastused - buf->firstused) / MILLISECS(1),
		   (NOW() - buf->lastused) / MILLISECS(1)));
	}
	}

	MU_RELEASE(&st->cache.mu);
	/* At this point the block is either unlocked or empty. We can fetch
	   a new block into it. */
	TRC(printf("reading %d\n", block));
	memset(buf->b_data, 0xaa, st->fsc.block_size);
	if (!logical_read(st, block, 1, buf->b_data, st->fsc.block_size)) {
	    printf("ext2fs: getblock: read of block %d failed\n",
		   block);
	    /* XXX what now? */
	}
	buf->firstused = buf->lastused  = NOW();
	buf->refd      = 1;
	buf->b_size    = st->fsc.block_size;
	buf->b_blocknr = block;
	buf->b_count++;
	if (rw) {
	    buf->state = buf_locked_readwrite;
	} else {
	    buf->state = buf_locked_readonly;
	    /* ASSERT refcount==1 */
	}
	MU_LOCK(&st->cache.mu);
	LINK_ADD_TO_HEAD(&st->cache.bufs, buf);	/* Pull to front */
	MU_RELEASE(&st->cache.mu);
    }

    return buf;
}
Example #14
static void nmi_timer_fn(void *unused)
{
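    /* Heartbeat: bump this CPU's tick count and re-arm for one second out */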
    this_cpu(nmi_timer_ticks)++;
    set_timer(&this_cpu(nmi_timer), NOW() + MILLISECS(1000));
}
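A minimal sketch of how such a self-re-arming timer is typically started, assuming Xen's timer API and that the per-CPU nmi_timer and nmi_timer_ticks objects used above are declared elsewhere:

static void start_nmi_heartbeat(void)
{
    /* Arm this CPU's timer once; nmi_timer_fn re-arms itself from then on */
    init_timer(&this_cpu(nmi_timer), nmi_timer_fn, NULL, smp_processor_id());
    set_timer(&this_cpu(nmi_timer), NOW() + MILLISECS(1000));
}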
Example #15
/* Bring up a remote CPU */
int __cpu_up(unsigned int cpu)
{
    int rc;
    s_time_t deadline;

    printk("Bringing up CPU%d\n", cpu);

    rc = init_secondary_pagetables(cpu);
    if ( rc < 0 )
        return rc;

    console_start_sync(); /* Secondary may use early_printk */

    /* Tell the remote CPU which stack to boot on. */
    init_data.stack = idle_vcpu[cpu]->arch.stack;

    /* Tell the remote CPU what its logical CPU ID is. */
    init_data.cpuid = cpu;

    /* Open the gate for this CPU */
    smp_up_cpu = cpu_logical_map(cpu);
    clean_dcache(smp_up_cpu);

    rc = arch_cpu_up(cpu);

    console_end_sync();

    if ( rc < 0 )
    {
        printk("Failed to bring up CPU%d\n", cpu);
        return rc;
    }

    deadline = NOW() + MILLISECS(1000);

    while ( !cpu_online(cpu) && NOW() < deadline )
    {
        cpu_relax();
        process_pending_softirqs();
    }

    /*
     * Nuke start of day info before checking one last time if the CPU
     * actually came online. If it is not online it may still be
     * trying to come up and may show up later unexpectedly.
     *
     * This doesn't completely avoid the possibility of the supposedly
     * failed CPU trying to progress with another CPU's stack settings
     * etc, but better than nothing, hopefully.
     */
    init_data.stack = NULL;
    init_data.cpuid = ~0;
    smp_up_cpu = MPIDR_INVALID;
    clean_dcache(smp_up_cpu);

    if ( !cpu_online(cpu) )
    {
        printk("CPU%d never came online\n", cpu);
        return -EIO;
    }

    return 0;
}
Example #16
void boot(Rd_clp rd, bool_t verbose, string_t cmdline)
{
#ifdef INTEL
    uint32_t NOCLOBBER length;
    uint32_t start, slen;
    uint8_t * NOCLOBBER buff;
    Stretch_clp stretch;
    
    length=Rd$Length(rd);

    /* Read the header of the file to try to work out what it is */

    buff=Heap$Malloc(Pvs(heap), 512);
    Rd$GetChars(rd, buff, 512);

    if (buff[510]==0x55 && buff[511]==0xaa) {
	if (verbose) printf(
	    "Traditional bootable Nemesis image with %d setup blocks.\n",
	    buff[497]);
	start=512*(buff[497]+1);
    } else if (buff[0]==0xfa && buff[1]==0xfc ) {
	if (verbose) printf("Hmm... looks like a bare Nemesis image.\n");
	start=0;
    } else {
	printf("This does not look like a Nemesis image.\n");
	return;
    }

    Heap$Free(Pvs(heap), buff);

    length-=start;

    /* Get a stretch of the appropriate size */
    stretch = STR_NEW(length);
    buff    = STR_RANGE(stretch, &slen);

    /* Read the image */
    Rd$Seek(rd, start);
#define PKTSIZE 8192
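    /* Pull the image in 8 KB chunks, printing a dot per chunk as progress */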
    {
	char *bufptr = buff;
	int n;
	int togo = length;

	while (togo) {
	    n = Rd$GetChars(rd, bufptr, PKTSIZE);
	    togo -= n;
	    bufptr += n;
	    printf("."); fflush(stdout);
	} 
    }
    printf("\n");

    /* now shut down any Ethernet cards, so we don't get anything DMAed
     * over the image while it boots */
    /* XXX AND This is a privileged operation.  And we shouldn't really
     * dive at the Netif interface directly. */
    /* XXX SDE we might be in a UDPnash at the moment, so there should be
     * NO OUTPUT after this point */
    verbose=False;
    {
	Type_Any any;
	Netif_clp netif;
	const char * const downlist[] = {"de4x5-0",
					 "de4x5-1",
					 /* don't need 3c509, since no DMA */
					 "eth0",
					 NULL};
	/* HACK: need to lose the "const", since Context$Get() doesn't
         * have it. */
	char ** NOCLOBBER eth = (char**)downlist;
	char buf[32];

	while(*eth)
	{
	    sprintf(buf, "dev>%s>control", *eth);

	    if (Context$Get(Pvs(root), buf, &any))
	    {
		if (verbose)
		{
		    printf("Shutting down %s... ", *eth);
		    fflush(stdout);
		}

		TRY {
		    netif = IDC_OPEN(buf, Netif_clp);
		} CATCH_ALL {
		    if (verbose)
			printf("failed: caught exception\n");
		    netif = NULL;
		} ENDTRY;

		if (netif && !Netif$Down(netif) && verbose)
		{
		    printf("failed: Netif$Down() error\n");
		    netif = NULL;
		}

		if (netif && verbose)
		    printf("ok\n");
	    }

	    eth++;
	}
    }

    if (verbose)
    {
	printf("Starting image...\n");
	PAUSE(MILLISECS(1000));
    }

    /* Chain to the new image */
    ENTER_KERNEL_CRITICAL_SECTION();
    ntsc_chain(buff, length, cmdline);
    LEAVE_KERNEL_CRITICAL_SECTION();

    printf("Bogosity: the new image was not started\n");
    printf("bogosity: (n) (slang); the degree of unusallness and brokenness of some item or event\n");
#else
    printf("Boot chaining not supported\n");
#endif
}
Example #17
** overflowing or audio breaking up, etc.  For more details see the
** Measure project web pages:
**
**   http://www.cl.cam.ac.uk/Research/SRG/measure/
**
** (don't want this code inline in the scheduler since it confuses
** an already complicated lump of code)
*/

#ifdef CONFIG_MEASURE_KERNEL_ACCOUNTING
/* 
 * Sampling preemption. For accurate accounting we measure as close as possible
 * to a defined sampling period
 */
static Time_ns ac_next = 0; /* 0 is start case... */
static Time_ns ac_period = MILLISECS(100); /* Set this to something sensible */

/* Measure style kernel accounting */
static void ac_measure(kernel_st *st, Time_ns time)
{
    SDom_t *sdom;

    /* start case */
    if (!ac_next) ac_next = time;

    if (time >= ac_next) /* we've reached the end of a measure period */
    {
	unsigned int n,nmax;
	/* Whizz around *all* sdoms copying ac_m_tm to ac_m_lm */
	nmax = PQ_SIZE(st->wait);
	for (n=1; n <= nmax; n++)
Example #18
int tpm_get_timeouts(struct tpm_chip *chip)
{
   struct tpm_cmd_t tpm_cmd;
   struct timeout_t *timeout_cap;
   struct duration_t *duration_cap;
   ssize_t rc;
   uint32_t timeout;
   unsigned int scale = 1;

   tpm_cmd.header.in = tpm_getcap_header;
   tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
   tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
   tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;

   if((rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
	 "attempting to determine the timeouts")) != 0) {
      printk("transmit failed %d\n", rc);
      goto duration;
   }

   if(be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
         be32_to_cpu(tpm_cmd.header.out.length) !=
         sizeof(tpm_cmd.header.out) + sizeof(uint32_t) + 4 * sizeof(uint32_t)) {
      return -EINVAL;
   }

   timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout;
   /* Don't overwrite default if value is 0 */
   timeout = be32_to_cpu(timeout_cap->a);
   if(timeout && timeout < 1000) {
      /* timeouts reported in msec rather than usec */
      scale = 1000;
   }
   if (timeout)
      chip->timeout_a = MICROSECS(timeout * scale); /* usec -> system time */
   ADJUST_TIMEOUTS_TO_STANDARD(chip->timeout_a,MILLISECS(TIS_SHORT_TIMEOUT),'a');

   timeout = be32_to_cpu(timeout_cap->b);
   if (timeout)
      chip->timeout_b = MICROSECS(timeout * scale); /* usec -> system time */
   ADJUST_TIMEOUTS_TO_STANDARD(chip->timeout_b,MILLISECS(TIS_LONG_TIMEOUT),'b');

   timeout = be32_to_cpu(timeout_cap->c);
   if (timeout)
      chip->timeout_c = MICROSECS(timeout * scale); /* usec -> system time */
   ADJUST_TIMEOUTS_TO_STANDARD(chip->timeout_c,MILLISECS(TIS_SHORT_TIMEOUT),'c');

   timeout = be32_to_cpu(timeout_cap->d);
   if (timeout)
      chip->timeout_d = MICROSECS(timeout * scale); /* usec -> system time */
   ADJUST_TIMEOUTS_TO_STANDARD(chip->timeout_d,MILLISECS(TIS_SHORT_TIMEOUT),'d');

duration:
   tpm_cmd.header.in = tpm_getcap_header;
   tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
   tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
   tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_DURATION;

   if((rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
	 "attempting to determine the durations")) < 0) {
      return rc;
   }

   if(be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
         be32_to_cpu(tpm_cmd.header.out.length) !=
         sizeof(tpm_cmd.header.out) + sizeof(uint32_t) + 3 * sizeof(uint32_t)) {
      return -EINVAL;
   }

   duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
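   /* Durations come back in microseconds; MICROSECS() converts them to
      system time units */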
   chip->duration[TPM_SHORT] = MICROSECS(be32_to_cpu(duration_cap->tpm_short));
   chip->duration[TPM_MEDIUM] = MICROSECS(be32_to_cpu(duration_cap->tpm_medium));
   chip->duration[TPM_LONG] = MICROSECS(be32_to_cpu(duration_cap->tpm_long));

   /* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above
    * value wrong and apparently reports msecs rather than usecs. So we
    * fix up the resulting too-small TPM_SHORT value to make things work.
    */
   if (chip->duration[TPM_SHORT] < MILLISECS(10)) {
      chip->duration[TPM_SHORT] = SECONDS(1);
      chip->duration[TPM_MEDIUM] *= 1000;
      chip->duration[TPM_LONG] *= 1000;
      printk("Adjusting TPM timeout parameters\n");
   }

   return 0;
}