Example #1
File: acpica.c Project: olsner/os
static void MsgClaimPci(uintptr_t rcpt, uintptr_t addr, uintptr_t pins)
{
	addr &= 0xffff;
	ACPI_PCI_ID id = { 0, (addr >> 8) & 0xff, (addr >> 3) & 31, addr & 7 };
	log(claim_pci, "claim pci %02x:%02x.%x\n", id.Bus, id.Device, id.Function);

	// Set up whatever state is needed to track PCI device drivers in general

	int irqs[4] = {0};
	for (int pin = 0; pin < 4; pin++) {
		if (!(pins & (1 << pin))) continue;

		ACPI_STATUS status = RouteIRQ(&id, 0, &irqs[pin]);
		CHECK_STATUS("RouteIRQ");
		log(claim_pci, "%02x:%02x.%x pin %d routed to IRQ %#x\n",
			id.Bus, id.Device, id.Function,
			pin, irqs[pin]);
	}

	if (pins & ACPI_PCI_CLAIM_MASTER) {
		u64 value;
		AcpiOsReadPciConfiguration(&id, PCI_COMMAND, &value, 16);
		if (!(value & PCI_COMMAND_MASTER)) {
			value |= PCI_COMMAND_MASTER;
			AcpiOsWritePciConfiguration(&id, PCI_COMMAND, value, 16);
		}
	}

	pins = (u64)irqs[3] << 48 | (u64)irqs[2] << 32 | irqs[1] << 16 | irqs[0];

	send2(MSG_ACPI_CLAIM_PCI, rcpt, addr, pins);
	hmod(rcpt, (uintptr_t)pci_device_handles + addr, 0);
	return;

failed:
	send2(MSG_ACPI_CLAIM_PCI, rcpt, 0, 0);
}

static size_t debugger_buffer_pos = 0;

static void debugger_pre_cmd(void) {
	debugger_buffer_pos = 0;
	AcpiGbl_MethodExecuting = FALSE;
	AcpiGbl_StepToNextCall = FALSE;
	AcpiDbSetOutputDestination(ACPI_DB_CONSOLE_OUTPUT);
}
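
MsgClaimPci above packs its reply into two machine words: the low 16 bits of addr carry bus (bits 8-15), device (bits 3-7) and function (bits 0-2), and the second word carries the four routed IRQ numbers as 16-bit fields, pin INTA in the lowest bits and INTD in the highest. A minimal sketch of how a client could unpack such a reply follows; only the bit layout comes from MsgClaimPci itself, the struct and helper names are hypothetical.

#include <stdint.h>

/* Hypothetical client-side helper: unpack the MSG_ACPI_CLAIM_PCI reply
 * built by MsgClaimPci above. Only the bit layout is taken from that
 * function; the struct and helper names here are made up for illustration. */
struct pci_claim_reply {
	uint8_t bus, device, function;
	uint16_t irqs[4];	/* routed IRQ per pin INTA..INTD, 0 if not requested */
};

static struct pci_claim_reply unpack_claim_reply(uint64_t addr, uint64_t pins)
{
	struct pci_claim_reply r;
	addr &= 0xffff;
	r.bus      = (addr >> 8) & 0xff;
	r.device   = (addr >> 3) & 31;
	r.function = addr & 7;
	for (int pin = 0; pin < 4; pin++)
		r.irqs[pin] = (pins >> (16 * pin)) & 0xffff;
	return r;
}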
Example #2
/*************************************************************************
Neural network training  using  modified  Levenberg-Marquardt  with  exact
Hessian calculation and regularization. Subroutine trains  neural  network
with restarts from random positions. Algorithm is well  suited  for  small
and medium scale problems (hundreds of weights).

INPUT PARAMETERS:
    Network     -   neural network with initialized geometry
    XY          -   training set
    NPoints     -   training set size
    Decay       -   weight decay constant, >=0.001
                    Decay term 'Decay*||Weights||^2' is added to error
                    function.
                    If you don't know what Decay to choose, use 0.001.
    Restarts    -   number of restarts from random position, >0.
                    If you don't know what Restarts to choose, use 2.

OUTPUT PARAMETERS:
    Network     -   trained neural network.
    Info        -   return code:
                    * -9, if internal matrix inverse subroutine failed
                    * -2, if there is a point with class number
                          outside of [0..NOut-1].
                    * -1, if wrong parameters specified
                          (NPoints<=0, Restarts<1).
                    *  2, if task has been solved.
    Rep         -   training report

  -- ALGLIB --
     Copyright 10.03.2009 by Bochkanov Sergey
*************************************************************************/
void mlptrainlm(multilayerperceptron& network,
     const ap::real_2d_array& xy,
     int npoints,
     double decay,
     int restarts,
     int& info,
     mlpreport& rep)
{
    int nin;
    int nout;
    int wcount;
    double lmftol;
    double lmsteptol;
    int i;
    int j;
    int k;
    int mx;
    double v;
    double e;
    double enew;
    double xnorm2;
    double stepnorm;
    ap::real_1d_array g;
    ap::real_1d_array d;
    ap::real_2d_array h;
    ap::real_2d_array hmod;
    ap::real_2d_array z;
    bool spd;
    double nu;
    double lambda;
    double lambdaup;
    double lambdadown;
    int cvcnt;
    double cvrelcnt;
    lbfgsreport internalrep;
    lbfgsstate state;
    ap::real_1d_array x;
    ap::real_1d_array y;
    ap::real_1d_array wbase;
    double wstep;
    ap::real_1d_array wdir;
    ap::real_1d_array wt;
    ap::real_1d_array wx;
    int pass;
    ap::real_1d_array wbest;
    double ebest;

    mlpproperties(network, nin, nout, wcount);
    lambdaup = 10;
    lambdadown = 0.3;
    lmftol = 0.001;
    lmsteptol = 0.001;
    
    //
    // Test for inputs
    //
    if( npoints<=0||restarts<1 )
    {
        info = -1;
        return;
    }
    if( mlpissoftmax(network) )
    {
        for(i = 0; i <= npoints-1; i++)
        {
            if( ap::round(xy(i,nin))<0||ap::round(xy(i,nin))>=nout )
            {
                info = -2;
                return;
            }
        }
    }
    decay = ap::maxreal(decay, mindecay);
    info = 2;
    
    //
    // Initialize data
    //
    rep.ngrad = 0;
    rep.nhess = 0;
    rep.ncholesky = 0;
    
    //
    // General case.
    // Prepare task and network. Allocate space.
    //
    mlpinitpreprocessor(network, xy, npoints);
    g.setbounds(0, wcount-1);
    h.setbounds(0, wcount-1, 0, wcount-1);
    hmod.setbounds(0, wcount-1, 0, wcount-1);
    wbase.setbounds(0, wcount-1);
    wdir.setbounds(0, wcount-1);
    wbest.setbounds(0, wcount-1);
    wt.setbounds(0, wcount-1);
    wx.setbounds(0, wcount-1);
    ebest = ap::maxrealnumber;
    
    //
    // Multiple passes
    //
    for(pass = 1; pass <= restarts; pass++)
    {
        
        //
        // Initialize weights
        //
        mlprandomize(network);
        
        //
        // First stage of the hybrid algorithm: LBFGS
        //
        ap::vmove(&wbase(0), &network.weights(0), ap::vlen(0,wcount-1));
        minlbfgs(wcount, ap::minint(wcount, 5), wbase, 0.0, 0.0, 0.0, ap::maxint(25, wcount), 0, state);
        while(minlbfgsiteration(state))
        {
            
            //
            // gradient
            //
            ap::vmove(&network.weights(0), &state.x(0), ap::vlen(0,wcount-1));
            mlpgradbatch(network, xy, npoints, state.f, state.g);
            
            //
            // weight decay
            //
            v = ap::vdotproduct(&network.weights(0), &network.weights(0), ap::vlen(0,wcount-1));
            state.f = state.f+0.5*decay*v;
            ap::vadd(&state.g(0), &network.weights(0), ap::vlen(0,wcount-1), decay);
            
            //
            // next iteration
            //
            rep.ngrad = rep.ngrad+1;
        }
        minlbfgsresults(state, wbase, internalrep);
        ap::vmove(&network.weights(0), &wbase(0), ap::vlen(0,wcount-1));
        
        //
        // Second stage of the hybrid algorithm: LM
        //
        // Initialize H with identity matrix,
        // G with gradient,
        // E with regularized error.
        //
        mlphessianbatch(network, xy, npoints, e, g, h);
        v = ap::vdotproduct(&network.weights(0), &network.weights(0), ap::vlen(0,wcount-1));
        e = e+0.5*decay*v;
        ap::vadd(&g(0), &network.weights(0), ap::vlen(0,wcount-1), decay);
        for(k = 0; k <= wcount-1; k++)
        {
            h(k,k) = h(k,k)+decay;
        }
        rep.nhess = rep.nhess+1;
        lambda = 0.001;
        nu = 2;
        while(true)
        {
            
            //
            // 1. HMod = H+lambda*I
            // 2. Try to solve (H+Lambda*I)*dx = -g.
            //    Increase lambda if left part is not positive definite.
            //
            for(i = 0; i <= wcount-1; i++)
            {
                ap::vmove(&hmod(i, 0), &h(i, 0), ap::vlen(0,wcount-1));
                hmod(i,i) = hmod(i,i)+lambda;
            }
            spd = spdmatrixcholesky(hmod, wcount, true);
            rep.ncholesky = rep.ncholesky+1;
            if( !spd )
            {
                lambda = lambda*lambdaup*nu;
                nu = nu*2;
                continue;
            }
            if( !spdmatrixcholeskysolve(hmod, g, wcount, true, wdir) )
            {
                lambda = lambda*lambdaup*nu;
                nu = nu*2;
                continue;
            }
            ap::vmul(&wdir(0), ap::vlen(0,wcount-1), -1);
            
            //
            // Lambda found.
            // 1. Save old weights in WBase
            // 2. Test stopping criteria
            // 3. If error(w+wdir)>error(w), increase lambda
            //
            ap::vadd(&network.weights(0), &wdir(0), ap::vlen(0,wcount-1));
            xnorm2 = ap::vdotproduct(&network.weights(0), &network.weights(0), ap::vlen(0,wcount-1));
            stepnorm = ap::vdotproduct(&wdir(0), &wdir(0), ap::vlen(0,wcount-1));
            stepnorm = sqrt(stepnorm);
            enew = mlperror(network, xy, npoints)+0.5*decay*xnorm2;
            if( ap::fp_less(stepnorm,lmsteptol*(1+sqrt(xnorm2))) )
            {
                break;
            }
            if( ap::fp_greater(enew,e) )
            {
                lambda = lambda*lambdaup*nu;
                nu = nu*2;
                continue;
            }
            
            //
            // Optimize using inv(cholesky(H)) as preconditioner
            //
            if( !rmatrixtrinverse(hmod, wcount, true, false) )
            {
                
                //
                // if matrix can't be inverted then exit with errors
                // TODO: make WCount steps in direction suggested by HMod
                //
                info = -9;
                return;
            }
            ap::vmove(&wbase(0), &network.weights(0), ap::vlen(0,wcount-1));
            for(i = 0; i <= wcount-1; i++)
            {
                wt(i) = 0;
            }
            minlbfgs(wcount, wcount, wt, 0.0, 0.0, 0.0, 5, 0, state);
            while(minlbfgsiteration(state))
            {
                
                //
                // gradient
                //
                for(i = 0; i <= wcount-1; i++)
                {
                    v = ap::vdotproduct(&state.x(i), &hmod(i, i), ap::vlen(i,wcount-1));
                    network.weights(i) = wbase(i)+v;
                }
                mlpgradbatch(network, xy, npoints, state.f, g);
                for(i = 0; i <= wcount-1; i++)
                {
                    state.g(i) = 0;
                }
                for(i = 0; i <= wcount-1; i++)
                {
                    v = g(i);
                    ap::vadd(&state.g(i), &hmod(i, i), ap::vlen(i,wcount-1), v);
                }
                
                //
                // weight decay
                // grad(x'*x) = A'*(x0+A*t)
                //
                v = ap::vdotproduct(&network.weights(0), &network.weights(0), ap::vlen(0,wcount-1));
                state.f = state.f+0.5*decay*v;
                for(i = 0; i <= wcount-1; i++)
                {
                    v = decay*network.weights(i);
                    ap::vadd(&state.g(i), &hmod(i, i), ap::vlen(i,wcount-1), v);
                }
                
                //
                // next iteration
                //
                rep.ngrad = rep.ngrad+1;
            }
            minlbfgsresults(state, wt, internalrep);
            
            //
            // Accept new position.
            // Calculate Hessian
            //
            for(i = 0; i <= wcount-1; i++)
            {
                v = ap::vdotproduct(&wt(i), &hmod(i, i), ap::vlen(i,wcount-1));
                network.weights(i) = wbase(i)+v;
            }
            mlphessianbatch(network, xy, npoints, e, g, h);
            v = ap::vdotproduct(&network.weights(0), &network.weights(0), ap::vlen(0,wcount-1));
            e = e+0.5*decay*v;
            ap::vadd(&g(0), &network.weights(0), ap::vlen(0,wcount-1), decay);
            for(k = 0; k <= wcount-1; k++)
            {
                h(k,k) = h(k,k)+decay;
            }
            rep.nhess = rep.nhess+1;
            
            //
            // Update lambda
            //
            lambda = lambda*lambdadown;
            nu = 2;
        }
        
        //
        // update WBest
        //
        v = ap::vdotproduct(&network.weights(0), &network.weights(0), ap::vlen(0,wcount-1));
        e = 0.5*decay*v+mlperror(network, xy, npoints);
        if( ap::fp_less(e,ebest) )
        {
            ebest = e;
            ap::vmove(&wbest(0), &network.weights(0), ap::vlen(0,wcount-1));
        }
    }
    
    //
    // copy WBest to output
    //
    ap::vmove(&network.weights(0), &wbest(0), ap::vlen(0,wcount-1));
}
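
The core of the second stage above is the Levenberg-Marquardt damping schedule: lambda starts at 0.001 and is multiplied by lambdaup*nu (with nu doubled) whenever the Cholesky factorization of H+lambda*I fails or the trial step increases the error, and multiplied by lambdadown (with nu reset to 2) after an accepted step, until the step norm drops below lmsteptol*(1+sqrt(||w||^2)). The following self-contained sketch shows the same schedule on a one-parameter least-squares fit; it only illustrates the damping logic and is not ALGLIB code.

#include <math.h>
#include <stdio.h>

/* Tiny illustration of the lambda schedule used in the LM stage above
 * (lambdaup = 10, lambdadown = 0.3, starting lambda = 0.001): fit one
 * parameter w in y = exp(w*x) to three points by damped Gauss-Newton. */
static double errfn(double w, const double *x, const double *y, int n)
{
    double e = 0;
    for (int i = 0; i < n; i++) {
        double r = exp(w * x[i]) - y[i];
        e += 0.5 * r * r;
    }
    return e;
}

int main(void)
{
    const double x[] = {0.0, 1.0, 2.0}, y[] = {1.0, 2.0, 4.0};
    const int n = 3;
    double w = 0.0, lambda = 0.001, nu = 2.0;
    double e = errfn(w, x, y, n);
    for (int it = 0; it < 100; it++) {
        /* gradient g and Gauss-Newton "Hessian" h for this 1-D problem */
        double g = 0, h = 0;
        for (int i = 0; i < n; i++) {
            double f = exp(w * x[i]), jac = x[i] * f;
            g += (f - y[i]) * jac;
            h += jac * jac;
        }
        double step = -g / (h + lambda);      /* solve (H + lambda*I)*dw = -g */
        double enew = errfn(w + step, x, y, n);
        if (fabs(step) < 1e-10 * (1 + fabs(w)))
            break;                            /* step small enough: stop */
        if (enew > e) {                       /* step rejected: damp harder */
            lambda *= 10.0 * nu;
            nu *= 2.0;
            continue;
        }
        w += step;                            /* step accepted: relax damping */
        e = enew;
        lambda *= 0.3;
        nu = 2.0;
    }
    printf("w = %f, error = %f\n", w, e);
    return 0;
}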
Example #3
File: acpica.c Project: olsner/os
void start() {
	ACPI_STATUS status = AE_OK;

	printf("acpica: starting...\n");

	// NB! The mapping must cover at least all of physical memory - the ACPI
	// tables could be anywhere. (Could be handled by AcpiOsMapMemory though.)
	map(0, MAP_PHYS | PROT_READ | PROT_WRITE | PROT_NO_CACHE,
		(void*)ACPI_PHYS_BASE, 0, USER_MAP_MAX - ACPI_PHYS_BASE);

	__default_section_init();

	AcpiDbgLayer = 0;
	AcpiDbgLevel = ACPI_LV_REPAIR | ACPI_LV_INTERRUPTS;

	status = InitializeFullAcpi ();
	CHECK_STATUS("InitializeFullAcpi");

	int pic_mode = 0; // Default is PIC mode if something fails
	status = PrintAPICTable();
	CHECK_STATUS("PrintAPICTable");
	status = FindIOAPICs(&pic_mode);
	CHECK_STATUS("Find IOAPIC");
	status = ExecuteOSI(pic_mode);
	CHECK_STATUS("ExecuteOSI");
	// Tables we get in Bochs:
	// * DSDT: All the AML code
	// * FACS
	// * FACP
	// * APIC (= MADT)
	// * SSDT: Secondary System Description Table
	//   Contains more AML code loaded automatically by ACPICA
	// More tables on qemu:
	// * Another SSDT (Loaded by ACPICA)
	// * HPET table
//	PrintFACSTable();
//	PrintFACPTable();
	// TODO Iterate through and disable all pci interrupt link devices (call
	// _DIS). Then we'll enable the ones we actually intend to use.

	EnumeratePCI();

	AcpiWriteBitRegister(ACPI_BITREG_SCI_ENABLE, 1);
	//AcpiWriteBitRegister(ACPI_BITREG_POWER_BUTTON_ENABLE, 1);
	AcpiInstallGlobalEventHandler(GlobalEventHandler, NULL);
	AcpiEnableEvent(ACPI_EVENT_POWER_BUTTON, 0);

	for (;;) {
		ipc_dest_t rcpt = 0x100;
		ipc_arg_t arg = 0;
		ipc_arg_t arg2 = 0;
		ipc_msg_t msg = recv2(&rcpt, &arg, &arg2);
		//printf("acpica: Received %#lx from %#lx: %#lx %#lx\n", msg, rcpt, arg, arg2);
		if (msg == MSG_PULSE) {
			if (AcpiOsCheckInterrupt(rcpt, arg)) {
				continue;
			} else {
				printf("acpica: Unhandled pulse: %#x from %#lx\n", arg, rcpt);
			}
		}
		switch (msg & 0xff)
		{
		case MSG_ACPI_FIND_PCI:
			MsgFindPci(rcpt, arg);
			break;
		case MSG_ACPI_CLAIM_PCI:
			MsgClaimPci(rcpt, arg, arg2);
			break;
		// This feels a bit wrong, but as long as we use PIO access to PCI
		// configuration space, we need to serialize all accesses.
		case MSG_ACPI_READ_PCI:
			arg = PciReadWord((arg & 0x7ffffffc) | 0x80000000);
			send1(MSG_ACPI_READ_PCI, rcpt, arg);
			break;
		case MSG_ACPI_DEBUGGER_INIT:
			debugger_pre_cmd();
			send0(MSG_ACPI_DEBUGGER_INIT, rcpt);
			break;
		case MSG_ACPI_DEBUGGER_BUFFER:
			assert(debugger_buffer_pos < ACPI_DB_LINE_BUFFER_SIZE);
			AcpiGbl_DbLineBuf[debugger_buffer_pos++] = arg;
			send0(MSG_ACPI_DEBUGGER_BUFFER, rcpt);
			break;
		case MSG_ACPI_DEBUGGER_CMD:
			assert(debugger_buffer_pos < ACPI_DB_LINE_BUFFER_SIZE);
			AcpiGbl_DbLineBuf[debugger_buffer_pos++] = 0;
			putchar('\n');
			AcpiDbCommandDispatch(AcpiGbl_DbLineBuf, NULL, NULL);
			debugger_pre_cmd();
			send0(MSG_ACPI_DEBUGGER_CMD, rcpt);
			break;
		case MSG_ACPI_DEBUGGER_CLR_BUFFER:
			debugger_pre_cmd();
			send0(MSG_ACPI_DEBUGGER_CLR_BUFFER, rcpt);
			break;
		case MSG_REG_IRQ:
			RegIRQ(rcpt, arg);
			continue;
		case MSG_IRQ_ACK:
			AckIRQ(rcpt);
			continue;
		}
		// TODO Handle other stuff.
		if (rcpt == 0x100)
		{
			hmod(rcpt, 0, 0);
		}
	}
	__builtin_unreachable();

failed:
	printf("ACPI failed :( (status %x)\n", status);
	abort();
}
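
Both ACPI examples pair CHECK_STATUS("...") with a local ACPI_STATUS status variable and a failed: label, so the macro presumably bails out to that label when a call fails. Its real definition is not part of these excerpts; the following is only a plausible sketch consistent with that usage, not the project's actual macro.

/* Plausible sketch of the CHECK_STATUS macro used above. The actual
 * definition in acpica.c is not shown here; this is an assumption based
 * on the "status" variable and "failed:" label the call sites rely on. */
#define CHECK_STATUS(op) do { \
		if (ACPI_FAILURE(status)) { \
			printf("acpica: %s failed: %x\n", (op), status); \
			goto failed; \
		} \
	} while (0)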