UINT32
AcpiEvGpeDispatch (
    ACPI_NAMESPACE_NODE     *GpeDevice,
    ACPI_GPE_EVENT_INFO     *GpeEventInfo,
    UINT32                  GpeNumber)
{
    ACPI_STATUS             Status;
    UINT32                  ReturnValue;


    ACPI_FUNCTION_TRACE (EvGpeDispatch);


    /* Invoke global event handler if present */

    AcpiGpeCount++;
    if (AcpiGbl_GlobalEventHandler)
    {
        AcpiGbl_GlobalEventHandler (ACPI_EVENT_TYPE_GPE, GpeDevice,
             GpeNumber, AcpiGbl_GlobalEventHandlerContext);
    }

    /*
     * Always disable the GPE so that it does not keep firing before
     * any asynchronous activity completes (either from the execution
     * of a GPE method or an asynchronous GPE handler.)
     *
     * If there is no handler or method to run, just disable the
     * GPE and leave it disabled permanently to prevent further such
     * pointless events from firing.
     */
    Status = AcpiHwLowSetGpe (GpeEventInfo, ACPI_GPE_DISABLE);
    if (ACPI_FAILURE (Status))
    {
        ACPI_EXCEPTION ((AE_INFO, Status,
            "Unable to disable GPE %02X", GpeNumber));
        return_UINT32 (ACPI_INTERRUPT_NOT_HANDLED);
    }

    /*
     * If edge-triggered, clear the GPE status bit now. Note that
     * level-triggered events are cleared after the GPE is serviced.
     */
    if ((GpeEventInfo->Flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
            ACPI_GPE_EDGE_TRIGGERED)
    {
        Status = AcpiHwClearGpe (GpeEventInfo);
        if (ACPI_FAILURE (Status))
        {
            ACPI_EXCEPTION ((AE_INFO, Status,
                "Unable to clear GPE %02X", GpeNumber));
            (void) AcpiHwLowSetGpe (GpeEventInfo,
                    ACPI_GPE_CONDITIONAL_ENABLE);
            return_UINT32 (ACPI_INTERRUPT_NOT_HANDLED);
        }
    }

    /*
     * Dispatch the GPE to either an installed handler or the control
     * method associated with this GPE (_Lxx or _Exx). If a handler
     * exists, we invoke it and do not attempt to run the method.
     * If there is neither a handler nor a method, leave the GPE
     * disabled.
     */
    switch (GpeEventInfo->Flags & ACPI_GPE_DISPATCH_MASK)
    {
    case ACPI_GPE_DISPATCH_HANDLER:

        /* Invoke the installed handler (at interrupt level) */

        ReturnValue = GpeEventInfo->Dispatch.Handler->Address (
            GpeDevice, GpeNumber,
            GpeEventInfo->Dispatch.Handler->Context);

        /* If requested, clear (if level-triggered) and reenable the GPE */

        if (ReturnValue & ACPI_REENABLE_GPE)
        {
            (void) AcpiEvFinishGpe (GpeEventInfo);
        }
        break;

    case ACPI_GPE_DISPATCH_METHOD:
    case ACPI_GPE_DISPATCH_NOTIFY:
        /*
         * Execute the method associated with the GPE
         * NOTE: Level-triggered GPEs are cleared after the method completes.
         */
        Status = AcpiOsExecute (OSL_GPE_HANDLER,
                    AcpiEvAsynchExecuteGpeMethod, GpeEventInfo);
        if (ACPI_FAILURE (Status))
        {
            ACPI_EXCEPTION ((AE_INFO, Status,
                "Unable to queue handler for GPE %02X - event disabled",
                GpeNumber));
        }
        break;

    default:
        /*
         * No handler or method to run!
         * 03/2010: This case should no longer be possible. We will not allow
         * a GPE to be enabled if it has no handler or method.
         */
        ACPI_ERROR ((AE_INFO,
            "No handler or method for GPE %02X, disabling event",
            GpeNumber));
        break;
    }

    return_UINT32 (ACPI_INTERRUPT_HANDLED);
}
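
Usage note (not part of the listing above): the ACPI_GPE_DISPATCH_HANDLER path invokes a handler installed through the public AcpiInstallGpeHandler interface. The sketch below shows the shape of such a handler; MyGpeHandler, MyContext, and GPE number 0x13 are hypothetical.

static UINT32
MyGpeHandler (
    ACPI_HANDLE             GpeDevice,
    UINT32                  GpeNumber,
    void                    *Context)
{
    /* Invoked at interrupt level by AcpiEvGpeDispatch; keep work minimal */

    /*
     * Returning ACPI_REENABLE_GPE asks the dispatcher to call
     * AcpiEvFinishGpe, which clears a level-triggered GPE's status
     * bit and re-enables the event.
     */
    return (ACPI_REENABLE_GPE);
}

ACPI_STATUS             Status;
void                    *MyContext = NULL;  /* Hypothetical context */

/* Install the handler, then enable the (hypothetical) GPE 0x13 */

Status = AcpiInstallGpeHandler (NULL, 0x13,
    ACPI_GPE_LEVEL_TRIGGERED, MyGpeHandler, MyContext);
if (ACPI_SUCCESS (Status))
{
    Status = AcpiEnableGpe (NULL, 0x13);
}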
Example #2
u32
acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
		    struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
	acpi_status status;
	u32 return_value;

	ACPI_FUNCTION_TRACE(ev_gpe_dispatch);

	/* Invoke global event handler if present */

	acpi_gpe_count++;
	if (acpi_gbl_global_event_handler) {
		acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE, gpe_device,
					      gpe_number,
					      acpi_gbl_global_event_handler_context);
	}

	/*
	 * If edge-triggered, clear the GPE status bit now. Note that
	 * level-triggered events are cleared after the GPE is serviced.
	 */
	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_EDGE_TRIGGERED) {
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to clear GPE%02X", gpe_number));
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}
	}

	/*
	 * Always disable the GPE so that it does not keep firing before
	 * any asynchronous activity completes (either from the execution
	 * of a GPE method or an asynchronous GPE handler.)
	 *
	 * If there is no handler or method to run, just disable the
	 * GPE and leave it disabled permanently to prevent further such
	 * pointless events from firing.
	 */
	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"Unable to disable GPE%02X", gpe_number));
		return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
	}

	/*
	 * Dispatch the GPE to either an installed handler or the control
	 * method associated with this GPE (_Lxx or _Exx). If a handler
	 * exists, we invoke it and do not attempt to run the method.
	 * If there is neither a handler nor a method, leave the GPE
	 * disabled.
	 */
	switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
	case ACPI_GPE_DISPATCH_HANDLER:

		/* Invoke the installed handler (at interrupt level) */

		return_value =
		    gpe_event_info->dispatch.handler->address(gpe_device,
							      gpe_number,
							      gpe_event_info->
							      dispatch.handler->
							      context);

		/* If requested, clear (if level-triggered) and reenable the GPE */

		if (return_value & ACPI_REENABLE_GPE) {
			(void)acpi_ev_finish_gpe(gpe_event_info);
		}
		break;

	case ACPI_GPE_DISPATCH_METHOD:
	case ACPI_GPE_DISPATCH_NOTIFY:

		/*
		 * Execute the method associated with the GPE
		 * NOTE: Level-triggered GPEs are cleared after the method completes.
		 */
		status = acpi_os_execute(OSL_GPE_HANDLER,
					 acpi_ev_asynch_execute_gpe_method,
					 gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to queue handler for GPE%02X - event disabled",
					gpe_number));
		}
		break;

	default:

		/*
		 * No handler or method to run!
		 * 03/2010: This case should no longer be possible. We will not allow
		 * a GPE to be enabled if it has no handler or method.
		 */
		ACPI_ERROR((AE_INFO,
			    "No handler or method for GPE%02X, disabling event",
			    gpe_number));

		break;
	}

	return_UINT32(ACPI_INTERRUPT_HANDLED);
}
Example #3
UINT32
AcpiUtCheckAddressRange (
    ACPI_ADR_SPACE_TYPE     SpaceId,
    ACPI_PHYSICAL_ADDRESS   Address,
    UINT32                  Length,
    BOOLEAN                 Warn)
{
    ACPI_ADDRESS_RANGE      *RangeInfo;
    ACPI_PHYSICAL_ADDRESS   EndAddress;
    char                    *Pathname;
    UINT32                  OverlapCount = 0;


    ACPI_FUNCTION_TRACE (UtCheckAddressRange);


    if ((SpaceId != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
        (SpaceId != ACPI_ADR_SPACE_SYSTEM_IO))
    {
        return_UINT32 (0);
    }

    RangeInfo = AcpiGbl_AddressRangeList[SpaceId];
    EndAddress = Address + Length - 1;

    /* Check entire list for all possible conflicts */

    while (RangeInfo)
    {
        /*
         * Check if the requested address/length overlaps this
         * address range. There are four cases to consider:
         *
         * 1) Input address/length is contained completely in the
         *    address range
         * 2) Input address/length overlaps range at the range start
         * 3) Input address/length overlaps range at the range end
         * 4) Input address/length completely encompasses the range
         */
        if ((Address <= RangeInfo->EndAddress) &&
            (EndAddress >= RangeInfo->StartAddress))
        {
            /* Found an address range overlap */

            OverlapCount++;
            if (Warn)   /* Optional warning message */
            {
                Pathname = AcpiNsGetNormalizedPathname (RangeInfo->RegionNode, TRUE);

                ACPI_WARNING ((AE_INFO,
                    "%s range 0x%8.8X%8.8X-0x%8.8X%8.8X conflicts with OpRegion 0x%8.8X%8.8X-0x%8.8X%8.8X (%s)",
                    AcpiUtGetRegionName (SpaceId),
                    ACPI_FORMAT_UINT64 (Address),
                    ACPI_FORMAT_UINT64 (EndAddress),
                    ACPI_FORMAT_UINT64 (RangeInfo->StartAddress),
                    ACPI_FORMAT_UINT64 (RangeInfo->EndAddress),
                    Pathname));
                ACPI_FREE (Pathname);
            }
        }

        RangeInfo = RangeInfo->Next;
    }

    return_UINT32 (OverlapCount);
}
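
Usage note: callers outside the utilities layer normally reach this check through the public AcpiCheckAddressRange wrapper, which acquires the namespace mutex before walking the list. A minimal sketch; the address and length here are made up for illustration.

UINT32                  Overlaps;

/* Warn == TRUE emits the conflict warning shown above for each overlap */

Overlaps = AcpiCheckAddressRange (ACPI_ADR_SPACE_SYSTEM_MEMORY,
    0xFED40000, 0x1000, TRUE);
if (Overlaps)
{
    /* Requested range overlaps one or more AML operation regions */
}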
Example #4
u32
acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_gpe_dispatch);

	acpi_os_gpe_count(gpe_number);

	/*
	 * If edge-triggered, clear the GPE status bit now. Note that
	 * level-triggered events are cleared after the GPE is serviced.
	 */
	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_EDGE_TRIGGERED) {
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to clear GPE[%2X]",
					gpe_number));
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}
	}

	/*
	 * Dispatch the GPE to either an installed handler, or the control method
	 * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke
	 * it and do not attempt to run the method. If there is neither a handler
	 * nor a method, we disable this GPE to prevent further such pointless
	 * events from firing.
	 */
	switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
	case ACPI_GPE_DISPATCH_HANDLER:

		/*
		 * Invoke the installed handler (at interrupt level)
		 * Ignore return status for now.
		 * TBD: leave GPE disabled on error?
		 */
		(void)gpe_event_info->dispatch.handler->address(gpe_event_info->
								dispatch.
								handler->
								context);

		/* It is now safe to clear level-triggered events. */

		if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
		    ACPI_GPE_LEVEL_TRIGGERED) {
			status = acpi_hw_clear_gpe(gpe_event_info);
			if (ACPI_FAILURE(status)) {
				ACPI_EXCEPTION((AE_INFO, status,
						"Unable to clear GPE[%2X]",
						gpe_number));
				return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
			}
		}
		break;

	case ACPI_GPE_DISPATCH_METHOD:

		/*
		 * Disable the GPE, so it doesn't keep firing before the method has a
		 * chance to run (it runs asynchronously with interrupts enabled).
		 */
		status = acpi_ev_disable_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to disable GPE[%2X]",
					gpe_number));
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}

		/*
		 * Execute the method associated with the GPE
		 * NOTE: Level-triggered GPEs are cleared after the method completes.
		 */
		status = acpi_os_execute(OSL_GPE_HANDLER,
					 acpi_ev_asynch_execute_gpe_method,
					 gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to queue handler for GPE[%2X] - event disabled",
					gpe_number));
		}
		break;

	default:

		/* No handler or method to run! */

		ACPI_ERROR((AE_INFO,
			    "No handler or method for GPE[%2X], disabling event",
			    gpe_number));

		/*
		 * Disable the GPE. The GPE will remain disabled until the ACPICA
		 * Core Subsystem is restarted, or a handler is installed.
		 */
		status = acpi_ev_disable_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to disable GPE[%2X]",
					gpe_number));
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}
		break;
	}

	return_UINT32(ACPI_INTERRUPT_HANDLED);
}
Example #5
u32
acpi_ut_check_address_range(acpi_adr_space_type space_id,
			    acpi_physical_address address, u32 length, u8 warn)
{
	struct acpi_address_range *range_info;
	acpi_physical_address end_address;
	char *pathname;
	u32 overlap_count = 0;

	ACPI_FUNCTION_TRACE(ut_check_address_range);

	if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
	    (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
		return_UINT32(0);
	}

	range_info = acpi_gbl_address_range_list[space_id];
	end_address = address + length - 1;

	/* Check entire list for all possible conflicts */

	while (range_info) {
		/*
		 * Check if the requested address/length overlaps this
		 * address range. There are four cases to consider:
		 *
		 * 1) Input address/length is contained completely in the
		 *    address range
		 * 2) Input address/length overlaps range at the range start
		 * 3) Input address/length overlaps range at the range end
		 * 4) Input address/length completely encompasses the range
		 */
		if ((address <= range_info->end_address) &&
		    (end_address >= range_info->start_address)) {

			/* Found an address range overlap */

			overlap_count++;
			if (warn) {	/* Optional warning message */
				pathname =
				    acpi_ns_get_normalized_pathname(range_info->
								    region_node,
								    TRUE);

				ACPI_WARNING((AE_INFO,
					      "%s range 0x%8.8X%8.8X-0x%8.8X%8.8X conflicts with OpRegion 0x%8.8X%8.8X-0x%8.8X%8.8X (%s)",
					      acpi_ut_get_region_name(space_id),
					      ACPI_FORMAT_UINT64(address),
					      ACPI_FORMAT_UINT64(end_address),
					      ACPI_FORMAT_UINT64(range_info->
								 start_address),
					      ACPI_FORMAT_UINT64(range_info->
								 end_address),
					      pathname));
				ACPI_FREE(pathname);
			}
		}

		range_info = range_info->next;
	}

	return_UINT32(overlap_count);
}
Example #6
File: exprep.c Project: PyroOS/Pyro
static u32
acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
			    u8 field_flags, u32 * return_byte_alignment)
{
	u32 access;
	u32 byte_alignment;
	u32 bit_length;

	ACPI_FUNCTION_TRACE(ex_decode_field_access);

	access = (field_flags & AML_FIELD_ACCESS_TYPE_MASK);

	switch (access) {
	case AML_FIELD_ACCESS_ANY:

#ifdef ACPI_UNDER_DEVELOPMENT
		byte_alignment =
		    acpi_ex_generate_access(obj_desc->common_field.
					    start_field_bit_offset,
					    obj_desc->common_field.bit_length,
					    0xFFFFFFFF
					    /* Temp until we pass region_length as parameter */
		    );
		bit_length = byte_alignment * 8;
#endif

		byte_alignment = 1;
		bit_length = 8;
		break;

	case AML_FIELD_ACCESS_BYTE:
	case AML_FIELD_ACCESS_BUFFER:	/* ACPI 2.0 (SMBus Buffer) */
		byte_alignment = 1;
		bit_length = 8;
		break;

	case AML_FIELD_ACCESS_WORD:
		byte_alignment = 2;
		bit_length = 16;
		break;

	case AML_FIELD_ACCESS_DWORD:
		byte_alignment = 4;
		bit_length = 32;
		break;

	case AML_FIELD_ACCESS_QWORD:	/* ACPI 2.0 */
		byte_alignment = 8;
		bit_length = 64;
		break;

	default:
		/* Invalid field access type */

		ACPI_ERROR((AE_INFO, "Unknown field access type %X", access));
		return_UINT32(0);
	}

	if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_BUFFER_FIELD) {
		/*
		 * buffer_field access can be on any byte boundary, so the
		 * byte_alignment is always 1 byte -- regardless of any byte_alignment
		 * implied by the field access type.
		 */
		byte_alignment = 1;
	}

	*return_byte_alignment = byte_alignment;
	return_UINT32(bit_length);
}
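
For reference, the access type occupies the low four bits of the AML field flags byte (AML_FIELD_ACCESS_TYPE_MASK is 0x0F), so a word-access field decodes as in the sketch below. The function is static, so this is illustrative only; obj_desc stands in for a real operand object.

u8 field_flags = AML_FIELD_ACCESS_WORD;	/* low 4 bits: access type */
u32 byte_alignment;
u32 bit_length;

bit_length = acpi_ex_decode_field_access(obj_desc, field_flags,
					 &byte_alignment);

/*
 * Here bit_length == 16 and byte_alignment == 2, unless obj_desc is a
 * buffer field, in which case byte_alignment is forced back to 1.
 */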
Example #7
UINT32
AcpiNsBuildNormalizedPath (
    ACPI_NAMESPACE_NODE     *Node,
    char                    *FullPath,
    UINT32                  PathSize,
    BOOLEAN                 NoTrailing)
{
    UINT32                  Length = 0, i;
    char                    Name[ACPI_NAME_SIZE];
    BOOLEAN                 DoNoTrailing;
    char                    c, *Left, *Right;
    ACPI_NAMESPACE_NODE     *NextNode;


    ACPI_FUNCTION_TRACE_PTR (NsBuildNormalizedPath, Node);


#define ACPI_PATH_PUT8(Path, Size, Byte, Length)    \
    do {                                            \
        if ((Length) < (Size))                      \
        {                                           \
            (Path)[(Length)] = (Byte);              \
        }                                           \
        (Length)++;                                 \
    } while (0)

    /*
     * Make sure the PathSize is correct, so that we don't need to
     * validate both FullPath and PathSize.
     */
    if (!FullPath)
    {
        PathSize = 0;
    }

    if (!Node)
    {
        goto BuildTrailingNull;
    }

    NextNode = Node;
    while (NextNode && NextNode != AcpiGbl_RootNode)
    {
        if (NextNode != Node)
        {
            ACPI_PATH_PUT8(FullPath, PathSize, AML_DUAL_NAME_PREFIX, Length);
        }

        ACPI_MOVE_32_TO_32 (Name, &NextNode->Name);
        DoNoTrailing = NoTrailing;
        for (i = 0; i < 4; i++)
        {
            c = Name[4-i-1];
            if (DoNoTrailing && c != '_')
            {
                DoNoTrailing = FALSE;
            }
            if (!DoNoTrailing)
            {
                ACPI_PATH_PUT8(FullPath, PathSize, c, Length);
            }
        }

        NextNode = NextNode->Parent;
    }

    ACPI_PATH_PUT8(FullPath, PathSize, AML_ROOT_PREFIX, Length);

    /* Reverse the path string */

    if (Length <= PathSize)
    {
        Left = FullPath;
        Right = FullPath+Length - 1;

        while (Left < Right)
        {
            c = *Left;
            *Left++ = *Right;
            *Right-- = c;
        }
    }

    /* Append the trailing null */

BuildTrailingNull:
    ACPI_PATH_PUT8 (FullPath, PathSize, '\0', Length);

#undef ACPI_PATH_PUT8

    return_UINT32 (Length);
}
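
Usage note: the ACPI_PATH_PUT8 macro keeps incrementing Length past PathSize, so the function always returns the full required size (including the trailing null) even when the buffer is too small or absent. Callers can therefore size the buffer in two passes; a minimal sketch, assuming the ACPICA allocation macros:

UINT32                  Size;
char                    *FullPath;

/* First pass: no buffer, just compute the required length */

Size = AcpiNsBuildNormalizedPath (Node, NULL, 0, FALSE);

FullPath = ACPI_ALLOCATE_ZEROED (Size);
if (FullPath)
{
    /* Second pass: actually build the path */

    (void) AcpiNsBuildNormalizedPath (Node, FullPath, Size, FALSE);
}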
Example #8
static UINT32
AcpiExDecodeFieldAccess (
    ACPI_OPERAND_OBJECT     *ObjDesc,
    UINT8                   FieldFlags,
    UINT32                  *ReturnByteAlignment)
{
    UINT32                  Access;
    UINT32                  ByteAlignment;
    UINT32                  BitLength;


    ACPI_FUNCTION_TRACE (ExDecodeFieldAccess);


    Access = (FieldFlags & AML_FIELD_ACCESS_TYPE_MASK);

    switch (Access)
    {
    case AML_FIELD_ACCESS_ANY:

#ifdef ACPI_UNDER_DEVELOPMENT
        ByteAlignment =
            AcpiExGenerateAccess (ObjDesc->CommonField.StartFieldBitOffset,
                ObjDesc->CommonField.BitLength,
                0xFFFFFFFF /* Temp until we pass RegionLength as parameter */);
        BitLength = ByteAlignment * 8;
#endif

        ByteAlignment = 1;
        BitLength = 8;
        break;

    case AML_FIELD_ACCESS_BYTE:
    case AML_FIELD_ACCESS_BUFFER:   /* ACPI 2.0 (SMBus Buffer) */

        ByteAlignment = 1;
        BitLength     = 8;
        break;

    case AML_FIELD_ACCESS_WORD:

        ByteAlignment = 2;
        BitLength     = 16;
        break;

    case AML_FIELD_ACCESS_DWORD:

        ByteAlignment = 4;
        BitLength     = 32;
        break;

    case AML_FIELD_ACCESS_QWORD:    /* ACPI 2.0 */

        ByteAlignment = 8;
        BitLength     = 64;
        break;

    default:

        /* Invalid field access type */

        ACPI_ERROR ((AE_INFO,
            "Unknown field access type 0x%X",
            Access));

        return_UINT32 (0);
    }

    if (ObjDesc->Common.Type == ACPI_TYPE_BUFFER_FIELD)
    {
        /*
         * BufferField access can be on any byte boundary, so the
         * ByteAlignment is always 1 byte -- regardless of any ByteAlignment
         * implied by the field access type.
         */
        ByteAlignment = 1;
    }

    *ReturnByteAlignment = ByteAlignment;
    return_UINT32 (BitLength);
}
Example #9
static TPM_RESULT cap_property(UINT32 subCapSize, BYTE *subCap, 
                               UINT32 *respSize, BYTE **resp)
{
  UINT32 i, j, property;

  if (tpm_unmarshal_UINT32(&subCap, &subCapSize, &property))
    return TPM_BAD_MODE;
  switch (property) {
    case TPM_CAP_PROP_PCR:
      debug("[TPM_CAP_PROP_PCR]");
      return return_UINT32(respSize, resp, TPM_NUM_PCR);

    case TPM_CAP_PROP_DIR:
      debug("[TPM_CAP_PROP_DIR]");
      return return_UINT32(respSize, resp, 1);

    case TPM_CAP_PROP_MANUFACTURER:
      debug("[TPM_CAP_PROP_MANUFACTURER]");
      return return_UINT32(respSize, resp, TPM_MANUFACTURER);

    case TPM_CAP_PROP_KEYS:
      debug("[TPM_CAP_PROP_KEYS]");
      for (i = 0, j = TPM_MAX_KEYS; i < TPM_MAX_KEYS; i++)
        if (tpmData.permanent.data.keys[i].valid) j--;
      return return_UINT32(respSize, resp, j); 

    case TPM_CAP_PROP_MIN_COUNTER:
      debug("[TPM_CAP_PROP_MIN_COUNTER]");
      return return_UINT32(respSize, resp, 1);

    case TPM_CAP_PROP_AUTHSESS:
      debug("[TPM_CAP_PROP_AUTHSESS]");
      for (i = 0, j = TPM_MAX_SESSIONS; i < TPM_MAX_SESSIONS; i++)
        if (tpmData.stany.data.sessions[i].type != TPM_ST_INVALID) j--;
      return return_UINT32(respSize, resp, j);

    case TPM_CAP_PROP_TRANSESS:
      debug("[TPM_CAP_PROP_TRANSESS]");
      for (i = 0, j = TPM_MAX_SESSIONS; i < TPM_MAX_SESSIONS; i++)
        if (tpmData.stany.data.sessions[i].type != TPM_ST_INVALID) j--;
      return return_UINT32(respSize, resp, j);

    case TPM_CAP_PROP_COUNTERS:
      debug("[TPM_CAP_PROP_COUNTERS]");
      for (i = 0, j = TPM_MAX_COUNTERS; i < TPM_MAX_COUNTERS; i++)
        if (tpmData.permanent.data.counters[i].valid) j--;
      return return_UINT32(respSize, resp, j);

    case TPM_CAP_PROP_MAX_AUTHSESS:
      debug("[TPM_CAP_PROP_MAX_AUTHSESS]");
      return return_UINT32(respSize, resp, TPM_MAX_SESSIONS);

    case TPM_CAP_PROP_MAX_TRANSESS:
      debug("[TPM_CAP_PROP_MAX_TRANSESS]");
      return return_UINT32(respSize, resp, TPM_MAX_SESSIONS);

    case TPM_CAP_PROP_MAX_COUNTERS:
      debug("[TPM_CAP_PROP_MAX_COUNTERS]");
      return return_UINT32(respSize, resp, TPM_MAX_COUNTERS);

    case TPM_CAP_PROP_MAX_KEYS:
      debug("[TPM_CAP_PROP_MAX_KEYS]");
      return return_UINT32(respSize, resp, TPM_MAX_KEYS);

    case TPM_CAP_PROP_OWNER:
      debug("[TPM_CAP_PROP_OWNER]");
      return return_BOOL(respSize, resp, tpmData.permanent.flags.owned);

    case TPM_CAP_PROP_CONTEXT:
      debug("[TPM_CAP_PROP_CONTEXT]");
      for (i = 0, j = 0; i < TPM_MAX_SESSION_LIST; i++)
        if (tpmData.stany.data.contextList[i] == 0) j++;
      return return_UINT32(respSize, resp, j);

    case TPM_CAP_PROP_MAX_CONTEXT:
      debug("[TPM_CAP_PROP_MAX_CONTEXT]");
      return return_UINT32(respSize, resp, TPM_MAX_SESSION_LIST);

    case TPM_CAP_PROP_FAMILYROWS:
      debug("[TPM_CAP_PROP_FAMILYROWS]");
      /* TODO: TPM_CAP_PROP_FAMILYROWS */
      return TPM_FAIL;

    case TPM_CAP_PROP_TIS_TIMEOUT: {
      debug("[TPM_CAP_PROP_TIS_TIMEOUT]");
      /* TODO: TPM_CAP_PROP_TIS_TIMEOUT: Measure these values and determine correct ones */
      /* Braces scope the declarations to this case; a declaration directly
       * under a case label is not portable C. */
      UINT32 len = *respSize = 16;
      BYTE *ptr = *resp = tpm_malloc(*respSize);
      if (ptr == NULL ||
          tpm_marshal_UINT32(&ptr, &len, 200000) ||
          tpm_marshal_UINT32(&ptr, &len, 200000) ||
          tpm_marshal_UINT32(&ptr, &len, 200000) ||
          tpm_marshal_UINT32(&ptr, &len, 200000)) {
        tpm_free(*resp);
        return TPM_FAIL;
      }
      return TPM_SUCCESS;
    }

    case TPM_CAP_PROP_STARTUP_EFFECT:
      debug("[TPM_CAP_PROP_STARTUP_EFFECT]");
      /* TODO: TPM_CAP_PROP_STARTUP_EFFECT */
      return TPM_FAIL;

    case TPM_CAP_PROP_DELEGATE_ROW:
      debug("[TPM_CAP_PROP_DELEGATE_ROW]");
      /* TODO: TPM_CAP_PROP_DELEGATE_ROW */
      return TPM_FAIL;

    case TPM_CAP_PROP_DAA_MAX:
      debug("[TPM_CAP_PROP_DAA_MAX]");
      return return_UINT32(respSize, resp, TPM_MAX_SESSIONS_DAA);

    case TPM_CAP_PROP_SESSION_DAA:
      debug("[TPM_CAP_PROP_SESSION_DAA]");
      for (i = 0, j = TPM_MAX_SESSIONS_DAA; i < TPM_MAX_SESSIONS_DAA; i++)
        if (tpmData.stany.data.sessionsDAA[i].type != TPM_ST_INVALID) j--;
      return return_UINT32(respSize, resp, j);

    case TPM_CAP_PROP_CONTEXT_DIST:
      debug("[TPM_CAP_PROP_CONTEXT_DIST]");
      /* TODO: TPM_CAP_PROP_CONTEXT_DIST */
      return TPM_FAIL;

    case TPM_CAP_PROP_DAA_INTERRUPT:
      debug("[TPM_CAP_PROP_DAA_INTERRUPT]");
      /* A value of TRUE indicates that the TPM will accept ANY command 
       * while executing a DAA Join or Sign. A value of FALSE indicates 
       * that the TPM will invalidate the DAA Join or Sign upon the 
       * receipt of any command other than the next join/sign in the 
       * session or a TPM_SaveContext. */
      return return_BOOL(respSize, resp, TRUE);

    case TPM_CAP_PROP_SESSIONS:
      debug("[TPM_CAP_PROP_SESSIONS]");
      for (i = 0, j = TPM_MAX_SESSIONS; i < TPM_MAX_SESSIONS; i++)
        if (tpmData.stany.data.sessions[i].type != TPM_ST_INVALID) j--;
      return return_UINT32(respSize, resp, j);

    case TPM_CAP_PROP_MAX_SESSIONS:
      debug("[TPM_CAP_PROP_MAX_SESSIONS]");
      return return_UINT32(respSize, resp, TPM_MAX_SESSIONS);

    case TPM_CAP_PROP_CMK_RESTRICTION:
      debug("[TPM_CAP_PROP_CMK_RESTRICTION]");
      /* TODO: TPM_CAP_PROP_CMK_RESTRICTION */
      return TPM_FAIL;

    case TPM_CAP_PROP_DURATION: {
      debug("[TPM_CAP_PROP_DURATION]");
      /* TODO: TPM_CAP_PROP_DURATION: Measure these values and return accurate ones */
      BYTE dur[] = {0x00, 0x00, 0x00, 0x0c, 0x00, 0x07, 0xa1, 0x20,
                    0x00, 0x1e, 0x84, 0x80, 0x11, 0xe1, 0xa3, 0x00};
      *respSize = 16;
      *resp = tpm_malloc(*respSize);
      if (*resp == NULL)
        return TPM_FAIL;
      memcpy(*resp, dur, 16);
      /* Response was built successfully, so report success rather than
       * TPM_FAIL, and never memcpy into an unchecked allocation. */
      return TPM_SUCCESS;
    }

    case TPM_CAP_PROP_ACTIVE_COUNTER:
      debug("[TPM_CAP_PROP_ACTIVE_COUNTER]");
      /* TODO: TPM_CAP_PROP_ACTIVE_COUNTER */
      return TPM_FAIL;

    case TPM_CAP_PROP_MAX_NV_AVAILABLE:
      debug("[TPM_CAP_PROP_MAX_NV_AVAILABLE]");
      /* TODO: TPM_CAP_PROP_MAX_NV_AVAILABLE */
      return TPM_FAIL;

    case TPM_CAP_PROP_INPUT_BUFFER:
      debug("[TPM_CAP_PROP_INPUT_BUFFER]");
      /* TODO: TPM_CAP_PROP_INPUT_BUFFER */
      return TPM_FAIL;

    default:
      return TPM_BAD_MODE;
  }
}
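
Note: the return_UINT32 helper used throughout cap_property is not shown in this listing. Judging from the marshaling pattern in the TPM_CAP_PROP_TIS_TIMEOUT case, it presumably allocates a 4-byte response and marshals the value, along these lines (a sketch, not the emulator's actual code):

static TPM_RESULT return_UINT32(UINT32 *respSize, BYTE **resp, UINT32 value)
{
  /* Allocate a 4-byte response buffer and marshal the value into it */
  UINT32 len = *respSize = 4;
  BYTE *ptr = *resp = tpm_malloc(*respSize);
  if (ptr == NULL || tpm_marshal_UINT32(&ptr, &len, value)) {
    tpm_free(*resp);
    *respSize = 0;
    return TPM_FAIL;
  }
  return TPM_SUCCESS;
}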