Example #1
static char lvl_char(const int lvl)
{
	switch (lvl)
	{
	case BT_LOG_VERBOSE:
		return 'V';
	case BT_LOG_DEBUG:
		return 'D';
	case BT_LOG_INFO:
		return 'I';
	case BT_LOG_WARN:
		return 'W';
	case BT_LOG_ERROR:
		return 'E';
	case BT_LOG_FATAL:
		return 'F';
	default:
		ASSERT_UNREACHABLE("Bad log level");
		return '?';
	}
}
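
For context, a hedged sketch of how a mapper like lvl_char is typically consumed when formatting a log line prefix. The helper name, buffer handling, and format below are illustrative assumptions, not taken from the original source:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical call site: build a "W/net: " style prefix in one lookup. */
static void fmt_prefix(char *buf, size_t n, const int lvl, const char *tag)
{
	snprintf(buf, n, "%c/%s: ", lvl_char(lvl), tag);
}

Encoding the level as a single character keeps fixed-width prefixes cheap: one switch lookup, no string copies.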
Example #2
	static INLINE int apple_lvl(const int lvl)
	{
		switch (lvl)
		{
		case BT_LOG_VERBOSE:
			return 7; /* ASL_LEVEL_DEBUG / kCFLogLevelDebug */
		case BT_LOG_DEBUG:
			return 7; /* ASL_LEVEL_DEBUG / kCFLogLevelDebug */
		case BT_LOG_INFO:
			return 6; /* ASL_LEVEL_INFO / kCFLogLevelInfo */
		case BT_LOG_WARN:
			return 4; /* ASL_LEVEL_WARNING / kCFLogLevelWarning */
		case BT_LOG_ERROR:
			return 3; /* ASL_LEVEL_ERR / kCFLogLevelError */
		case BT_LOG_FATAL:
			return 0; /* ASL_LEVEL_EMERG / kCFLogLevelEmergency */
		default:
			ASSERT_UNREACHABLE("Bad log level");
			return 0; /* ASL_LEVEL_EMERG / kCFLogLevelEmergency */
		}
	}
Example #3
	static INLINE int android_lvl(const int lvl)
	{
		switch (lvl)
		{
		case BT_LOG_VERBOSE:
			return ANDROID_LOG_VERBOSE;
		case BT_LOG_DEBUG:
			return ANDROID_LOG_DEBUG;
		case BT_LOG_INFO:
			return ANDROID_LOG_INFO;
		case BT_LOG_WARN:
			return ANDROID_LOG_WARN;
		case BT_LOG_ERROR:
			return ANDROID_LOG_ERROR;
		case BT_LOG_FATAL:
			return ANDROID_LOG_FATAL;
		default:
			ASSERT_UNREACHABLE("Bad log level");
			return ANDROID_LOG_UNKNOWN;
		}
	}
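
Every mapper above funnels impossible inputs into ASSERT_UNREACHABLE. Its definition is project-specific (note that the Xen examples below invoke it with no argument), but a minimal sketch of the usual debug/release split, stated purely as an assumption rather than as the bt_log definition, is:

#include <assert.h>

#ifdef NDEBUG
	/* Release: compiles away; some projects add a compiler "unreachable" hint here. */
	#define ASSERT_UNREACHABLE(msg) ((void)0)
#else
	/* Debug: 0 && msg is always false, so assert fires and reports msg. */
	#define ASSERT_UNREACHABLE(msg) assert(0 && msg)
#endif

Pairing the assertion with a concrete return value ('?' in Example #1, ANDROID_LOG_UNKNOWN in Example #3) keeps release builds well-defined once the assertion compiles away.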
Example #4
File: none.c Project: mirage/xen
static void _update_paging_modes(struct vcpu *v)
{
    ASSERT_UNREACHABLE();
}
Example #5
File: none.c Project: mirage/xen
static void _update_cr3(struct vcpu *v, int do_locking)
{
    ASSERT_UNREACHABLE();
}
Example #6
File: none.c Project: mirage/xen
static unsigned long _gva_to_gfn(struct vcpu *v, struct p2m_domain *p2m,
                                 unsigned long va, uint32_t *pfec)
{
    ASSERT_UNREACHABLE();
    return gfn_x(INVALID_GFN);
}
Example #7
File: none.c Project: mirage/xen
static bool_t _invlpg(struct vcpu *v, unsigned long va)
{
    ASSERT_UNREACHABLE();
    return 1;
}
Example #8
File: none.c Project: mirage/xen
static int _page_fault(struct vcpu *v, unsigned long va,
                       struct cpu_user_regs *regs)
{
    ASSERT_UNREACHABLE();
    return 0;
}
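
Examples #4 through #8 are the stub handlers Xen installs when a paging mode is compiled out: every entry point asserts and returns a harmless value. A hedged sketch of how such stubs are typically gathered into an ops table follows; the struct and field names are assumptions inferred from the handler signatures, not verified against the mirage/xen tree:

/* Hypothetical ops table wiring the stubs above into one dispatch point. */
static const struct paging_mode paging_none = {
    .page_fault          = _page_fault,
    .invlpg              = _invlpg,
    .gva_to_gfn          = _gva_to_gfn,
    .update_cr3          = _update_cr3,
    .update_paging_modes = _update_paging_modes,
};

With this layout, callers dispatch through the table unconditionally, and the assertions catch any path that reaches a mode that should never be active.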
Example #9
int arch_monitor_domctl_event(struct domain *d,
                              struct xen_domctl_monitor_op *mop)
{
    struct arch_domain *ad = &d->arch;
    bool_t requested_status = (XEN_DOMCTL_MONITOR_OP_ENABLE == mop->op);

    switch ( mop->event )
    {
    case XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG:
    {
        unsigned int ctrlreg_bitmask;
        bool_t old_status;

        /* sanity check: avoid left-shift undefined behavior */
        if ( unlikely(mop->u.mov_to_cr.index > 31) )
            return -EINVAL;

        ctrlreg_bitmask = monitor_ctrlreg_bitmask(mop->u.mov_to_cr.index);
        old_status = !!(ad->monitor.write_ctrlreg_enabled & ctrlreg_bitmask);

        if ( unlikely(old_status == requested_status) )
            return -EEXIST;

        domain_pause(d);

        if ( mop->u.mov_to_cr.sync )
            ad->monitor.write_ctrlreg_sync |= ctrlreg_bitmask;
        else
            ad->monitor.write_ctrlreg_sync &= ~ctrlreg_bitmask;

        if ( mop->u.mov_to_cr.onchangeonly )
            ad->monitor.write_ctrlreg_onchangeonly |= ctrlreg_bitmask;
        else
            ad->monitor.write_ctrlreg_onchangeonly &= ~ctrlreg_bitmask;

        if ( requested_status )
            ad->monitor.write_ctrlreg_enabled |= ctrlreg_bitmask;
        else
            ad->monitor.write_ctrlreg_enabled &= ~ctrlreg_bitmask;

        if ( VM_EVENT_X86_CR3 == mop->u.mov_to_cr.index )
        {
            struct vcpu *v;
            /* Latches new CR3 mask through CR0 code. */
            for_each_vcpu ( d, v )
                hvm_update_guest_cr(v, 0);
        }

        domain_unpause(d);

        break;
    }

    case XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR:
    {
        bool_t old_status;
        int rc;
        u32 msr = mop->u.mov_to_msr.msr;

        domain_pause(d);

        old_status = monitored_msr(d, msr);

        if ( unlikely(old_status == requested_status) )
        {
            domain_unpause(d);
            return -EEXIST;
        }

        if ( requested_status )
            rc = monitor_enable_msr(d, msr);
        else
            rc = monitor_disable_msr(d, msr);

        domain_unpause(d);

        return rc;
    }

    case XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP:
    {
        bool_t old_status = ad->monitor.singlestep_enabled;

        if ( unlikely(old_status == requested_status) )
            return -EEXIST;

        domain_pause(d);
        ad->monitor.singlestep_enabled = requested_status;
        domain_unpause(d);
        break;
    }

    case XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT:
    {
        bool_t old_status = ad->monitor.software_breakpoint_enabled;

        if ( unlikely(old_status == requested_status) )
            return -EEXIST;

        domain_pause(d);
        ad->monitor.software_breakpoint_enabled = requested_status;
        domain_unpause(d);
        break;
    }

    case XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION:
    {
        bool_t old_status = ad->monitor.debug_exception_enabled;

        if ( unlikely(old_status == requested_status) )
            return -EEXIST;

        domain_pause(d);
        ad->monitor.debug_exception_enabled = requested_status;
        ad->monitor.debug_exception_sync = requested_status ?
                                            mop->u.debug_exception.sync :
                                            0;
        domain_unpause(d);
        break;
    }

    case XEN_DOMCTL_MONITOR_EVENT_CPUID:
    {
        bool_t old_status = ad->monitor.cpuid_enabled;

        if ( unlikely(old_status == requested_status) )
            return -EEXIST;

        domain_pause(d);
        ad->monitor.cpuid_enabled = requested_status;
        domain_unpause(d);
        break;
    }

    default:
        /*
         * Should not be reached unless arch_monitor_get_capabilities() is
         * not properly implemented.
         */
        ASSERT_UNREACHABLE();
        return -EOPNOTSUPP;
    }

    return 0;
}
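
As a hedged illustration of a caller, enabling singlestep monitoring would fill in the two fields the switch above reads and dispatch the op; the initializer below is illustrative and elides the domctl hypercall plumbing that normally delivers mop:

struct xen_domctl_monitor_op mop = {
    .op    = XEN_DOMCTL_MONITOR_OP_ENABLE,
    .event = XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP,
};
int rc = arch_monitor_domctl_event(d, &mop); /* -EEXIST if already enabled */

Note the contract: repeated enables report -EEXIST rather than silently succeeding, and every state change happens between domain_pause() and domain_unpause().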
Example #10
int hvm_process_io_intercept(const struct hvm_io_handler *handler,
                             ioreq_t *p)
{
    const struct hvm_io_ops *ops = handler->ops;
    int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size;
    uint64_t data;
    uint64_t addr;

    if ( p->dir == IOREQ_READ )
    {
        for ( i = 0; i < p->count; i++ )
        {
            addr = (p->type == IOREQ_TYPE_COPY) ?
                   p->addr + step * i :
                   p->addr;
            rc = ops->read(handler, addr, p->size, &data);
            if ( rc != X86EMUL_OKAY )
                break;

            if ( p->data_is_ptr )
            {
                switch ( hvm_copy_to_guest_phys(p->data + step * i,
                                                &data, p->size) )
                {
                case HVMCOPY_okay:
                    break;
                case HVMCOPY_bad_gfn_to_mfn:
                    /* Drop the write as real hardware would. */
                    continue;
                case HVMCOPY_bad_gva_to_gfn:
                case HVMCOPY_gfn_paged_out:
                case HVMCOPY_gfn_shared:
                    ASSERT_UNREACHABLE();
                    /* fall through */
                default:
                    domain_crash(current->domain);
                    return X86EMUL_UNHANDLEABLE;
                }
            }
            else
                p->data = data;
        }
    }
    else /* p->dir == IOREQ_WRITE */
    {
        for ( i = 0; i < p->count; i++ )
        {
            if ( p->data_is_ptr )
            {
                switch ( hvm_copy_from_guest_phys(&data, p->data + step * i,
                                                  p->size) )
                {
                case HVMCOPY_okay:
                    break;
                case HVMCOPY_bad_gfn_to_mfn:
                    data = ~0;
                    break;
                case HVMCOPY_bad_gva_to_gfn:
                case HVMCOPY_gfn_paged_out:
                case HVMCOPY_gfn_shared:
                    ASSERT_UNREACHABLE();
                    /* fall through */
                default:
                    domain_crash(current->domain);
                    return X86EMUL_UNHANDLEABLE;
                }
            }
            else
                data = p->data;

            addr = (p->type == IOREQ_TYPE_COPY) ?
                   p->addr + step * i :
                   p->addr;
            rc = ops->write(handler, addr, p->size, data);
            if ( rc != X86EMUL_OKAY )
                break;
        }
    }

    if ( i )
    {
        p->count = i;
        rc = X86EMUL_OKAY;
    }
    else if ( rc == X86EMUL_UNHANDLEABLE )
    {
        /*
         * Don't forward entire batches to the device model: this would
         * prevent the internal handlers from seeing subsequent iterations
         * of the request.
         */
        p->count = 1;
    }

    return rc;
}
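
The ops->read call above fixes the handler signature: (handler, address, size, out-pointer) returning an X86EMUL_* code. A minimal hedged sketch of a conforming read handler, with the name and behavior chosen for illustration only:

/* Hypothetical handler: unbacked MMIO reads return all-ones. */
static int null_read(const struct hvm_io_handler *handler,
                     uint64_t addr, uint32_t size, uint64_t *data)
{
    *data = ~(uint64_t)0; /* What absent hardware would drive on the bus. */
    return X86EMUL_OKAY;  /* Lets hvm_process_io_intercept continue the batch. */
}

Any other return value stops the loop early, and the p->count = i adjustment above reports only the iterations that actually completed.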