static void __attribute__((unused)) build_assertions(void)
{
    XC_BUILD_BUG_ON(sizeof(struct xc_sr_ihdr) != 24);
    XC_BUILD_BUG_ON(sizeof(struct xc_sr_dhdr) != 16);
    XC_BUILD_BUG_ON(sizeof(struct xc_sr_rhdr) != 8);

    XC_BUILD_BUG_ON(sizeof(struct xc_sr_rec_page_data_header)  != 8);
    XC_BUILD_BUG_ON(sizeof(struct xc_sr_rec_x86_pv_info)       != 8);
    XC_BUILD_BUG_ON(sizeof(struct xc_sr_rec_x86_pv_p2m_frames) != 8);
    XC_BUILD_BUG_ON(sizeof(struct xc_sr_rec_x86_pv_vcpu_hdr)   != 8);
    XC_BUILD_BUG_ON(sizeof(struct xc_sr_rec_tsc_info)          != 24);
    XC_BUILD_BUG_ON(sizeof(struct xc_sr_rec_hvm_params_entry)  != 16);
    XC_BUILD_BUG_ON(sizeof(struct xc_sr_rec_hvm_params)        != 8);
}
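/*
 * Editor's note (illustrative sketch, not part of the original source):
 * XC_BUILD_BUG_ON() is assumed to be a compile-time assertion along the
 * lines of the example below, so that a wrong record-header size breaks
 * the build rather than corrupting the migration stream at run time.
 * The real macro is defined elsewhere in libxc and may be implemented
 * differently.
 */
#define EXAMPLE_BUILD_BUG_ON(p) ((void)sizeof(char[1 - 2 * !!(p)]))
/* A true predicate yields a negative array size and fails to compile. */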
const uint32_t *xc_get_feature_deep_deps(uint32_t feature)
{
    static const struct {
        uint32_t feature;
        uint32_t fs[FEATURESET_NR_ENTRIES];
    } deep_deps[] = INIT_DEEP_DEPS;

    unsigned int start = 0, end = ARRAY_SIZE(deep_deps);

    XC_BUILD_BUG_ON(ARRAY_SIZE(deep_deps) != NR_DEEP_DEPS);

    /* deep_deps[] is sorted.  Perform a binary search. */
    while ( start < end )
    {
        unsigned int mid = start + ((end - start) / 2);

        if ( deep_deps[mid].feature > feature )
            end = mid;
        else if ( deep_deps[mid].feature < feature )
            start = mid + 1;
        else
            return deep_deps[mid].fs;
    }

    return NULL;
}
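/*
 * Editor's note (illustrative sketch, not part of the original source):
 * a typical caller of xc_get_feature_deep_deps() clears a feature's deep
 * dependencies from a featureset whenever the feature itself has been
 * cleared.  The featureset is assumed to be FEATURESET_NR_ENTRIES words
 * long, matching the fs[] arrays above; the helper name is hypothetical.
 */
static void __attribute__((unused)) example_clear_deep_deps(
    uint32_t *featureset, uint32_t feature)
{
    const uint32_t *dfs = xc_get_feature_deep_deps(feature);
    unsigned int i;

    if ( !dfs )
        return; /* No dependent features recorded for this bit. */

    for ( i = 0; i < FEATURESET_NR_ENTRIES; ++i )
        featureset[i] &= ~dfs[i];
}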
const uint32_t *xc_get_static_cpu_featuremask(
    enum xc_static_cpu_featuremask mask)
{
    const static uint32_t known[FEATURESET_NR_ENTRIES] = INIT_KNOWN_FEATURES,
        special[FEATURESET_NR_ENTRIES] = INIT_SPECIAL_FEATURES,
        pv[FEATURESET_NR_ENTRIES] = INIT_PV_FEATURES,
        hvm_shadow[FEATURESET_NR_ENTRIES] = INIT_HVM_SHADOW_FEATURES,
        hvm_hap[FEATURESET_NR_ENTRIES] = INIT_HVM_HAP_FEATURES,
        deep_features[FEATURESET_NR_ENTRIES] = INIT_DEEP_FEATURES;

    XC_BUILD_BUG_ON(ARRAY_SIZE(known) != FEATURESET_NR_ENTRIES);
    XC_BUILD_BUG_ON(ARRAY_SIZE(special) != FEATURESET_NR_ENTRIES);
    XC_BUILD_BUG_ON(ARRAY_SIZE(pv) != FEATURESET_NR_ENTRIES);
    XC_BUILD_BUG_ON(ARRAY_SIZE(hvm_shadow) != FEATURESET_NR_ENTRIES);
    XC_BUILD_BUG_ON(ARRAY_SIZE(hvm_hap) != FEATURESET_NR_ENTRIES);
    XC_BUILD_BUG_ON(ARRAY_SIZE(deep_features) != FEATURESET_NR_ENTRIES);

    switch ( mask )
    {
    case XC_FEATUREMASK_KNOWN:
        return known;

    case XC_FEATUREMASK_SPECIAL:
        return special;

    case XC_FEATUREMASK_PV:
        return pv;

    case XC_FEATUREMASK_HVM_SHADOW:
        return hvm_shadow;

    case XC_FEATUREMASK_HVM_HAP:
        return hvm_hap;

    case XC_FEATUREMASK_DEEP_FEATURES:
        return deep_features;

    default:
        return NULL;
    }
}
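/*
 * Editor's note (illustrative sketch, not part of the original source):
 * the static masks returned above are intended to be ANDed into a
 * featureset.  Restricting a featureset to what is expressible for a PV
 * guest might look like the hypothetical helper below.
 */
static void __attribute__((unused)) example_restrict_to_pv(
    uint32_t *featureset)
{
    const uint32_t *pv = xc_get_static_cpu_featuremask(XC_FEATUREMASK_PV);
    unsigned int i;

    for ( i = 0; i < FEATURESET_NR_ENTRIES; ++i )
        featureset[i] &= pv[i];
}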
static int xc_try_lzo1x_decode(
    struct xc_dom_image *dom, void **blob, size_t *size)
{
    int ret;
    const unsigned char *cur = dom->kernel_blob;
    unsigned char *out_buf = NULL;
    size_t left = dom->kernel_size;
    const char *msg;
    unsigned version;
    static const unsigned char magic[] = {
        0x89, 0x4c, 0x5a, 0x4f, 0x00, 0x0d, 0x0a, 0x1a, 0x0a
    };

    /*
     * lzo_uint should match size_t. Check that this is the case to be
     * sure we won't overflow various lzo_uint fields.
     */
    XC_BUILD_BUG_ON(sizeof(lzo_uint) != sizeof(size_t));

    ret = lzo_init();
    if ( ret != LZO_E_OK )
    {
        DOMPRINTF("LZO1x: Failed to init library (%d)\n", ret);
        return -1;
    }

    if ( left < 16 || memcmp(cur, magic, 9) )
    {
        DOMPRINTF("LZO1x: Unrecognized magic\n");
        return -1;
    }

    /* get version (2 bytes), skip library version (2),
     * 'need to be extracted' version (2) and method (1) */
    version = lzo_read_16(cur + 9);
    cur += 16;
    left -= 16;

    if ( version >= 0x0940 )
    {
        /* skip level */
        ++cur;
        if ( left )
            --left;
    }

    if ( left >= 4 && (lzo_read_32(cur) & LZOP_HEADER_HAS_FILTER) )
        ret = 8; /* flags + filter info */
    else
        ret = 4; /* flags */

    /* skip mode and mtime_low */
    ret += 8;
    if ( version >= 0x0940 )
        ret += 4; /* skip mtime_high */

    /* don't care about the file name, and skip checksum */
    if ( left > ret )
        ret += 1 + cur[ret] + 4;

    if ( left < ret )
    {
        DOMPRINTF("LZO1x: Incomplete header\n");
        return -1;
    }
    cur += ret;
    left -= ret;

    for ( *size = 0; ; )
    {
        lzo_uint src_len, dst_len, out_len;
        unsigned char *tmp_buf;

        msg = "Short input";
        if ( left < 4 )
            break;

        dst_len = lzo_read_32(cur);
        if ( !dst_len )
            return 0;

        if ( dst_len > LZOP_MAX_BLOCK_SIZE )
        {
            msg = "Block size too large";
            break;
        }

        if ( left < 12 )
            break;

        src_len = lzo_read_32(cur + 4);
        cur += 12; /* also skip block checksum info */
        left -= 12;

        msg = "Bad source length";
        if ( src_len <= 0 || src_len > dst_len || src_len > left )
            break;

        msg = "Output buffer overflow";
        if ( *size > SIZE_MAX - dst_len )
            break;

        msg = "Decompressed image too large";
        if ( xc_dom_kernel_check_size(dom, *size + dst_len) )
            break;

        msg = "Failed to (re)alloc memory";
        tmp_buf = realloc(out_buf, *size + dst_len);
        if ( tmp_buf == NULL )
            break;
        out_buf = tmp_buf;

        out_len = dst_len;
        ret = lzo1x_decompress_safe(cur, src_len, out_buf + *size, &out_len,
                                    NULL);
        switch ( ret )
        {
        case LZO_E_OK:
            msg = "Input underrun";
            if ( out_len != dst_len )
                break;

            *blob = out_buf;
            *size += out_len;
            cur += src_len;
            left -= src_len;
            continue;

        case LZO_E_INPUT_NOT_CONSUMED:
            msg = "Unconsumed input";
            break;

        case LZO_E_OUTPUT_OVERRUN:
            msg = "Output overrun";
            break;

        case LZO_E_INPUT_OVERRUN:
            msg = "Input overrun";
            break;

        case LZO_E_LOOKBEHIND_OVERRUN:
            msg = "Look-behind overrun";
            break;

        case LZO_E_EOF_NOT_FOUND:
            msg = "No EOF marker";
            break;

        case LZO_E_ERROR:
            msg = "General error";
            break;

        default:
            msg = "Internal program error (bug)";
            break;
        }

        break;
    }

    free(out_buf);
    DOMPRINTF("LZO1x decompression error: %s\n", msg);

    return -1;
}
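/*
 * Editor's note (illustrative sketch, not part of the original source):
 * xc_try_lzo1x_decode() relies on lzo_read_16()/lzo_read_32() helpers
 * defined elsewhere in this file.  The lzop container stores its header
 * fields big-endian, so those helpers are assumed to behave roughly like
 * the hypothetical versions below.
 */
static inline uint16_t example_lzo_read_16(const unsigned char *buf)
{
    return buf[1] | ((uint16_t)buf[0] << 8);
}

static inline uint32_t example_lzo_read_32(const unsigned char *buf)
{
    return example_lzo_read_16(buf + 2) |
           ((uint32_t)example_lzo_read_16(buf) << 16);
}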
/*
 * 1. Get PM parameter
 * 2. Provide user PM control
 */
int xc_get_cpufreq_para(xc_interface *xch, int cpuid,
                        struct xc_get_cpufreq_para *user_para)
{
    DECLARE_SYSCTL;
    int ret = 0;
    struct xen_get_cpufreq_para *sys_para = &sysctl.u.pm_op.u.get_para;
    DECLARE_NAMED_HYPERCALL_BOUNCE(affected_cpus,
                                   user_para->affected_cpus,
                                   user_para->cpu_num * sizeof(uint32_t),
                                   XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    DECLARE_NAMED_HYPERCALL_BOUNCE(scaling_available_frequencies,
                                   user_para->scaling_available_frequencies,
                                   user_para->freq_num * sizeof(uint32_t),
                                   XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    DECLARE_NAMED_HYPERCALL_BOUNCE(scaling_available_governors,
                                   user_para->scaling_available_governors,
                                   user_para->gov_num * CPUFREQ_NAME_LEN * sizeof(char),
                                   XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    bool has_num = user_para->cpu_num &&
                   user_para->freq_num &&
                   user_para->gov_num;

    if ( has_num )
    {
        if ( (!user_para->affected_cpus) ||
             (!user_para->scaling_available_frequencies) ||
             (!user_para->scaling_available_governors) )
        {
            errno = EINVAL;
            return -1;
        }
        if ( xc_hypercall_bounce_pre(xch, affected_cpus) )
            goto unlock_1;
        if ( xc_hypercall_bounce_pre(xch, scaling_available_frequencies) )
            goto unlock_2;
        if ( xc_hypercall_bounce_pre(xch, scaling_available_governors) )
            goto unlock_3;

        set_xen_guest_handle(sys_para->affected_cpus, affected_cpus);
        set_xen_guest_handle(sys_para->scaling_available_frequencies,
                             scaling_available_frequencies);
        set_xen_guest_handle(sys_para->scaling_available_governors,
                             scaling_available_governors);
    }

    sysctl.cmd = XEN_SYSCTL_pm_op;
    sysctl.u.pm_op.cmd = GET_CPUFREQ_PARA;
    sysctl.u.pm_op.cpuid = cpuid;
    sys_para->cpu_num  = user_para->cpu_num;
    sys_para->freq_num = user_para->freq_num;
    sys_para->gov_num  = user_para->gov_num;

    ret = xc_sysctl(xch, &sysctl);
    if ( ret )
    {
        if ( errno == EAGAIN )
        {
            user_para->cpu_num  = sys_para->cpu_num;
            user_para->freq_num = sys_para->freq_num;
            user_para->gov_num  = sys_para->gov_num;
            ret = -errno;
        }

        if ( has_num )
            goto unlock_4;
        goto unlock_1;
    }
    else
    {
        user_para->cpuinfo_cur_freq = sys_para->cpuinfo_cur_freq;
        user_para->cpuinfo_max_freq = sys_para->cpuinfo_max_freq;
        user_para->cpuinfo_min_freq = sys_para->cpuinfo_min_freq;
        user_para->scaling_cur_freq = sys_para->scaling_cur_freq;
        user_para->scaling_max_freq = sys_para->scaling_max_freq;
        user_para->scaling_min_freq = sys_para->scaling_min_freq;
        user_para->turbo_enabled    = sys_para->turbo_enabled;

        memcpy(user_para->scaling_driver,
               sys_para->scaling_driver, CPUFREQ_NAME_LEN);

        memcpy(user_para->scaling_governor,
               sys_para->scaling_governor, CPUFREQ_NAME_LEN);

        /* copy to user_para no matter what cpufreq governor */
        XC_BUILD_BUG_ON(sizeof(((struct xc_get_cpufreq_para *)0)->u) !=
                        sizeof(((struct xen_get_cpufreq_para *)0)->u));

        memcpy(&user_para->u, &sys_para->u, sizeof(sys_para->u));
    }

unlock_4:
    xc_hypercall_bounce_post(xch, scaling_available_governors);
unlock_3:
    xc_hypercall_bounce_post(xch, scaling_available_frequencies);
unlock_2:
    xc_hypercall_bounce_post(xch, affected_cpus);
unlock_1:
    return ret;
}
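/*
 * Editor's note (illustrative sketch, not part of the original source):
 * xc_get_cpufreq_para() supports two-pass use.  A first call with all
 * counts zero fails with -EAGAIN and reports the required array sizes
 * (see the EAGAIN handling above); the caller then allocates the arrays
 * and repeats the call.  Field names are assumed to match the
 * xc_get_cpufreq_para declaration in xenctrl.h, and <stdlib.h> and
 * <errno.h> are assumed to be available; the helper name is hypothetical.
 */
static int __attribute__((unused)) example_get_cpufreq_para(
    xc_interface *xch, int cpuid)
{
    struct xc_get_cpufreq_para para = { 0 };
    int ret;

    /* First pass: learn cpu_num/freq_num/gov_num. */
    ret = xc_get_cpufreq_para(xch, cpuid, &para);
    if ( ret != -EAGAIN )
        return ret;

    para.affected_cpus = calloc(para.cpu_num, sizeof(uint32_t));
    para.scaling_available_frequencies = calloc(para.freq_num,
                                                sizeof(uint32_t));
    para.scaling_available_governors = calloc(para.gov_num,
                                              CPUFREQ_NAME_LEN);
    if ( !para.affected_cpus || !para.scaling_available_frequencies ||
         !para.scaling_available_governors )
    {
        ret = -ENOMEM;
        goto out;
    }

    /* Second pass: fetch the full set of parameters. */
    ret = xc_get_cpufreq_para(xch, cpuid, &para);

 out:
    free(para.affected_cpus);
    free(para.scaling_available_frequencies);
    free(para.scaling_available_governors);
    return ret;
}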