/*
 * Fill *ni with the host node's hardware description obtained from libvirt.
 * Returns 0 on success, -1 on failure (no connection, bad argument, libvirt
 * error, or out of memory).  On failure *ni is left untouched.
 */
int libvirt_node_info(NodeInfo *ni)
{
    virNodeInfo nf;     /* small, fixed-size struct: stack allocation, no malloc needed */
    char *model;

    if (g_conn == NULL || ni == NULL)
        return -1;

    /* success = 0, failed = -1 */
    if (virNodeGetInfo(g_conn, &nf) < 0) {
        logerror(_("error in %s(%d)\n"), __func__, __LINE__);
        return -1;
    }

    /*
     * BUG FIX: strdup() was unchecked; an allocation failure used to leave
     * ni->cpu_model NULL while still reporting success.  Duplicate before
     * taking the lock so a failure leaves *ni untouched.
     */
    model = strdup(nf.model);
    if (model == NULL) {
        logerror(_("error in %s(%d)\n"), __func__, __LINE__);
        return -1;
    }

    __this_lock();
    /* NOTE(review): if ni->cpu_model may already hold an allocation, the old
     * string is leaked here -- confirm ownership with callers. */
    ni->cpu_model = model;
    ni->cpu_max = nf.cpus;
    ni->cpu_mhz = nf.mhz;
    ni->mem_max = nf.memory;
    /*
    ni->numaNodes = nf.nodes;
    ni->sockets = nf.sockets;
    ni->coresPerSocket = nf.cores;
    ni->threadsPerCore = nf.threads;
    */
    __this_unlock();

    return 0;
}
static int doInitialize (struct nc_state_t *nc) { char *s = NULL; virNodeInfo ni; long long dom0_min_mem; logprintfl(EUCADEBUG, "doInitialized() invoked\n"); /* set up paths of Eucalyptus commands NC relies on */ snprintf (nc->gen_libvirt_cmd_path, MAX_PATH, EUCALYPTUS_GEN_LIBVIRT_XML, nc->home, nc->home); snprintf (nc->get_info_cmd_path, MAX_PATH, EUCALYPTUS_GET_XEN_INFO, nc->home, nc->home); snprintf (nc->virsh_cmd_path, MAX_PATH, EUCALYPTUS_VIRSH, nc->home); snprintf (nc->xm_cmd_path, MAX_PATH, EUCALYPTUS_XM); snprintf (nc->detach_cmd_path, MAX_PATH, EUCALYPTUS_DETACH, nc->home, nc->home); snprintf (nc->connect_storage_cmd_path, MAX_PATH, EUCALYPTUS_CONNECT_ISCSI, nc->home, nc->home); snprintf (nc->disconnect_storage_cmd_path, MAX_PATH, EUCALYPTUS_DISCONNECT_ISCSI, nc->home, nc->home); snprintf (nc->get_storage_cmd_path, MAX_PATH, EUCALYPTUS_GET_ISCSI, nc->home, nc->home); strcpy(nc->uri, HYPERVISOR_URI); nc->convert_to_disk = 0; /* check connection is fresh */ if (!check_hypervisor_conn()) { return ERROR_FATAL; } /* get resources */ if (virNodeGetInfo(nc->conn, &ni)) { logprintfl (EUCAFATAL, "error: failed to discover resources\n"); return ERROR_FATAL; } /* dom0-min-mem has to come from xend config file */ s = system_output (nc->get_info_cmd_path); if (get_value (s, "dom0-min-mem", &dom0_min_mem)) { logprintfl (EUCAFATAL, "error: did not find dom0-min-mem in output from %s\n", nc->get_info_cmd_path); free (s); return ERROR_FATAL; } free (s); /* calculate the available memory */ nc->mem_max = ni.memory/1024 - 32 - dom0_min_mem; /* calculate the available cores */ nc->cores_max = ni.cpus; /* let's adjust the values based on the config values */ if (nc->config_max_mem && nc->config_max_mem < nc->mem_max) nc->mem_max = nc->config_max_mem; if (nc->config_max_cores) nc->cores_max = nc->config_max_cores; logprintfl(EUCAINFO, "Using %lld cores\n", nc->cores_max); logprintfl(EUCAINFO, "Using %lld memory\n", nc->mem_max); return OK; }
/*
 * CPU bandwidth allocation, e.g. 40% CPU, cpu_bw_perc ~ [0 - 100%]
 * TODO: don't support dynamically changing the "period" parameter yet, it only
 * allocates cpu_quota according to the current period and bw_percentage
 * TODO: think about the real situation, say we have 8 pCPUs, and the domain
 * has 2 vCPUs. Then the domain takes 2 / 8 = 25% of the total physical CPU
 * resource at most. Does cfs.quota / cfs.period work for only ONE CPU or mean
 * all CPUs? If it aims at one CPU, the following function works for each vCPU
 * of the domain, then it should never exceed its upper bound.
 *
 * Returns 0 on success, -1 on any failure.
 */
int alloccpu(virConnectPtr conn, virDomainPtr domain, double cpu_bw_perc)
{
    int ret = -1;
    unsigned long long cpu_period;
    long long cpu_quota = -1;
    virNodeInfo nodeinfo;
    virDomainInfo dominfo;
    unsigned int nr_pcpu = 0, nr_vcpu = 0;

    printf("cpu_bw_perc = %.2lf | ", cpu_bw_perc);

    if (-1 == virNodeGetInfo(conn, &nodeinfo))
        goto cleanup;
    nr_pcpu = nodeinfo.cpus;
    printf("nr_pcpu = %u | ", nr_pcpu);

    if (-1 == virDomainGetInfo(domain, &dominfo))
        goto cleanup;
    nr_vcpu = dominfo.nrVirtCpu;
    printf("nr_vcpu = %u | ", nr_vcpu);

    /* BUG FIX: guard the divisions below against zero counts */
    if (nr_pcpu == 0 || nr_vcpu == 0)
        goto cleanup;

    if (-1 == get_vcpu_period(domain, &cpu_period))
        goto cleanup;
    printf("cpu_period = %llu | ", cpu_period);

    /*
     * BUG FIX: the upper bound used to be computed with integer division,
     * (double)(nr_vcpu*100/nr_pcpu), which truncates -- e.g. 1 vCPU on
     * 8 pCPUs gave 12 instead of 12.5.  Do the comparison in floating point.
     */
    if (cpu_bw_perc <= 100.0 * nr_vcpu / nr_pcpu) {
        /*
         * Compute the new quota which should be allocated to the domain, the
         * quota is applied to each vcpu, thus the cpu_bw_percentage should be
         * divided by nr_vcpu.
         */
        cpu_quota = (long long)(cpu_bw_perc / nr_vcpu * nr_pcpu * cpu_period);
        printf("Choose 1:cpu_quota = %lld\n", cpu_quota);
    } else {
        /*
         * allocate at most (nr_vcpu / nr_pcpu) bandwidth for the domain
         */
        cpu_quota = (long long)(cpu_period);
        printf("Choose 2:cpu_quota = %lld\n", cpu_quota);
    }

    if (-1 == set_vcpu_quota_ll(domain, &cpu_quota))
        goto cleanup;

    ret = 0;
cleanup:
    return ret;
}
static int doInitialize (struct nc_state_t *nc) { char *s = NULL; virNodeInfo ni; long long dom0_min_mem; // set up paths of Eucalyptus commands NC relies on snprintf (nc->get_info_cmd_path, MAX_PATH, EUCALYPTUS_GET_XEN_INFO, nc->home, nc->home); snprintf (nc->virsh_cmd_path, MAX_PATH, EUCALYPTUS_VIRSH, nc->home); snprintf (nc->xm_cmd_path, MAX_PATH, EUCALYPTUS_XM); snprintf (nc->detach_cmd_path, MAX_PATH, EUCALYPTUS_DETACH, nc->home, nc->home); strcpy(nc->uri, HYPERVISOR_URI); nc->convert_to_disk = 0; nc->capability = HYPERVISOR_XEN_AND_HARDWARE; // TODO: set to XEN_PARAVIRTUALIZED if on older Xen kernel // check connection is fresh if (!check_hypervisor_conn()) { return ERROR_FATAL; } // get resources if (virNodeGetInfo(nc->conn, &ni)) { logprintfl (EUCAFATAL, "failed to discover resources\n"); return ERROR_FATAL; } // dom0-min-mem has to come from xend config file s = system_output (nc->get_info_cmd_path); if (get_value (s, "dom0-min-mem", &dom0_min_mem)) { logprintfl (EUCAFATAL, "did not find dom0-min-mem in output from %s\n", nc->get_info_cmd_path); free (s); return ERROR_FATAL; } free (s); // calculate the available memory nc->mem_max = ni.memory/1024 - 32 - dom0_min_mem; // calculate the available cores nc->cores_max = ni.cpus; // let's adjust the values based on the config values if (nc->config_max_mem && nc->config_max_mem < nc->mem_max) nc->mem_max = nc->config_max_mem; if (nc->config_max_cores) nc->cores_max = nc->config_max_cores; return OK; }
/*
 * Query libvirt for the number of CPUs on the host.
 * Returns the CPU count, or 0 if allocation or the libvirt call fails.
 */
unsigned int getNumOfHostCPUs(virConnectPtr conn)
{
    unsigned int ncpus = 0;
    virNodeInfoPtr nodeinfo = NULL;

    if (VIR_ALLOC(nodeinfo) >= 0 && virNodeGetInfo(conn, nodeinfo) >= 0)
        ncpus = nodeinfo->cpus;

    VIR_FREE(nodeinfo);
    return ncpus;
}
/*
 * Query libvirt for the amount of memory on the host (in KiB, as reported by
 * virNodeGetInfo).  Returns 0 if allocation or the libvirt call fails.
 */
unsigned long getHostMemory(virConnectPtr conn)
{
    virNodeInfoPtr info = NULL;
    /*
     * BUG FIX: ret was declared `unsigned int` even though the function (and
     * virNodeInfo.memory) use `unsigned long`, silently truncating the value
     * on hosts where the KiB count exceeds UINT_MAX.
     */
    unsigned long ret = 0;

    if (VIR_ALLOC(info) < 0)
        goto cleanup;

    if (virNodeGetInfo(conn, info) < 0)
        goto cleanup;

    ret = info->memory;

cleanup:
    VIR_FREE(info);
    return ret;
}
/*
 * Determine the maximum number of CPUs on the node, preferring
 * virNodeGetCPUMap when the libvirt build provides it and falling back to
 * virNodeGetInfo otherwise.  Raises a Ruby RetrieveError on failure of the
 * fallback path.
 */
int ruby_libvirt_get_maxcpus(virConnectPtr conn)
{
    int count = -1;
    virNodeInfo info;

#if HAVE_VIRNODEGETCPUMAP
    count = virNodeGetCPUMap(conn, NULL, NULL, 0);
#endif

    if (count >= 0)
        return count;

    /* fall back to nodeinfo */
    ruby_libvirt_raise_error_if(virNodeGetInfo(conn, &info) < 0,
                                e_RetrieveError, "virNodeGetInfo", conn);
    return VIR_NODEINFO_MAXCPUS(info);
}
/*
 * Describe the node's hardware (CPU model, memory, core topology) by copying
 * the virNodeGetInfo result into *hwinfo.  Returns 0 on success, -1 on
 * failure.  `nc` and `meta` are unused here but kept for the handler
 * signature shared with the other do* operations.
 */
static int
doDescribeHardware ( struct nc_state_t *nc,
                     ncMetadata *meta,
                     ncHardwareInfo *hwinfo)
{
    virNodeInfo info;
    virConnectPtr *con = check_hypervisor_conn();

    /* BUG FIX: the connection pointer was dereferenced without a NULL check */
    if (con == NULL)
        return (-1);

    sem_p (hyp_sem);
    if (virNodeGetInfo (*con, &info) != 0) {
        /* BUG FIX: hyp_sem used to stay held on this error path, deadlocking
         * every later hypervisor operation */
        sem_v (hyp_sem);
        return (-1);
    }
    sem_v (hyp_sem);

    /* NOTE(review): unbounded copy -- assumes hwinfo->model is at least as
     * large as virNodeInfo.model (32 bytes); confirm ncHardwareInfo layout */
    strcpy (hwinfo->model, info.model);
    hwinfo->memory = info.memory;
    hwinfo->cpus = info.cpus;
    hwinfo->mhz = info.mhz;
    hwinfo->nodes = info.nodes;
    hwinfo->sockets = info.sockets;
    hwinfo->cores = info.cores;
    hwinfo->threads = info.threads;

    return (0);
}
/*
 * Construct the QMF wrapper for the local libvirt node: open the connection,
 * gather hostname/URI/version/hardware information, and register the
 * management object with the agent.  Throws -1 on any fatal libvirt failure.
 */
NodeWrap::NodeWrap(ManagementAgent *_agent, string _name) : name(_name), agent(_agent)
{
    virNodeInfo info;
    char *hostname;
    char libvirt_version[256] = "Unknown";
    char api_version[256] = "Unknown";
    char hv_version[256] = "Unknown";
    char *uri;
    const char *hv_type;
    unsigned long api_v;
    unsigned long libvirt_v;
    unsigned long hv_v;
    int ret;
    unsigned int major;
    unsigned int minor;
    unsigned int rel;

    conn = virConnectOpen(NULL);
    if (!conn) {
        REPORT_ERR(conn, "virConnectOpen");
        throw -1;
    }

    /* NOTE(review): hostname and uri are heap strings owned by the caller per
     * libvirt's contract; they are never freed here (and conn is not closed on
     * the throw paths below) -- confirm whether _qmf::Node copies them. */
    hostname = virConnectGetHostname(conn);
    if (hostname == NULL) {
        REPORT_ERR(conn, "virConnectGetHostname");
        throw -1;
    }

    hv_type = virConnectGetType(conn);
    if (hv_type == NULL) {
        REPORT_ERR(conn, "virConnectGetType");
        throw -1;
    }

    uri = virConnectGetURI(conn);
    if (uri == NULL) {
        REPORT_ERR(conn, "virConnectGetURI");
        throw -1;
    }

    ret = virGetVersion(&libvirt_v, hv_type, &api_v);
    if (ret < 0) {
        REPORT_ERR(conn, "virGetVersion");
    } else {
        /* versions are encoded as major*1000000 + minor*1000 + release */
        major = libvirt_v / 1000000;
        libvirt_v %= 1000000;
        minor = libvirt_v / 1000;
        rel = libvirt_v % 1000;
        /* BUG FIX: "%d" was used for unsigned values; use "%u" */
        snprintf(libvirt_version, sizeof(libvirt_version), "%u.%u.%u", major, minor, rel);

        major = api_v / 1000000;
        api_v %= 1000000;
        minor = api_v / 1000;
        rel = api_v % 1000;
        snprintf(api_version, sizeof(api_version), "%u.%u.%u", major, minor, rel);
    }

    ret = virConnectGetVersion(conn, &hv_v);
    if (ret < 0) {
        REPORT_ERR(conn, "virConnectGetVersion");
    } else {
        major = hv_v / 1000000;
        hv_v %= 1000000;
        minor = hv_v / 1000;
        rel = hv_v % 1000;
        snprintf(hv_version, sizeof(hv_version), "%u.%u.%u", major, minor, rel);
    }

    ret = virNodeGetInfo(conn, &info);
    if (ret < 0) {
        REPORT_ERR(conn, "virNodeGetInfo");
        /* BUG FIX: the arguments were swapped -- memset(ptr, value, size) --
         * so only one byte was written; zero the whole struct instead */
        memset(&info, 0, sizeof(info));
    }

    mgmtObject = new _qmf::Node(agent, this, hostname, uri, libvirt_version,
                                api_version, hv_version, hv_type, info.model,
                                info.memory, info.cpus, info.mhz, info.nodes,
                                info.sockets, info.cores, info.threads);
    agent->addObject(mgmtObject);
}
int main() { int idCount; int i; int id; //int ids[MAXID]; int *ids; //timeInfoNode timeInfos[MAXID]; printf("--------------------------------------------------------\n"); printf(" XEN Domain Monitor \n"); printf("--------------------------------------------------------\n"); /* NULL means connect to local Xen hypervisor */ conn = virConnectOpenReadOnly(NULL); if (conn == NULL) { fprintf(stderr, "Failed to connect to hypervisor\n"); closeConn(); return 0; } /*char* caps; caps = virConnectGetCapabilities(conn); printf("Capabilities:\n%s\n",caps); free(caps);*/ char *host; host = virConnectGetHostname(conn); fprintf(stdout, "Hostname:%s\n",host); free(host); int vcpus; vcpus = virConnectGetMaxVcpus(conn,NULL); fprintf(stdout, "Maxmum support vcpus:%d\n",vcpus); unsigned long long node_free_memory; node_free_memory = virNodeGetFreeMemory(conn); fprintf(stdout, "free memory:%lld\n",node_free_memory); virNodeInfo nodeinfo; virNodeGetInfo(conn,&nodeinfo); fprintf(stdout, "Model: %s\n", nodeinfo.model); fprintf(stdout, "Memory size: %lukb\n", nodeinfo.memory); fprintf(stdout, "Number of CPUs: %u\n", nodeinfo.cpus); fprintf(stdout, "MHz of CPUs: %u\n", nodeinfo.mhz); fprintf(stdout, "Number of NUMA nodes: %u\n", nodeinfo.nodes); fprintf(stdout, "Number of CPU sockets: %u\n", nodeinfo.sockets); fprintf(stdout, "Number of CPU cores per socket: %u\n", nodeinfo.cores); fprintf(stdout, "Number of CPU threads per core: %u\n", nodeinfo.threads); fprintf(stdout, "Virtualization type: %s\n", virConnectGetType(conn)); unsigned long ver; virConnectGetVersion(conn, &ver); fprintf(stdout, "Version: %lu\n", ver); /*unsigned long Libver; virConnectGetLibVersion(conn, &Libver); fprintf(stdout, "Libvirt Version: %lu\n", Libver);*/ char *uri; uri = virConnectGetURI(conn); fprintf(stdout, "Canonical URI: %s\n", uri); free(uri); /* get the count of IDs and save these ID into ids[] */ idCount = virConnectNumOfDomains(conn); ids = malloc(sizeof(int) *idCount); idCount = 
virConnectListDomains(conn,ids,idCount); //idCount = virConnectListDomains(conn, &ids[0], MAXID); if (idCount < 0) { fprintf(stderr, "Failed to list the domains\n"); closeConn(); return 0; } timeInfoNode timeInfos[idCount]; printf("Domain Totals: %d\n", idCount); printf("ID\tCPU\tMEM\tMaxMEM\tVCPUs\tState\tNAME\n"); /* loop get the CPUtime info by IDs */ for (i = 0; i < idCount; i++) { id = ids[i]; getTimeInfo(id, &(timeInfos[i])); } sleep(1); /* loop print the domain info and calculate the usage of cpus*/ for (i = 0; i < idCount; i++) { id = ids[i]; getDomainInfo(id, timeInfos[i]); } free(ids); printf("--------------------------------------------------------\n"); closeConn(); return 0; }