From 24afdc93bc6f217dd2d52a555c80d94177900502 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Tue, 16 Dec 2025 14:03:07 +0000 Subject: [PATCH 01/10] CP-310853: claim the entire footprint of the VM for now MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Edwin Török --- ocaml/xenopsd/xc/domain.ml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocaml/xenopsd/xc/domain.ml b/ocaml/xenopsd/xc/domain.ml index 6c65d467f3..67f855189e 100644 --- a/ocaml/xenopsd/xc/domain.ml +++ b/ocaml/xenopsd/xc/domain.ml @@ -1110,7 +1110,7 @@ let build_pre ~xc ~xs ~vcpus ~memory ~hard_affinity domid = Xenops_server.cores_of_numa_affinity_policy pin ~vcpus in numa_placement domid ~vcpus ~cores - ~memory:(Int64.mul memory.xen_max_mib 1048576L) + ~memory:(Int64.mul memory.required_host_free_mib 1048576L) affinity |> Option.map fst ) From 060d7925017716cdbba73a0c3efe4da2ff24242b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Wed, 17 Dec 2025 14:48:19 +0000 Subject: [PATCH 02/10] CA-422188: either always use claims or never use claims MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Do not mix using claims with not using claims. Xen cannot currently guarantee that it'll honour a VM's memory claim, unless all other VMs also use claims. Global claims have existed since a long time in Xen, so this should be safe to do on both XS8 and XS9. 
Signed-off-by: Edwin Török --- ocaml/xenopsd/xc/domain.ml | 30 ++++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/ocaml/xenopsd/xc/domain.ml b/ocaml/xenopsd/xc/domain.ml index 67f855189e..e07e865e25 100644 --- a/ocaml/xenopsd/xc/domain.ml +++ b/ocaml/xenopsd/xc/domain.ml @@ -1002,6 +1002,8 @@ let numa_placement domid ~vcpus ~cores ~memory affinity = in let nr_pages = Int64.div memory 4096L |> Int64.to_int in try + D.debug "NUMAClaim domid %d: local claim on node %d: %d pages" domid + node nr_pages ; Xenctrlext.domain_claim_pages xcext domid ~numa_node nr_pages ; set_vcpu_affinity cpu_affinity ; Some (node, memory) @@ -1009,6 +1011,7 @@ let numa_placement domid ~vcpus ~cores ~memory affinity = | Xenctrlext.Not_available -> (* Xen does not provide the interface to claim pages from a single NUMA node, ignore the error and continue. *) + D.debug "NUMAClaim domid %d: local claim not available" domid ; None | Xenctrlext.Unix_error (errno, _) -> D.info @@ -1109,10 +1112,29 @@ let build_pre ~xc ~xs ~vcpus ~memory ~hard_affinity domid = and cores = Xenops_server.cores_of_numa_affinity_policy pin ~vcpus in - numa_placement domid ~vcpus ~cores - ~memory:(Int64.mul memory.required_host_free_mib 1048576L) - affinity - |> Option.map fst + let memory = Int64.mul memory.required_host_free_mib 1048576L in + match numa_placement domid ~vcpus ~cores ~memory affinity with + | None -> + (* Always perform a global claim when NUMA placement is enabled, and single node claims failed or were unavailable: This tries to ensure that memory allocated for this domain won't use up memory claimed by other domains. If claims are mixed with non-claims then Xen can't currently guarantee that it would honour the existing claims. 
+ A failure here is a hard failure: we'd fail allocating + memory later anyway + *) + let nr_pages = Int64.div memory 4096L |> Int64.to_int in + let xcext = Xenctrlext.get_handle () in + D.debug "NUMAClaim domid %d: global claim: %d pages" domid + nr_pages ; + Xenctrlext.domain_claim_pages xcext domid + ~numa_node:Xenctrlext.NumaNode.none nr_pages ; + None + | Some (plan, _) -> + Some plan ) in let store_chan, console_chan = create_channels ~xc uuid domid in From 4437591c45d3aede7cce4440935162941f45672f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Wed, 17 Dec 2025 14:53:56 +0000 Subject: [PATCH 03/10] CA-422187: fix NUMA on XS8 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On XS8 we always raise an exception when attempting to claim from a single node. We wanted to only use soft affinity when the single node claim succeeded (which is the correct fix on XS9, where this API is available). However this meant that we've effectively completely disabled NUMA support on XS8, without any way to turn it on. Always use soft affinity when the single-node claim API is unavailable, this should keep NUMA working on XS8. On XS9 Xen itself would never raise ENOSYS (it has a `err = errno = 0` on ENOSYS). Fixes: fb66dfc03 ("CA-421847: set vcpu affinity if node claim succeeded") Signed-off-by: Edwin Török --- ocaml/xenopsd/xc/domain.ml | 1 + 1 file changed, 1 insertion(+) diff --git a/ocaml/xenopsd/xc/domain.ml b/ocaml/xenopsd/xc/domain.ml index e07e865e25..b80ed923cd 100644 --- a/ocaml/xenopsd/xc/domain.ml +++ b/ocaml/xenopsd/xc/domain.ml @@ -1012,6 +1012,7 @@ let numa_placement domid ~vcpus ~cores ~memory affinity = (* Xen does not provide the interface to claim pages from a single NUMA node, ignore the error and continue. 
*) D.debug "NUMAClaim domid %d: local claim not available" domid ; + set_vcpu_affinity cpu_affinity ; None | Xenctrlext.Unix_error (errno, _) -> D.info From cb363f009a960c908a03a0a7ae6289daee499c18 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Thu, 18 Dec 2025 13:53:12 +0000 Subject: [PATCH 04/10] CA-422187: make power of 2 more explicit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Edwin Török --- ocaml/xenopsd/xc/domain.ml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ocaml/xenopsd/xc/domain.ml b/ocaml/xenopsd/xc/domain.ml index b80ed923cd..36afd289f3 100644 --- a/ocaml/xenopsd/xc/domain.ml +++ b/ocaml/xenopsd/xc/domain.ml @@ -1113,7 +1113,9 @@ let build_pre ~xc ~xs ~vcpus ~memory ~hard_affinity domid = and cores = Xenops_server.cores_of_numa_affinity_policy pin ~vcpus in - let memory = Int64.mul memory.required_host_free_mib 1048576L in + let memory = + Int64.(mul memory.required_host_free_mib (shift_left 1L 20)) + in match numa_placement domid ~vcpus ~cores ~memory affinity with | None -> (* Always perform a global claim when NUMA placement is From 112db1f4dcfb57ff040a54e1470eacb085217992 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Thu, 18 Dec 2025 13:53:53 +0000 Subject: [PATCH 05/10] CA-422187: only ENOMEM is retryable when a single-node NUMA claim fails MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Edwin Török --- ocaml/xenopsd/xc/domain.ml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocaml/xenopsd/xc/domain.ml b/ocaml/xenopsd/xc/domain.ml index 36afd289f3..0a643997f1 100644 --- a/ocaml/xenopsd/xc/domain.ml +++ b/ocaml/xenopsd/xc/domain.ml @@ -1014,7 +1014,7 @@ let numa_placement domid ~vcpus ~cores ~memory affinity = D.debug "NUMAClaim domid %d: local claim not available" domid ; set_vcpu_affinity cpu_affinity ; None - | Xenctrlext.Unix_error 
(errno, _) -> + | Xenctrlext.Unix_error ((Unix.ENOMEM as errno), _) -> D.info "%s: unable to claim enough memory, domain %d won't be hosted in a \ single NUMA node. (error %s)" From 95367e1aa6b965f43c5bd30a3de7dd1fac4fce16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Thu, 18 Dec 2025 16:06:54 +0000 Subject: [PATCH 06/10] CA-422187: safer defaults for global claims MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Xen may have already allocated some memory for the domain, and the overhead is only an estimate. A global claim failing is a hard failure, so instead use a more conservative estimate: `memory.build_start_mib`. This is similar to `required_host_free_mib`, but doesn't take overhead into account. Eventually we'd want to have another argument to the create hypercall that tells it what NUMA node(s) to use, and then we can include all the overhead too there. For the single node claim keep the amount as it was, it is only a best effort claim. 
Fixes: 060d79250 ("CA-422188: either always use claims or never use claims") Signed-off-by: Edwin Török --- ocaml/xenopsd/xc/domain.ml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ocaml/xenopsd/xc/domain.ml b/ocaml/xenopsd/xc/domain.ml index 0a643997f1..1ea2bdeb9c 100644 --- a/ocaml/xenopsd/xc/domain.ml +++ b/ocaml/xenopsd/xc/domain.ml @@ -1115,6 +1115,8 @@ let build_pre ~xc ~xs ~vcpus ~memory ~hard_affinity domid = in let memory = Int64.(mul memory.required_host_free_mib (shift_left 1L 20)) + and memory_hard = + Int64.(mul memory.build_start_mib (shift_left 1L 20)) in match numa_placement domid ~vcpus ~cores ~memory affinity with | None -> @@ -1129,7 +1131,7 @@ let build_pre ~xc ~xs ~vcpus ~memory ~hard_affinity domid = A failure here is a hard failure: we'd fail allocating memory later anyway *) - let nr_pages = Int64.div memory 4096L |> Int64.to_int in + let nr_pages = Int64.div memory_hard 4096L |> Int64.to_int in let xcext = Xenctrlext.get_handle () in D.debug "NUMAClaim domid %d: global claim: %d pages" domid nr_pages ; From 02c6ed1a7e4088b49225a0d730d6077eda8202f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Thu, 18 Dec 2025 17:33:23 +0000 Subject: [PATCH 07/10] CA-422187: do not claim shadow_mib, it has already been allocated MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When rebooting lots of VMs in parallel we might run out of memory and fail to boot all the VMs again. This is because we overestimate the amount of memory required, and claim too much. That memory is released when the domain build finishes, but when building domains in parallel it'll temporarily result in an out of memory error. Instead try to claim only what is left to be allocated: the p2m map and shadow map have already been allocated by this point. 
Fixes: 95367e1aa ("CA-422187: safer defaults for global claims") Signed-off-by: Edwin Török --- ocaml/xapi-idl/memory/memory.ml | 3 +++ ocaml/xenopsd/xc/domain.ml | 4 +--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/ocaml/xapi-idl/memory/memory.ml b/ocaml/xapi-idl/memory/memory.ml index 99951f7e3e..4d0b0e0043 100644 --- a/ocaml/xapi-idl/memory/memory.ml +++ b/ocaml/xapi-idl/memory/memory.ml @@ -192,6 +192,9 @@ module Memory_model (D : MEMORY_MODEL_DATA) = struct static_max_mib --- Int64.of_int video_mib +++ D.shim_mib static_max_mib let build_start_mib static_max_mib target_mib video_mib = + D.extra_internal_mib + +++ D.extra_external_mib + +++ if D.can_start_ballooned_down then target_mib --- Int64.of_int video_mib +++ D.shim_mib target_mib else diff --git a/ocaml/xenopsd/xc/domain.ml b/ocaml/xenopsd/xc/domain.ml index 1ea2bdeb9c..56137d3669 100644 --- a/ocaml/xenopsd/xc/domain.ml +++ b/ocaml/xenopsd/xc/domain.ml @@ -1114,8 +1114,6 @@ let build_pre ~xc ~xs ~vcpus ~memory ~hard_affinity domid = Xenops_server.cores_of_numa_affinity_policy pin ~vcpus in let memory = - Int64.(mul memory.required_host_free_mib (shift_left 1L 20)) - and memory_hard = Int64.(mul memory.build_start_mib (shift_left 1L 20)) in match numa_placement domid ~vcpus ~cores ~memory affinity with @@ -1131,7 +1129,7 @@ let build_pre ~xc ~xs ~vcpus ~memory ~hard_affinity domid = A failure here is a hard failure: we'd fail allocating memory later anyway *) - let nr_pages = Int64.div memory_hard 4096L |> Int64.to_int in + let nr_pages = Int64.div memory 4096L |> Int64.to_int in let xcext = Xenctrlext.get_handle () in D.debug "NUMAClaim domid %d: global claim: %d pages" domid nr_pages ; From 95491040d998ebcaf69563e25760922a24d038f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Fri, 19 Dec 2025 09:33:27 +0000 Subject: [PATCH 08/10] CA-422187: claim just the bare minimum MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 
When a domain build finishes Xen releases any extra unused memory from the claim. In my tests that is ~544 pages, which is about the amount that got added here, so we're double counting something. Remove the hack, so we allocate just the bare minimum. Fixes: 02c6ed1a7 ("CA-422187: do not claim shadow_mib, it has already been allocated") Signed-off-by: Edwin Török --- ocaml/xapi-idl/memory/memory.ml | 3 --- 1 file changed, 3 deletions(-) diff --git a/ocaml/xapi-idl/memory/memory.ml b/ocaml/xapi-idl/memory/memory.ml index 4d0b0e0043..99951f7e3e 100644 --- a/ocaml/xapi-idl/memory/memory.ml +++ b/ocaml/xapi-idl/memory/memory.ml @@ -192,9 +192,6 @@ module Memory_model (D : MEMORY_MODEL_DATA) = struct static_max_mib --- Int64.of_int video_mib +++ D.shim_mib static_max_mib let build_start_mib static_max_mib target_mib video_mib = - D.extra_internal_mib - +++ D.extra_external_mib - +++ if D.can_start_ballooned_down then target_mib --- Int64.of_int video_mib +++ D.shim_mib target_mib else From a4bc2bbe16374808cf1d1255bf07adfca14f2554 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Fri, 19 Dec 2025 11:34:26 +0000 Subject: [PATCH 09/10] CA-422187: more accurate claims and debug messages MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We noticed that xenguest releases 32 unused pages from the domain's claim. These are from the low 1MiB video range, so avoid requesting it. Also always print memory free statistics when `wait_xen_free_mem` is called. Turns out `scrub_pages` is always 0, since this never got implemented in Xen (it is hardcoded to 0). 
Signed-off-by: Edwin Török --- ocaml/xenopsd/xc/domain.ml | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/ocaml/xenopsd/xc/domain.ml b/ocaml/xenopsd/xc/domain.ml index 56137d3669..73ec591178 100644 --- a/ocaml/xenopsd/xc/domain.ml +++ b/ocaml/xenopsd/xc/domain.ml @@ -244,7 +244,10 @@ let wait_xen_free_mem ~xc ?(maximum_wait_time_seconds = 64) required_memory_kib in (* At exponentially increasing intervals, write *) (* a debug message saying how long we've waited: *) - if is_power_of_2 accumulated_wait_time_seconds then + if + accumulated_wait_time_seconds = 0 + || is_power_of_2 accumulated_wait_time_seconds + then debug "Waited %i second(s) for memory to become available: %Ld KiB free, %Ld \ KiB scrub, %Ld KiB required" @@ -1000,7 +1003,7 @@ let numa_placement domid ~vcpus ~cores ~memory affinity = __FUNCTION__ domid ; None in - let nr_pages = Int64.div memory 4096L |> Int64.to_int in + let nr_pages = (Int64.div memory 4096L |> Int64.to_int) - 32 in try D.debug "NUMAClaim domid %d: local claim on node %d: %d pages" domid node nr_pages ; @@ -1129,7 +1132,9 @@ let build_pre ~xc ~xs ~vcpus ~memory ~hard_affinity domid = A failure here is a hard failure: we'd fail allocating memory later anyway *) - let nr_pages = Int64.div memory 4096L |> Int64.to_int in + let nr_pages = + (Int64.div memory 4096L |> Int64.to_int) - 32 + in let xcext = Xenctrlext.get_handle () in D.debug "NUMAClaim domid %d: global claim: %d pages" domid nr_pages ; From 577e3a61d3b63824c0326d95ef2d5a494c54fda5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Fri, 19 Dec 2025 17:32:52 +0000 Subject: [PATCH 10/10] CA-422187: create an emergency reserve of pages (workaround) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Do not let domains fully use up all available memory on the host, we have too many unexplained bugs in this area. As a workaround try to reserve some amount (e.g. 
256MiB) that domains cannot normally use from XAPI's point of view. Then during parallel domain construction this emergency reserve can be used by Xen. Signed-off-by: Edwin Török --- doc/content/design/numa.md | 2 +- doc/content/xenopsd/walkthroughs/VM.build/Domain.build.md | 2 +- ocaml/libs/xenctrl-ext/xenctrlext.ml | 5 +++++ ocaml/libs/xenctrl-ext/xenctrlext.mli | 2 ++ ocaml/squeezed/src/dune | 1 + ocaml/squeezed/src/squeeze_xen.ml | 4 ++-- ocaml/xcp-rrdd/bin/rrdp-cpu/dune | 1 + ocaml/xcp-rrdd/bin/rrdp-cpu/rrdp_cpu.ml | 6 +++--- ocaml/xcp-rrdd/bin/rrdp-squeezed/dune | 1 + ocaml/xcp-rrdd/bin/rrdp-squeezed/rrdp_squeezed.ml | 2 +- ocaml/xenopsd/xc/domain.ml | 4 ++-- ocaml/xenopsd/xc/emu_manager.ml | 2 +- ocaml/xenopsd/xc/memory_breakdown.ml | 2 +- ocaml/xenopsd/xc/memory_summary.ml | 2 +- ocaml/xenopsd/xc/xenguestHelper.ml | 2 +- ocaml/xenopsd/xc/xenops_server_xen.ml | 2 +- 16 files changed, 25 insertions(+), 15 deletions(-) diff --git a/doc/content/design/numa.md b/doc/content/design/numa.md index fa1917b3c5..b9eb0adabe 100644 --- a/doc/content/design/numa.md +++ b/doc/content/design/numa.md @@ -112,7 +112,7 @@ This function receives as arguments a domain ID and the number of nodes this domain is using (acquired using `domain_get_numa_info_node_pages`) The number of NUMA nodes of the host (not domain) is reported by -`Xenctrl.physinfo` which returns a value of type `physinfo`. +`Xenctrlext.physinfo` which returns a value of type `physinfo`. 
```diff index b4579862ff..491bd3fc73 100644 diff --git a/doc/content/xenopsd/walkthroughs/VM.build/Domain.build.md b/doc/content/xenopsd/walkthroughs/VM.build/Domain.build.md index ba4274e243..e7d407f0e0 100644 --- a/doc/content/xenopsd/walkthroughs/VM.build/Domain.build.md +++ b/doc/content/xenopsd/walkthroughs/VM.build/Domain.build.md @@ -64,7 +64,7 @@ to call: [wait_xen_free_mem](https://github.com/xapi-project/xen-api/blob/master/ocaml/xenopsd/xc/domain.ml#L236-L272) to wait (if necessary), for the Xen memory scrubber to catch up reclaiming memory. It - 1. calls `Xenctrl.physinfo` which returns: + 1. calls `Xenctrlext.physinfo` which returns: - `hostinfo.free_pages` - the free and already scrubbed pages (available) - `host.scrub_pages` - the not yet scrubbed pages (not yet available) 2. repeats this until a timeout as long as `free_pages` is *lower* diff --git a/ocaml/libs/xenctrl-ext/xenctrlext.ml b/ocaml/libs/xenctrl-ext/xenctrlext.ml index 8922e49046..b481f36468 100644 --- a/ocaml/libs/xenctrl-ext/xenctrlext.ml +++ b/ocaml/libs/xenctrl-ext/xenctrlext.ml @@ -131,3 +131,8 @@ let domain_claim_pages handle domid ?(numa_node = NumaNode.none) nr_pages = let get_nr_nodes handle = let info = numainfo handle in Array.length info.memory + +let physinfo xc = + let info = Xenctrl.physinfo xc in + let emergency_reserve_pages = Nativeint.shift_left 1n 16 in + {info with free_pages= Nativeint.sub info.free_pages emergency_reserve_pages} diff --git a/ocaml/libs/xenctrl-ext/xenctrlext.mli b/ocaml/libs/xenctrl-ext/xenctrlext.mli index f9b8b49bb8..e6bb1a1f2c 100644 --- a/ocaml/libs/xenctrl-ext/xenctrlext.mli +++ b/ocaml/libs/xenctrl-ext/xenctrlext.mli @@ -107,3 +107,5 @@ val domain_claim_pages : handle -> domid -> ?numa_node:NumaNode.t -> int -> unit val get_nr_nodes : handle -> int (** Returns the count of NUMA nodes available in the system. 
*) + +val physinfo : Xenctrl.handle -> Xenctrl.physinfo diff --git a/ocaml/squeezed/src/dune b/ocaml/squeezed/src/dune index 4e9fe64309..3e4bdd5d3c 100644 --- a/ocaml/squeezed/src/dune +++ b/ocaml/squeezed/src/dune @@ -13,6 +13,7 @@ threads.posix unix xenctrl + xenctrl_ext xenstore xenstore.unix xenstore_transport diff --git a/ocaml/squeezed/src/squeeze_xen.ml b/ocaml/squeezed/src/squeeze_xen.ml index 31bac6df75..7808730139 100644 --- a/ocaml/squeezed/src/squeeze_xen.ml +++ b/ocaml/squeezed/src/squeeze_xen.ml @@ -579,7 +579,7 @@ let make_host ~verbose ~xc = pages -- this might cause something else to fail (eg domain builder?) *) while Int64.div - ((Xenctrl.physinfo xc).Xenctrl.scrub_pages |> Int64.of_nativeint) + ((Xenctrlext.physinfo xc).Xenctrl.scrub_pages |> Int64.of_nativeint) 1024L <> 0L do @@ -762,7 +762,7 @@ let make_host ~verbose ~xc = (* For the host free memory we sum the free pages and the pages needing scrubbing: we don't want to adjust targets simply because the scrubber is slow. 
*) - let physinfo = Xenctrl.physinfo xc in + let physinfo = Xenctrlext.physinfo xc in let free_pages_kib = Xenctrl.pages_to_kib (Int64.of_nativeint physinfo.Xenctrl.free_pages) and scrub_pages_kib = diff --git a/ocaml/xcp-rrdd/bin/rrdp-cpu/dune b/ocaml/xcp-rrdd/bin/rrdp-cpu/dune index ced826c63a..de3ad8d497 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-cpu/dune +++ b/ocaml/xcp-rrdd/bin/rrdp-cpu/dune @@ -11,6 +11,7 @@ xapi-rrd xapi-stdext-unix xenctrl + xenctrl_ext ) ) diff --git a/ocaml/xcp-rrdd/bin/rrdp-cpu/rrdp_cpu.ml b/ocaml/xcp-rrdd/bin/rrdp-cpu/rrdp_cpu.ml index a677fd1746..b64f04db0a 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-cpu/rrdp_cpu.ml +++ b/ocaml/xcp-rrdd/bin/rrdp-cpu/rrdp_cpu.ml @@ -184,7 +184,7 @@ let dss_pcpus xc = let len = Array.length !physcpus in let newinfos = if len = 0 then ( - let physinfo = Xenctrl.physinfo xc in + let physinfo = Xenctrlext.physinfo xc in let pcpus = physinfo.Xenctrl.nr_cpus in physcpus := if pcpus > 0 then Array.make pcpus 0L else [||] ; Xenctrl.pcpu_info xc pcpus @@ -237,7 +237,7 @@ let count_power_state_running_domains domains = 0 domains let dss_hostload xc domains = - let physinfo = Xenctrl.physinfo xc in + let physinfo = Xenctrlext.physinfo xc in let pcpus = physinfo.Xenctrl.nr_cpus in let rec sum acc n f = match n with n when n >= 0 -> sum (acc + f n) (n - 1) f | _ -> acc @@ -298,7 +298,7 @@ let _ = let _, domains, _ = Xenctrl_lib.domain_snapshot xc in Process.initialise () ; (* Share one page per PCPU and dom each *) - let physinfo = Xenctrl.physinfo xc in + let physinfo = Xenctrlext.physinfo xc in let shared_page_count = physinfo.Xenctrl.nr_cpus + Int.max Rrd_interface.max_supported_vms (List.length domains) diff --git a/ocaml/xcp-rrdd/bin/rrdp-squeezed/dune b/ocaml/xcp-rrdd/bin/rrdp-squeezed/dune index 75c8e1f5ab..ee5f217f13 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-squeezed/dune +++ b/ocaml/xcp-rrdd/bin/rrdp-squeezed/dune @@ -13,6 +13,7 @@ xapi-log xapi-rrd xenctrl + xenctrl_ext xenstore xenstore.unix xenstore_transport diff --git 
a/ocaml/xcp-rrdd/bin/rrdp-squeezed/rrdp_squeezed.ml b/ocaml/xcp-rrdd/bin/rrdp-squeezed/rrdp_squeezed.ml index df49dca259..09902b1e0f 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-squeezed/rrdp_squeezed.ml +++ b/ocaml/xcp-rrdd/bin/rrdp-squeezed/rrdp_squeezed.ml @@ -169,7 +169,7 @@ let generate_host_sources xc counters = in let memory_reclaimed = bytes_of_kib memory_reclaimed in let memory_possibly_reclaimed = bytes_of_kib memory_possibly_reclaimed in - let physinfo = Xenctrl.physinfo xc in + let physinfo = Xenctrlext.physinfo xc in let total_kib = Xenctrl.pages_to_kib (Int64.of_nativeint physinfo.Xenctrl.total_pages) in diff --git a/ocaml/xenopsd/xc/domain.ml b/ocaml/xenopsd/xc/domain.ml index 73ec591178..e31b07081f 100644 --- a/ocaml/xenopsd/xc/domain.ml +++ b/ocaml/xenopsd/xc/domain.ml @@ -235,7 +235,7 @@ let wait_xen_free_mem ~xc ?(maximum_wait_time_seconds = 64) required_memory_kib : bool = let open Memory in let rec wait accumulated_wait_time_seconds = - let host_info = Xenctrl.physinfo xc in + let host_info = Xenctrlext.physinfo xc in let free_memory_kib = kib_of_pages (Int64.of_nativeint host_info.Xenctrl.free_pages) in @@ -275,7 +275,7 @@ let wait_xen_free_mem ~xc ?(maximum_wait_time_seconds = 64) required_memory_kib let make ~xc ~xs vm_info vcpus domain_config uuid final_uuid no_sharept num_of_vbds num_of_vifs = let open Xenctrl in - let host_info = Xenctrl.physinfo xc in + let host_info = Xenctrlext.physinfo xc in (* Confirm that the running hypervisor supports a specific capability. 
*) let assert_capability cap ~on_error = diff --git a/ocaml/xenopsd/xc/emu_manager.ml b/ocaml/xenopsd/xc/emu_manager.ml index 9f05127d4c..501aedb55d 100644 --- a/ocaml/xenopsd/xc/emu_manager.ml +++ b/ocaml/xenopsd/xc/emu_manager.ml @@ -205,7 +205,7 @@ let non_debug_receive ?debug_callback cnx = let open Memory in let open Int64 in let open Xenctrl in - let p = Xenctrl.physinfo xc in + let p = Xenctrlext.physinfo xc in error "Memory F %Ld KiB S %Ld KiB T %Ld MiB" (p.free_pages |> of_nativeint |> kib_of_pages) (p.scrub_pages |> of_nativeint |> kib_of_pages) diff --git a/ocaml/xenopsd/xc/memory_breakdown.ml b/ocaml/xenopsd/xc/memory_breakdown.ml index d5c3dbc79f..21b53bcc91 100644 --- a/ocaml/xenopsd/xc/memory_breakdown.ml +++ b/ocaml/xenopsd/xc/memory_breakdown.ml @@ -217,7 +217,7 @@ let print_memory_field_names () = (** Prints memory field values to the console. *) let print_memory_field_values xc xs = - let host = Xenctrl.physinfo xc in + let host = Xenctrlext.physinfo xc in let control_domain_info = Xenctrl.domain_getinfo xc 0 in let control_domain_id = control_domain_info.Xenctrl.handle in let guests = diff --git a/ocaml/xenopsd/xc/memory_summary.ml b/ocaml/xenopsd/xc/memory_summary.ml index 21c3b8add6..3b16a42701 100644 --- a/ocaml/xenopsd/xc/memory_summary.ml +++ b/ocaml/xenopsd/xc/memory_summary.ml @@ -38,7 +38,7 @@ let _ = finished := !delay < 0. ; if !delay > 0. 
then Unix.sleepf !delay ; flush stdout ; - let physinfo = Xenctrl.physinfo xc in + let physinfo = Xenctrlext.physinfo xc in let one_page = 4096L in let total_pages = Int64.of_nativeint physinfo.Xenctrl.total_pages in let free_pages = diff --git a/ocaml/xenopsd/xc/xenguestHelper.ml b/ocaml/xenopsd/xc/xenguestHelper.ml index 06a28d92f3..64c23b12c9 100644 --- a/ocaml/xenopsd/xc/xenguestHelper.ml +++ b/ocaml/xenopsd/xc/xenguestHelper.ml @@ -205,7 +205,7 @@ let non_debug_receive ?debug_callback cnx = let open Memory in let open Int64 in let open Xenctrl in - let p = Xenctrl.physinfo xc in + let p = Xenctrlext.physinfo xc in (match log_type with Syslog.Debug -> debug | _ -> error) "Memory F %Ld KiB S %Ld KiB T %Ld MiB" (p.free_pages |> of_nativeint |> kib_of_pages) diff --git a/ocaml/xenopsd/xc/xenops_server_xen.ml b/ocaml/xenopsd/xc/xenops_server_xen.ml index 8b4d0a4b40..d3011f2d8f 100644 --- a/ocaml/xenopsd/xc/xenops_server_xen.ml +++ b/ocaml/xenopsd/xc/xenops_server_xen.ml @@ -1064,7 +1064,7 @@ module HOST = struct let pages_per_mib = 256L in Int64.( div - ((Xenctrl.physinfo xc).Xenctrl.total_pages |> of_nativeint) + ((Xenctrlext.physinfo xc).Xenctrl.total_pages |> of_nativeint) pages_per_mib ) )