/// PodStatus represents information about the status of a pod. Status may trail the
/// actual state of a system, especially if the node that hosts the pod cannot contact
/// the control plane.
///
/// All fields are optional on the wire; a missing key deserializes to `None`.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct PodStatus {
    /// AllocatedResources is the total requests allocated for this pod by the node. If pod-level requests are not set, this will be the total requests aggregated across containers in the pod.
    pub allocated_resources: Option<std::collections::BTreeMap<std::string::String, crate::apimachinery::pkg::api::resource::Quantity>>,

    /// Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
    pub conditions: Option<std::vec::Vec<crate::api::core::v1::PodCondition>>,

    /// Statuses of containers in this pod. Each container in the pod should have at most one status in this list.
    pub container_statuses: Option<std::vec::Vec<crate::api::core::v1::ContainerStatus>>,

    /// Statuses for any ephemeral containers that have run in this pod.
    pub ephemeral_container_statuses: Option<std::vec::Vec<crate::api::core::v1::ContainerStatus>>,

    /// Status of extended resource claim backed by DRA.
    pub extended_resource_claim_status: Option<crate::api::core::v1::PodExtendedResourceClaimStatus>,

    /// hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet.
    pub host_ip: Option<std::string::String>,

    /// hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must match the hostIP field.
    pub host_ips: Option<std::vec::Vec<crate::api::core::v1::HostIP>>,

    /// Statuses of init containers in this pod. Each init container in the pod should have at most one status in this list.
    pub init_container_statuses: Option<std::vec::Vec<crate::api::core::v1::ContainerStatus>>,

    /// A human readable message indicating details about why the pod is in this condition.
    pub message: Option<std::string::String>,

    /// nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods.
    pub nominated_node_name: Option<std::string::String>,

    /// If set, this represents the .metadata.generation that the pod status was set based upon. The PodObservedGenerationTracking feature gate must be enabled to use this field.
    pub observed_generation: Option<i64>,

    /// The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle (Pending, Running, Succeeded, Failed, or Unknown).
    pub phase: Option<std::string::String>,

    /// podIP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.
    pub pod_ip: Option<std::string::String>,

    /// podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field.
    pub pod_ips: Option<std::vec::Vec<crate::api::core::v1::PodIP>>,

    /// The Quality of Service (QOS) classification assigned to the pod based on resource requirements.
    pub qos_class: Option<std::string::String>,

    /// A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'
    pub reason: Option<std::string::String>,

    /// Status of resources resize desired for pod's containers. Deprecated: resize status moved to the PodResizePending and PodResizeInProgress pod conditions.
    pub resize: Option<std::string::String>,

    /// Status of resource claims.
    pub resource_claim_statuses: Option<std::vec::Vec<crate::api::core::v1::PodResourceClaimStatus>>,

    /// Resources represents the compute resource requests and limits that have been applied at the pod level if pod-level requests or limits are set in PodSpec.Resources.
    pub resources: Option<crate::api::core::v1::ResourceRequirements>,

    /// RFC 3339 date and time at which the object was acknowledged by the Kubelet.
    pub start_time: Option<crate::apimachinery::pkg::apis::meta::v1::Time>,
}
70
71impl crate::DeepMerge for PodStatus {
72 fn merge_from(&mut self, other: Self) {
73 crate::merge_strategies::map::granular(&mut self.allocated_resources, other.allocated_resources, |current_item, other_item| {
74 crate::DeepMerge::merge_from(current_item, other_item);
75 });
76 crate::merge_strategies::list::map(
77 &mut self.conditions,
78 other.conditions,
79 &[|lhs, rhs| lhs.type_ == rhs.type_],
80 |current_item, other_item| {
81 crate::DeepMerge::merge_from(current_item, other_item);
82 },
83 );
84 crate::merge_strategies::list::atomic(&mut self.container_statuses, other.container_statuses);
85 crate::merge_strategies::list::atomic(&mut self.ephemeral_container_statuses, other.ephemeral_container_statuses);
86 crate::DeepMerge::merge_from(&mut self.extended_resource_claim_status, other.extended_resource_claim_status);
87 crate::DeepMerge::merge_from(&mut self.host_ip, other.host_ip);
88 crate::merge_strategies::list::map(
89 &mut self.host_ips,
90 other.host_ips,
91 &[|lhs, rhs| lhs.ip == rhs.ip],
92 |current_item, other_item| {
93 crate::DeepMerge::merge_from(current_item, other_item);
94 },
95 );
96 crate::merge_strategies::list::atomic(&mut self.init_container_statuses, other.init_container_statuses);
97 crate::DeepMerge::merge_from(&mut self.message, other.message);
98 crate::DeepMerge::merge_from(&mut self.nominated_node_name, other.nominated_node_name);
99 crate::DeepMerge::merge_from(&mut self.observed_generation, other.observed_generation);
100 crate::DeepMerge::merge_from(&mut self.phase, other.phase);
101 crate::DeepMerge::merge_from(&mut self.pod_ip, other.pod_ip);
102 crate::merge_strategies::list::map(
103 &mut self.pod_ips,
104 other.pod_ips,
105 &[|lhs, rhs| lhs.ip == rhs.ip],
106 |current_item, other_item| {
107 crate::DeepMerge::merge_from(current_item, other_item);
108 },
109 );
110 crate::DeepMerge::merge_from(&mut self.qos_class, other.qos_class);
111 crate::DeepMerge::merge_from(&mut self.reason, other.reason);
112 crate::DeepMerge::merge_from(&mut self.resize, other.resize);
113 crate::merge_strategies::list::map(
114 &mut self.resource_claim_statuses,
115 other.resource_claim_statuses,
116 &[|lhs, rhs| lhs.name == rhs.name],
117 |current_item, other_item| {
118 crate::DeepMerge::merge_from(current_item, other_item);
119 },
120 );
121 crate::DeepMerge::merge_from(&mut self.resources, other.resources);
122 crate::DeepMerge::merge_from(&mut self.start_time, other.start_time);
123 }
124}
125
impl<'de> crate::serde::Deserialize<'de> for PodStatus {
    /// Deserialize a `PodStatus` from a JSON object keyed by the Kubernetes
    /// camelCase wire names. Every field is optional; unknown keys are skipped.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
        // One variant per known wire key; `Other` absorbs unrecognized keys so
        // deserialization stays forward-compatible with newer API servers.
        #[allow(non_camel_case_types)]
        enum Field {
            Key_allocated_resources,
            Key_conditions,
            Key_container_statuses,
            Key_ephemeral_container_statuses,
            Key_extended_resource_claim_status,
            Key_host_ip,
            Key_host_ips,
            Key_init_container_statuses,
            Key_message,
            Key_nominated_node_name,
            Key_observed_generation,
            Key_phase,
            Key_pod_ip,
            Key_pod_ips,
            Key_qos_class,
            Key_reason,
            Key_resize,
            Key_resource_claim_statuses,
            Key_resources,
            Key_start_time,
            Other,
        }

        impl<'de> crate::serde::Deserialize<'de> for Field {
            fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
                struct Visitor;

                impl crate::serde::de::Visitor<'_> for Visitor {
                    type Value = Field;

                    fn expecting(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
                        f.write_str("field identifier")
                    }

                    // Map each camelCase wire name to its Field variant;
                    // anything else falls through to `Other` (ignored later).
                    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: crate::serde::de::Error {
                        Ok(match v {
                            "allocatedResources" => Field::Key_allocated_resources,
                            "conditions" => Field::Key_conditions,
                            "containerStatuses" => Field::Key_container_statuses,
                            "ephemeralContainerStatuses" => Field::Key_ephemeral_container_statuses,
                            "extendedResourceClaimStatus" => Field::Key_extended_resource_claim_status,
                            "hostIP" => Field::Key_host_ip,
                            "hostIPs" => Field::Key_host_ips,
                            "initContainerStatuses" => Field::Key_init_container_statuses,
                            "message" => Field::Key_message,
                            "nominatedNodeName" => Field::Key_nominated_node_name,
                            "observedGeneration" => Field::Key_observed_generation,
                            "phase" => Field::Key_phase,
                            "podIP" => Field::Key_pod_ip,
                            "podIPs" => Field::Key_pod_ips,
                            "qosClass" => Field::Key_qos_class,
                            "reason" => Field::Key_reason,
                            "resize" => Field::Key_resize,
                            "resourceClaimStatuses" => Field::Key_resource_claim_statuses,
                            "resources" => Field::Key_resources,
                            "startTime" => Field::Key_start_time,
                            _ => Field::Other,
                        })
                    }
                }

                deserializer.deserialize_identifier(Visitor)
            }
        }

        struct Visitor;

        impl<'de> crate::serde::de::Visitor<'de> for Visitor {
            type Value = PodStatus;

            fn expecting(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
                f.write_str("PodStatus")
            }

            // Accumulate each recognized key into its local; keys that never
            // appear leave their local as `None`, matching the struct default.
            fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: crate::serde::de::MapAccess<'de> {
                let mut value_allocated_resources: Option<std::collections::BTreeMap<std::string::String, crate::apimachinery::pkg::api::resource::Quantity>> = None;
                let mut value_conditions: Option<std::vec::Vec<crate::api::core::v1::PodCondition>> = None;
                let mut value_container_statuses: Option<std::vec::Vec<crate::api::core::v1::ContainerStatus>> = None;
                let mut value_ephemeral_container_statuses: Option<std::vec::Vec<crate::api::core::v1::ContainerStatus>> = None;
                let mut value_extended_resource_claim_status: Option<crate::api::core::v1::PodExtendedResourceClaimStatus> = None;
                let mut value_host_ip: Option<std::string::String> = None;
                let mut value_host_ips: Option<std::vec::Vec<crate::api::core::v1::HostIP>> = None;
                let mut value_init_container_statuses: Option<std::vec::Vec<crate::api::core::v1::ContainerStatus>> = None;
                let mut value_message: Option<std::string::String> = None;
                let mut value_nominated_node_name: Option<std::string::String> = None;
                let mut value_observed_generation: Option<i64> = None;
                let mut value_phase: Option<std::string::String> = None;
                let mut value_pod_ip: Option<std::string::String> = None;
                let mut value_pod_ips: Option<std::vec::Vec<crate::api::core::v1::PodIP>> = None;
                let mut value_qos_class: Option<std::string::String> = None;
                let mut value_reason: Option<std::string::String> = None;
                let mut value_resize: Option<std::string::String> = None;
                let mut value_resource_claim_statuses: Option<std::vec::Vec<crate::api::core::v1::PodResourceClaimStatus>> = None;
                let mut value_resources: Option<crate::api::core::v1::ResourceRequirements> = None;
                let mut value_start_time: Option<crate::apimachinery::pkg::apis::meta::v1::Time> = None;

                while let Some(key) = crate::serde::de::MapAccess::next_key::<Field>(&mut map)? {
                    match key {
                        Field::Key_allocated_resources => value_allocated_resources = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_conditions => value_conditions = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_container_statuses => value_container_statuses = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_ephemeral_container_statuses => value_ephemeral_container_statuses = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_extended_resource_claim_status => value_extended_resource_claim_status = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_host_ip => value_host_ip = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_host_ips => value_host_ips = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_init_container_statuses => value_init_container_statuses = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_message => value_message = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_nominated_node_name => value_nominated_node_name = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_observed_generation => value_observed_generation = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_phase => value_phase = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_pod_ip => value_pod_ip = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_pod_ips => value_pod_ips = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_qos_class => value_qos_class = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_reason => value_reason = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_resize => value_resize = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_resource_claim_statuses => value_resource_claim_statuses = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_resources => value_resources = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_start_time => value_start_time = crate::serde::de::MapAccess::next_value(&mut map)?,
                        // Unknown key: consume and discard its value so the
                        // map stream stays in sync.
                        Field::Other => { let _: crate::serde::de::IgnoredAny = crate::serde::de::MapAccess::next_value(&mut map)?; },
                    }
                }

                Ok(PodStatus {
                    allocated_resources: value_allocated_resources,
                    conditions: value_conditions,
                    container_statuses: value_container_statuses,
                    ephemeral_container_statuses: value_ephemeral_container_statuses,
                    extended_resource_claim_status: value_extended_resource_claim_status,
                    host_ip: value_host_ip,
                    host_ips: value_host_ips,
                    init_container_statuses: value_init_container_statuses,
                    message: value_message,
                    nominated_node_name: value_nominated_node_name,
                    observed_generation: value_observed_generation,
                    phase: value_phase,
                    pod_ip: value_pod_ip,
                    pod_ips: value_pod_ips,
                    qos_class: value_qos_class,
                    reason: value_reason,
                    resize: value_resize,
                    resource_claim_statuses: value_resource_claim_statuses,
                    resources: value_resources,
                    start_time: value_start_time,
                })
            }
        }

        // The field-name list lets self-describing formats validate keys up front.
        deserializer.deserialize_struct(
            "PodStatus",
            &[
                "allocatedResources",
                "conditions",
                "containerStatuses",
                "ephemeralContainerStatuses",
                "extendedResourceClaimStatus",
                "hostIP",
                "hostIPs",
                "initContainerStatuses",
                "message",
                "nominatedNodeName",
                "observedGeneration",
                "phase",
                "podIP",
                "podIPs",
                "qosClass",
                "reason",
                "resize",
                "resourceClaimStatuses",
                "resources",
                "startTime",
            ],
            Visitor,
        )
    }
}
305
306impl crate::serde::Serialize for PodStatus {
307 fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: crate::serde::Serializer {
308 let mut state = serializer.serialize_struct(
309 "PodStatus",
310 self.allocated_resources.as_ref().map_or(0, |_| 1) +
311 self.conditions.as_ref().map_or(0, |_| 1) +
312 self.container_statuses.as_ref().map_or(0, |_| 1) +
313 self.ephemeral_container_statuses.as_ref().map_or(0, |_| 1) +
314 self.extended_resource_claim_status.as_ref().map_or(0, |_| 1) +
315 self.host_ip.as_ref().map_or(0, |_| 1) +
316 self.host_ips.as_ref().map_or(0, |_| 1) +
317 self.init_container_statuses.as_ref().map_or(0, |_| 1) +
318 self.message.as_ref().map_or(0, |_| 1) +
319 self.nominated_node_name.as_ref().map_or(0, |_| 1) +
320 self.observed_generation.as_ref().map_or(0, |_| 1) +
321 self.phase.as_ref().map_or(0, |_| 1) +
322 self.pod_ip.as_ref().map_or(0, |_| 1) +
323 self.pod_ips.as_ref().map_or(0, |_| 1) +
324 self.qos_class.as_ref().map_or(0, |_| 1) +
325 self.reason.as_ref().map_or(0, |_| 1) +
326 self.resize.as_ref().map_or(0, |_| 1) +
327 self.resource_claim_statuses.as_ref().map_or(0, |_| 1) +
328 self.resources.as_ref().map_or(0, |_| 1) +
329 self.start_time.as_ref().map_or(0, |_| 1),
330 )?;
331 if let Some(value) = &self.allocated_resources {
332 crate::serde::ser::SerializeStruct::serialize_field(&mut state, "allocatedResources", value)?;
333 }
334 if let Some(value) = &self.conditions {
335 crate::serde::ser::SerializeStruct::serialize_field(&mut state, "conditions", value)?;
336 }
337 if let Some(value) = &self.container_statuses {
338 crate::serde::ser::SerializeStruct::serialize_field(&mut state, "containerStatuses", value)?;
339 }
340 if let Some(value) = &self.ephemeral_container_statuses {
341 crate::serde::ser::SerializeStruct::serialize_field(&mut state, "ephemeralContainerStatuses", value)?;
342 }
343 if let Some(value) = &self.extended_resource_claim_status {
344 crate::serde::ser::SerializeStruct::serialize_field(&mut state, "extendedResourceClaimStatus", value)?;
345 }
346 if let Some(value) = &self.host_ip {
347 crate::serde::ser::SerializeStruct::serialize_field(&mut state, "hostIP", value)?;
348 }
349 if let Some(value) = &self.host_ips {
350 crate::serde::ser::SerializeStruct::serialize_field(&mut state, "hostIPs", value)?;
351 }
352 if let Some(value) = &self.init_container_statuses {
353 crate::serde::ser::SerializeStruct::serialize_field(&mut state, "initContainerStatuses", value)?;
354 }
355 if let Some(value) = &self.message {
356 crate::serde::ser::SerializeStruct::serialize_field(&mut state, "message", value)?;
357 }
358 if let Some(value) = &self.nominated_node_name {
359 crate::serde::ser::SerializeStruct::serialize_field(&mut state, "nominatedNodeName", value)?;
360 }
361 if let Some(value) = &self.observed_generation {
362 crate::serde::ser::SerializeStruct::serialize_field(&mut state, "observedGeneration", value)?;
363 }
364 if let Some(value) = &self.phase {
365 crate::serde::ser::SerializeStruct::serialize_field(&mut state, "phase", value)?;
366 }
367 if let Some(value) = &self.pod_ip {
368 crate::serde::ser::SerializeStruct::serialize_field(&mut state, "podIP", value)?;
369 }
370 if let Some(value) = &self.pod_ips {
371 crate::serde::ser::SerializeStruct::serialize_field(&mut state, "podIPs", value)?;
372 }
373 if let Some(value) = &self.qos_class {
374 crate::serde::ser::SerializeStruct::serialize_field(&mut state, "qosClass", value)?;
375 }
376 if let Some(value) = &self.reason {
377 crate::serde::ser::SerializeStruct::serialize_field(&mut state, "reason", value)?;
378 }
379 if let Some(value) = &self.resize {
380 crate::serde::ser::SerializeStruct::serialize_field(&mut state, "resize", value)?;
381 }
382 if let Some(value) = &self.resource_claim_statuses {
383 crate::serde::ser::SerializeStruct::serialize_field(&mut state, "resourceClaimStatuses", value)?;
384 }
385 if let Some(value) = &self.resources {
386 crate::serde::ser::SerializeStruct::serialize_field(&mut state, "resources", value)?;
387 }
388 if let Some(value) = &self.start_time {
389 crate::serde::ser::SerializeStruct::serialize_field(&mut state, "startTime", value)?;
390 }
391 crate::serde::ser::SerializeStruct::end(state)
392 }
393}
394
#[cfg(feature = "schemars")]
impl crate::schemars::JsonSchema for PodStatus {
    // Fully-qualified Kubernetes type name used as the schema identifier.
    fn schema_name() -> std::borrow::Cow<'static, str> {
        "io.k8s.api.core.v1.PodStatus".into()
    }

    /// Build the JSON Schema for `PodStatus`; nested API types are referenced
    /// via `subschema_for` so the generator can deduplicate definitions.
    fn json_schema(__gen: &mut crate::schemars::SchemaGenerator) -> crate::schemars::Schema {
        crate::schemars::json_schema!({
            "description": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.",
            "type": "object",
            "properties": {
                "allocatedResources": {
                    "description": "AllocatedResources is the total requests allocated for this pod by the node. If pod-level requests are not set, this will be the total requests aggregated across containers in the pod.",
                    "type": "object",
                    "additionalProperties": (__gen.subschema_for::<crate::apimachinery::pkg::api::resource::Quantity>()),
                },
                "conditions": {
                    "description": "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
                    "type": "array",
                    "items": (__gen.subschema_for::<crate::api::core::v1::PodCondition>()),
                },
                "containerStatuses": {
                    "description": "Statuses of containers in this pod. Each container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
                    "type": "array",
                    "items": (__gen.subschema_for::<crate::api::core::v1::ContainerStatus>()),
                },
                "ephemeralContainerStatuses": {
                    "description": "Statuses for any ephemeral containers that have run in this pod. Each ephemeral container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
                    "type": "array",
                    "items": (__gen.subschema_for::<crate::api::core::v1::ContainerStatus>()),
                },
                // Referenced subschema decorated with a field-level description.
                "extendedResourceClaimStatus": ({
                    let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::PodExtendedResourceClaimStatus>();
                    schema_obj.ensure_object().insert("description".into(), "Status of extended resource claim backed by DRA.".into());
                    schema_obj
                }),
                "hostIP": {
                    "description": "hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will not be updated even if there is a node is assigned to pod",
                    "type": "string",
                },
                "hostIPs": {
                    "description": "hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must match the hostIP field. This list is empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns means that HostIPs will not be updated even if there is a node is assigned to this pod.",
                    "type": "array",
                    "items": (__gen.subschema_for::<crate::api::core::v1::HostIP>()),
                },
                "initContainerStatuses": {
                    "description": "Statuses of init containers in this pod. The most recent successful non-restartable init container will have ready = true, the most recently started container will have startTime set. Each init container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status",
                    "type": "array",
                    "items": (__gen.subschema_for::<crate::api::core::v1::ContainerStatus>()),
                },
                "message": {
                    "description": "A human readable message indicating details about why the pod is in this condition.",
                    "type": "string",
                },
                "nominatedNodeName": {
                    "description": "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.",
                    "type": "string",
                },
                "observedGeneration": {
                    "description": "If set, this represents the .metadata.generation that the pod status was set based upon. The PodObservedGenerationTracking feature gate must be enabled to use this field.",
                    "type": "integer",
                    "format": "int64",
                },
                "phase": {
                    "description": "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase",
                    "type": "string",
                },
                "podIP": {
                    "description": "podIP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.",
                    "type": "string",
                },
                "podIPs": {
                    "description": "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.",
                    "type": "array",
                    "items": (__gen.subschema_for::<crate::api::core::v1::PodIP>()),
                },
                "qosClass": {
                    "description": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes",
                    "type": "string",
                },
                "reason": {
                    "description": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'",
                    "type": "string",
                },
                "resize": {
                    "description": "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\" Deprecated: Resize status is moved to two pod conditions PodResizePending and PodResizeInProgress. PodResizePending will track states where the spec has been resized, but the Kubelet has not yet allocated the resources. PodResizeInProgress will track in-progress resizes, and should be present whenever allocated resources != acknowledged resources.",
                    "type": "string",
                },
                "resourceClaimStatuses": {
                    "description": "Status of resource claims.",
                    "type": "array",
                    "items": (__gen.subschema_for::<crate::api::core::v1::PodResourceClaimStatus>()),
                },
                // Referenced subschema decorated with a field-level description.
                "resources": ({
                    let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::ResourceRequirements>();
                    schema_obj.ensure_object().insert("description".into(), "Resources represents the compute resource requests and limits that have been applied at the pod level if pod-level requests or limits are set in PodSpec.Resources".into());
                    schema_obj
                }),
                // Referenced subschema decorated with a field-level description.
                "startTime": ({
                    let mut schema_obj = __gen.subschema_for::<crate::apimachinery::pkg::apis::meta::v1::Time>();
                    schema_obj.ensure_object().insert("description".into(), "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.".into());
                    schema_obj
                }),
            },
        })
    }
}