/// PodSpec is a description of a pod.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct PodSpec {
    /// Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.
    pub active_deadline_seconds: Option<i64>,

    /// If specified, the pod's scheduling constraints.
    pub affinity: Option<crate::api::core::v1::Affinity>,

    /// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
    pub automount_service_account_token: Option<bool>,

    /// List of containers belonging to the pod. There must be at least one container in a Pod; this is the only non-optional list in the spec (serialized unconditionally, deserialized with a default of empty).
    pub containers: std::vec::Vec<crate::api::core::v1::Container>,

    /// Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.
    pub dns_config: Option<crate::api::core::v1::PodDNSConfig>,

    /// Set DNS policy for the pod. Defaults to "ClusterFirst". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
    pub dns_policy: Option<std::string::String>,

    /// EnableServiceLinks indicates whether information about services should be injected into the pod's environment variables.
    pub enable_service_links: Option<bool>,

    /// List of ephemeral containers run in this pod (e.g. for debugging); merged by container name.
    pub ephemeral_containers: Option<std::vec::Vec<crate::api::core::v1::EphemeralContainer>>,

    /// HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file; merged by IP address.
    pub host_aliases: Option<std::vec::Vec<crate::api::core::v1::HostAlias>>,

    /// Use the host's ipc namespace.
    pub host_ipc: Option<bool>,

    /// Host networking requested for this pod.
    pub host_network: Option<bool>,

    /// Use the host's pid namespace.
    pub host_pid: Option<bool>,

    /// Use the host's user namespace.
    pub host_users: Option<bool>,

    /// Specifies the hostname of the Pod. If not specified, the pod's hostname will be set to a system-defined value.
    pub hostname: Option<std::string::String>,

    /// Explicit override of the pod's hostname. NOTE(review): newer field — confirm exact semantics against the upstream API docs for this Kubernetes version.
    pub hostname_override: Option<std::string::String>,

    /// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this pod; merged by secret name.
    pub image_pull_secrets: Option<std::vec::Vec<crate::api::core::v1::LocalObjectReference>>,

    /// List of initialization containers belonging to the pod; merged by container name.
    pub init_containers: Option<std::vec::Vec<crate::api::core::v1::Container>>,

    /// NodeName indicates the node this pod is scheduled on.
    pub node_name: Option<std::string::String>,

    /// NodeSelector is a selector which must be true for the pod to fit on a node (key/value label pairs).
    pub node_selector: Option<std::collections::BTreeMap<std::string::String, std::string::String>>,

    /// Specifies the OS of the containers in the pod.
    pub os: Option<crate::api::core::v1::PodOS>,

    /// Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
    pub overhead: Option<std::collections::BTreeMap<std::string::String, crate::apimachinery::pkg::api::resource::Quantity>>,

    /// PreemptionPolicy is the policy for preempting pods with lower priority.
    pub preemption_policy: Option<std::string::String>,

    /// The priority value; higher values indicate higher priority.
    pub priority: Option<i32>,

    /// If specified, indicates the pod's priority class.
    pub priority_class_name: Option<std::string::String>,

    /// If specified, all readiness gates will be evaluated for pod readiness.
    pub readiness_gates: Option<std::vec::Vec<crate::api::core::v1::PodReadinessGate>>,

    /// ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start; merged by claim name.
    pub resource_claims: Option<std::vec::Vec<crate::api::core::v1::PodResourceClaim>>,

    /// Resources is the total amount of CPU and Memory resources required by all containers in the pod (pod-level resources).
    pub resources: Option<crate::api::core::v1::ResourceRequirements>,

    /// Restart policy for all containers within the pod. One of Always, OnFailure, Never.
    pub restart_policy: Option<std::string::String>,

    /// RuntimeClassName refers to a RuntimeClass object which should be used to run this pod.
    pub runtime_class_name: Option<std::string::String>,

    /// If specified, the pod will be dispatched by the named scheduler.
    pub scheduler_name: Option<std::string::String>,

    /// SchedulingGates is an opaque list of values that, if specified, block scheduling of the pod; merged by gate name.
    pub scheduling_gates: Option<std::vec::Vec<crate::api::core::v1::PodSchedulingGate>>,

    /// SecurityContext holds pod-level security attributes and common container settings.
    pub security_context: Option<crate::api::core::v1::PodSecurityContext>,

    /// Deprecated alias for service_account_name, kept for wire compatibility (serialized as "serviceAccount").
    pub service_account: Option<std::string::String>,

    /// ServiceAccountName is the name of the ServiceAccount to use to run this pod.
    pub service_account_name: Option<std::string::String>,

    /// If true the pod's hostname will be configured as the pod's FQDN rather than the leaf name.
    pub set_hostname_as_fqdn: Option<bool>,

    /// Share a single process namespace between all of the containers in the pod.
    pub share_process_namespace: Option<bool>,

    /// If specified, the fully qualified pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
    pub subdomain: Option<std::string::String>,

    /// Optional duration in seconds the pod needs to terminate gracefully.
    pub termination_grace_period_seconds: Option<i64>,

    /// If specified, the pod's tolerations.
    pub tolerations: Option<std::vec::Vec<crate::api::core::v1::Toleration>>,

    /// TopologySpreadConstraints describes how a group of pods ought to spread across topology domains.
    pub topology_spread_constraints: Option<std::vec::Vec<crate::api::core::v1::TopologySpreadConstraint>>,

    /// List of volumes that can be mounted by containers belonging to the pod; merged by volume name.
    pub volumes: Option<std::vec::Vec<crate::api::core::v1::Volume>>,

    /// Reference to the workload object this pod belongs to. NOTE(review): newer field — confirm semantics against the upstream API docs for this Kubernetes version.
    pub workload_ref: Option<crate::api::core::v1::WorkloadReference>,
}
148
149impl crate::DeepMerge for PodSpec {
150 fn merge_from(&mut self, other: Self) {
151 crate::DeepMerge::merge_from(&mut self.active_deadline_seconds, other.active_deadline_seconds);
152 crate::DeepMerge::merge_from(&mut self.affinity, other.affinity);
153 crate::DeepMerge::merge_from(&mut self.automount_service_account_token, other.automount_service_account_token);
154 crate::merge_strategies::list::map(
155 &mut self.containers,
156 other.containers,
157 &[|lhs, rhs| lhs.name == rhs.name],
158 |current_item, other_item| {
159 crate::DeepMerge::merge_from(current_item, other_item);
160 },
161 );
162 crate::DeepMerge::merge_from(&mut self.dns_config, other.dns_config);
163 crate::DeepMerge::merge_from(&mut self.dns_policy, other.dns_policy);
164 crate::DeepMerge::merge_from(&mut self.enable_service_links, other.enable_service_links);
165 crate::merge_strategies::list::map(
166 &mut self.ephemeral_containers,
167 other.ephemeral_containers,
168 &[|lhs, rhs| lhs.name == rhs.name],
169 |current_item, other_item| {
170 crate::DeepMerge::merge_from(current_item, other_item);
171 },
172 );
173 crate::merge_strategies::list::map(
174 &mut self.host_aliases,
175 other.host_aliases,
176 &[|lhs, rhs| lhs.ip == rhs.ip],
177 |current_item, other_item| {
178 crate::DeepMerge::merge_from(current_item, other_item);
179 },
180 );
181 crate::DeepMerge::merge_from(&mut self.host_ipc, other.host_ipc);
182 crate::DeepMerge::merge_from(&mut self.host_network, other.host_network);
183 crate::DeepMerge::merge_from(&mut self.host_pid, other.host_pid);
184 crate::DeepMerge::merge_from(&mut self.host_users, other.host_users);
185 crate::DeepMerge::merge_from(&mut self.hostname, other.hostname);
186 crate::DeepMerge::merge_from(&mut self.hostname_override, other.hostname_override);
187 crate::merge_strategies::list::map(
188 &mut self.image_pull_secrets,
189 other.image_pull_secrets,
190 &[|lhs, rhs| lhs.name == rhs.name],
191 |current_item, other_item| {
192 crate::DeepMerge::merge_from(current_item, other_item);
193 },
194 );
195 crate::merge_strategies::list::map(
196 &mut self.init_containers,
197 other.init_containers,
198 &[|lhs, rhs| lhs.name == rhs.name],
199 |current_item, other_item| {
200 crate::DeepMerge::merge_from(current_item, other_item);
201 },
202 );
203 crate::DeepMerge::merge_from(&mut self.node_name, other.node_name);
204 crate::merge_strategies::map::atomic(&mut self.node_selector, other.node_selector);
205 crate::DeepMerge::merge_from(&mut self.os, other.os);
206 crate::merge_strategies::map::granular(&mut self.overhead, other.overhead, |current_item, other_item| {
207 crate::DeepMerge::merge_from(current_item, other_item);
208 });
209 crate::DeepMerge::merge_from(&mut self.preemption_policy, other.preemption_policy);
210 crate::DeepMerge::merge_from(&mut self.priority, other.priority);
211 crate::DeepMerge::merge_from(&mut self.priority_class_name, other.priority_class_name);
212 crate::merge_strategies::list::atomic(&mut self.readiness_gates, other.readiness_gates);
213 crate::merge_strategies::list::map(
214 &mut self.resource_claims,
215 other.resource_claims,
216 &[|lhs, rhs| lhs.name == rhs.name],
217 |current_item, other_item| {
218 crate::DeepMerge::merge_from(current_item, other_item);
219 },
220 );
221 crate::DeepMerge::merge_from(&mut self.resources, other.resources);
222 crate::DeepMerge::merge_from(&mut self.restart_policy, other.restart_policy);
223 crate::DeepMerge::merge_from(&mut self.runtime_class_name, other.runtime_class_name);
224 crate::DeepMerge::merge_from(&mut self.scheduler_name, other.scheduler_name);
225 crate::merge_strategies::list::map(
226 &mut self.scheduling_gates,
227 other.scheduling_gates,
228 &[|lhs, rhs| lhs.name == rhs.name],
229 |current_item, other_item| {
230 crate::DeepMerge::merge_from(current_item, other_item);
231 },
232 );
233 crate::DeepMerge::merge_from(&mut self.security_context, other.security_context);
234 crate::DeepMerge::merge_from(&mut self.service_account, other.service_account);
235 crate::DeepMerge::merge_from(&mut self.service_account_name, other.service_account_name);
236 crate::DeepMerge::merge_from(&mut self.set_hostname_as_fqdn, other.set_hostname_as_fqdn);
237 crate::DeepMerge::merge_from(&mut self.share_process_namespace, other.share_process_namespace);
238 crate::DeepMerge::merge_from(&mut self.subdomain, other.subdomain);
239 crate::DeepMerge::merge_from(&mut self.termination_grace_period_seconds, other.termination_grace_period_seconds);
240 crate::merge_strategies::list::atomic(&mut self.tolerations, other.tolerations);
241 crate::merge_strategies::list::map(
242 &mut self.topology_spread_constraints,
243 other.topology_spread_constraints,
244 &[|lhs, rhs| lhs.topology_key == rhs.topology_key],
245 |current_item, other_item| {
246 crate::DeepMerge::merge_from(current_item, other_item);
247 },
248 );
249 crate::merge_strategies::list::map(
250 &mut self.volumes,
251 other.volumes,
252 &[|lhs, rhs| lhs.name == rhs.name],
253 |current_item, other_item| {
254 crate::DeepMerge::merge_from(current_item, other_item);
255 },
256 );
257 crate::DeepMerge::merge_from(&mut self.workload_ref, other.workload_ref);
258 }
259}
260
impl<'de> crate::serde::Deserialize<'de> for PodSpec {
    /// Deserializes a `PodSpec` from a map of camelCase JSON keys, ignoring
    /// unknown keys so newer API servers remain readable by older clients.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
        // One variant per known JSON key; `Other` swallows unrecognized keys.
        #[allow(non_camel_case_types)]
        enum Field {
            Key_active_deadline_seconds,
            Key_affinity,
            Key_automount_service_account_token,
            Key_containers,
            Key_dns_config,
            Key_dns_policy,
            Key_enable_service_links,
            Key_ephemeral_containers,
            Key_host_aliases,
            Key_host_ipc,
            Key_host_network,
            Key_host_pid,
            Key_host_users,
            Key_hostname,
            Key_hostname_override,
            Key_image_pull_secrets,
            Key_init_containers,
            Key_node_name,
            Key_node_selector,
            Key_os,
            Key_overhead,
            Key_preemption_policy,
            Key_priority,
            Key_priority_class_name,
            Key_readiness_gates,
            Key_resource_claims,
            Key_resources,
            Key_restart_policy,
            Key_runtime_class_name,
            Key_scheduler_name,
            Key_scheduling_gates,
            Key_security_context,
            Key_service_account,
            Key_service_account_name,
            Key_set_hostname_as_fqdn,
            Key_share_process_namespace,
            Key_subdomain,
            Key_termination_grace_period_seconds,
            Key_tolerations,
            Key_topology_spread_constraints,
            Key_volumes,
            Key_workload_ref,
            Other,
        }

        impl<'de> crate::serde::Deserialize<'de> for Field {
            fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
                struct Visitor;

                impl crate::serde::de::Visitor<'_> for Visitor {
                    type Value = Field;

                    fn expecting(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
                        f.write_str("field identifier")
                    }

                    // Maps wire names (camelCase, with Kubernetes-specific
                    // casings like "hostIPC" and "setHostnameAsFQDN") to
                    // Field variants; anything unmatched becomes Field::Other.
                    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: crate::serde::de::Error {
                        Ok(match v {
                            "activeDeadlineSeconds" => Field::Key_active_deadline_seconds,
                            "affinity" => Field::Key_affinity,
                            "automountServiceAccountToken" => Field::Key_automount_service_account_token,
                            "containers" => Field::Key_containers,
                            "dnsConfig" => Field::Key_dns_config,
                            "dnsPolicy" => Field::Key_dns_policy,
                            "enableServiceLinks" => Field::Key_enable_service_links,
                            "ephemeralContainers" => Field::Key_ephemeral_containers,
                            "hostAliases" => Field::Key_host_aliases,
                            "hostIPC" => Field::Key_host_ipc,
                            "hostNetwork" => Field::Key_host_network,
                            "hostPID" => Field::Key_host_pid,
                            "hostUsers" => Field::Key_host_users,
                            "hostname" => Field::Key_hostname,
                            "hostnameOverride" => Field::Key_hostname_override,
                            "imagePullSecrets" => Field::Key_image_pull_secrets,
                            "initContainers" => Field::Key_init_containers,
                            "nodeName" => Field::Key_node_name,
                            "nodeSelector" => Field::Key_node_selector,
                            "os" => Field::Key_os,
                            "overhead" => Field::Key_overhead,
                            "preemptionPolicy" => Field::Key_preemption_policy,
                            "priority" => Field::Key_priority,
                            "priorityClassName" => Field::Key_priority_class_name,
                            "readinessGates" => Field::Key_readiness_gates,
                            "resourceClaims" => Field::Key_resource_claims,
                            "resources" => Field::Key_resources,
                            "restartPolicy" => Field::Key_restart_policy,
                            "runtimeClassName" => Field::Key_runtime_class_name,
                            "schedulerName" => Field::Key_scheduler_name,
                            "schedulingGates" => Field::Key_scheduling_gates,
                            "securityContext" => Field::Key_security_context,
                            "serviceAccount" => Field::Key_service_account,
                            "serviceAccountName" => Field::Key_service_account_name,
                            "setHostnameAsFQDN" => Field::Key_set_hostname_as_fqdn,
                            "shareProcessNamespace" => Field::Key_share_process_namespace,
                            "subdomain" => Field::Key_subdomain,
                            "terminationGracePeriodSeconds" => Field::Key_termination_grace_period_seconds,
                            "tolerations" => Field::Key_tolerations,
                            "topologySpreadConstraints" => Field::Key_topology_spread_constraints,
                            "volumes" => Field::Key_volumes,
                            "workloadRef" => Field::Key_workload_ref,
                            _ => Field::Other,
                        })
                    }
                }

                deserializer.deserialize_identifier(Visitor)
            }
        }

        struct Visitor;

        impl<'de> crate::serde::de::Visitor<'de> for Visitor {
            type Value = PodSpec;

            fn expecting(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
                f.write_str("PodSpec")
            }

            // Accumulates each field into an Option, then assembles the
            // struct; a duplicated key silently keeps the last occurrence.
            fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: crate::serde::de::MapAccess<'de> {
                let mut value_active_deadline_seconds: Option<i64> = None;
                let mut value_affinity: Option<crate::api::core::v1::Affinity> = None;
                let mut value_automount_service_account_token: Option<bool> = None;
                let mut value_containers: Option<std::vec::Vec<crate::api::core::v1::Container>> = None;
                let mut value_dns_config: Option<crate::api::core::v1::PodDNSConfig> = None;
                let mut value_dns_policy: Option<std::string::String> = None;
                let mut value_enable_service_links: Option<bool> = None;
                let mut value_ephemeral_containers: Option<std::vec::Vec<crate::api::core::v1::EphemeralContainer>> = None;
                let mut value_host_aliases: Option<std::vec::Vec<crate::api::core::v1::HostAlias>> = None;
                let mut value_host_ipc: Option<bool> = None;
                let mut value_host_network: Option<bool> = None;
                let mut value_host_pid: Option<bool> = None;
                let mut value_host_users: Option<bool> = None;
                let mut value_hostname: Option<std::string::String> = None;
                let mut value_hostname_override: Option<std::string::String> = None;
                let mut value_image_pull_secrets: Option<std::vec::Vec<crate::api::core::v1::LocalObjectReference>> = None;
                let mut value_init_containers: Option<std::vec::Vec<crate::api::core::v1::Container>> = None;
                let mut value_node_name: Option<std::string::String> = None;
                let mut value_node_selector: Option<std::collections::BTreeMap<std::string::String, std::string::String>> = None;
                let mut value_os: Option<crate::api::core::v1::PodOS> = None;
                let mut value_overhead: Option<std::collections::BTreeMap<std::string::String, crate::apimachinery::pkg::api::resource::Quantity>> = None;
                let mut value_preemption_policy: Option<std::string::String> = None;
                let mut value_priority: Option<i32> = None;
                let mut value_priority_class_name: Option<std::string::String> = None;
                let mut value_readiness_gates: Option<std::vec::Vec<crate::api::core::v1::PodReadinessGate>> = None;
                let mut value_resource_claims: Option<std::vec::Vec<crate::api::core::v1::PodResourceClaim>> = None;
                let mut value_resources: Option<crate::api::core::v1::ResourceRequirements> = None;
                let mut value_restart_policy: Option<std::string::String> = None;
                let mut value_runtime_class_name: Option<std::string::String> = None;
                let mut value_scheduler_name: Option<std::string::String> = None;
                let mut value_scheduling_gates: Option<std::vec::Vec<crate::api::core::v1::PodSchedulingGate>> = None;
                let mut value_security_context: Option<crate::api::core::v1::PodSecurityContext> = None;
                let mut value_service_account: Option<std::string::String> = None;
                let mut value_service_account_name: Option<std::string::String> = None;
                let mut value_set_hostname_as_fqdn: Option<bool> = None;
                let mut value_share_process_namespace: Option<bool> = None;
                let mut value_subdomain: Option<std::string::String> = None;
                let mut value_termination_grace_period_seconds: Option<i64> = None;
                let mut value_tolerations: Option<std::vec::Vec<crate::api::core::v1::Toleration>> = None;
                let mut value_topology_spread_constraints: Option<std::vec::Vec<crate::api::core::v1::TopologySpreadConstraint>> = None;
                let mut value_volumes: Option<std::vec::Vec<crate::api::core::v1::Volume>> = None;
                let mut value_workload_ref: Option<crate::api::core::v1::WorkloadReference> = None;

                while let Some(key) = crate::serde::de::MapAccess::next_key::<Field>(&mut map)? {
                    match key {
                        Field::Key_active_deadline_seconds => value_active_deadline_seconds = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_affinity => value_affinity = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_automount_service_account_token => value_automount_service_account_token = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_containers => value_containers = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_dns_config => value_dns_config = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_dns_policy => value_dns_policy = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_enable_service_links => value_enable_service_links = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_ephemeral_containers => value_ephemeral_containers = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_host_aliases => value_host_aliases = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_host_ipc => value_host_ipc = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_host_network => value_host_network = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_host_pid => value_host_pid = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_host_users => value_host_users = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_hostname => value_hostname = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_hostname_override => value_hostname_override = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_image_pull_secrets => value_image_pull_secrets = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_init_containers => value_init_containers = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_node_name => value_node_name = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_node_selector => value_node_selector = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_os => value_os = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_overhead => value_overhead = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_preemption_policy => value_preemption_policy = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_priority => value_priority = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_priority_class_name => value_priority_class_name = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_readiness_gates => value_readiness_gates = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_resource_claims => value_resource_claims = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_resources => value_resources = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_restart_policy => value_restart_policy = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_runtime_class_name => value_runtime_class_name = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_scheduler_name => value_scheduler_name = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_scheduling_gates => value_scheduling_gates = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_security_context => value_security_context = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_service_account => value_service_account = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_service_account_name => value_service_account_name = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_set_hostname_as_fqdn => value_set_hostname_as_fqdn = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_share_process_namespace => value_share_process_namespace = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_subdomain => value_subdomain = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_termination_grace_period_seconds => value_termination_grace_period_seconds = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_tolerations => value_tolerations = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_topology_spread_constraints => value_topology_spread_constraints = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_volumes => value_volumes = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_workload_ref => value_workload_ref = crate::serde::de::MapAccess::next_value(&mut map)?,
                        // Unknown key: the value must still be consumed to
                        // keep the map access in sync.
                        Field::Other => { let _: crate::serde::de::IgnoredAny = crate::serde::de::MapAccess::next_value(&mut map)?; },
                    }
                }

                Ok(PodSpec {
                    active_deadline_seconds: value_active_deadline_seconds,
                    affinity: value_affinity,
                    automount_service_account_token: value_automount_service_account_token,
                    // `containers` is the only required field; a missing key
                    // yields an empty Vec instead of a hard error.
                    containers: value_containers.unwrap_or_default(),
                    dns_config: value_dns_config,
                    dns_policy: value_dns_policy,
                    enable_service_links: value_enable_service_links,
                    ephemeral_containers: value_ephemeral_containers,
                    host_aliases: value_host_aliases,
                    host_ipc: value_host_ipc,
                    host_network: value_host_network,
                    host_pid: value_host_pid,
                    host_users: value_host_users,
                    hostname: value_hostname,
                    hostname_override: value_hostname_override,
                    image_pull_secrets: value_image_pull_secrets,
                    init_containers: value_init_containers,
                    node_name: value_node_name,
                    node_selector: value_node_selector,
                    os: value_os,
                    overhead: value_overhead,
                    preemption_policy: value_preemption_policy,
                    priority: value_priority,
                    priority_class_name: value_priority_class_name,
                    readiness_gates: value_readiness_gates,
                    resource_claims: value_resource_claims,
                    resources: value_resources,
                    restart_policy: value_restart_policy,
                    runtime_class_name: value_runtime_class_name,
                    scheduler_name: value_scheduler_name,
                    scheduling_gates: value_scheduling_gates,
                    security_context: value_security_context,
                    service_account: value_service_account,
                    service_account_name: value_service_account_name,
                    set_hostname_as_fqdn: value_set_hostname_as_fqdn,
                    share_process_namespace: value_share_process_namespace,
                    subdomain: value_subdomain,
                    termination_grace_period_seconds: value_termination_grace_period_seconds,
                    tolerations: value_tolerations,
                    topology_spread_constraints: value_topology_spread_constraints,
                    volumes: value_volumes,
                    workload_ref: value_workload_ref,
                })
            }
        }

        // The field-name list mirrors the visit_str match arms above and is
        // used by self-describing formats for better error messages.
        deserializer.deserialize_struct(
            "PodSpec",
            &[
                "activeDeadlineSeconds",
                "affinity",
                "automountServiceAccountToken",
                "containers",
                "dnsConfig",
                "dnsPolicy",
                "enableServiceLinks",
                "ephemeralContainers",
                "hostAliases",
                "hostIPC",
                "hostNetwork",
                "hostPID",
                "hostUsers",
                "hostname",
                "hostnameOverride",
                "imagePullSecrets",
                "initContainers",
                "nodeName",
                "nodeSelector",
                "os",
                "overhead",
                "preemptionPolicy",
                "priority",
                "priorityClassName",
                "readinessGates",
                "resourceClaims",
                "resources",
                "restartPolicy",
                "runtimeClassName",
                "schedulerName",
                "schedulingGates",
                "securityContext",
                "serviceAccount",
                "serviceAccountName",
                "setHostnameAsFQDN",
                "shareProcessNamespace",
                "subdomain",
                "terminationGracePeriodSeconds",
                "tolerations",
                "topologySpreadConstraints",
                "volumes",
                "workloadRef",
            ],
            Visitor,
        )
    }
}
572
impl crate::serde::Serialize for PodSpec {
    /// Serializes a `PodSpec` as a struct with camelCase keys, omitting every
    /// `None` field; `containers` (the only required field) is always written.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: crate::serde::Serializer {
        // The declared field count must exactly equal the number of
        // serialize_field calls made below: a constant 1 for `containers`
        // plus 1 for each Option that is Some.
        let mut state = serializer.serialize_struct(
            "PodSpec",
            1 +
            self.active_deadline_seconds.as_ref().map_or(0, |_| 1) +
            self.affinity.as_ref().map_or(0, |_| 1) +
            self.automount_service_account_token.as_ref().map_or(0, |_| 1) +
            self.dns_config.as_ref().map_or(0, |_| 1) +
            self.dns_policy.as_ref().map_or(0, |_| 1) +
            self.enable_service_links.as_ref().map_or(0, |_| 1) +
            self.ephemeral_containers.as_ref().map_or(0, |_| 1) +
            self.host_aliases.as_ref().map_or(0, |_| 1) +
            self.host_ipc.as_ref().map_or(0, |_| 1) +
            self.host_network.as_ref().map_or(0, |_| 1) +
            self.host_pid.as_ref().map_or(0, |_| 1) +
            self.host_users.as_ref().map_or(0, |_| 1) +
            self.hostname.as_ref().map_or(0, |_| 1) +
            self.hostname_override.as_ref().map_or(0, |_| 1) +
            self.image_pull_secrets.as_ref().map_or(0, |_| 1) +
            self.init_containers.as_ref().map_or(0, |_| 1) +
            self.node_name.as_ref().map_or(0, |_| 1) +
            self.node_selector.as_ref().map_or(0, |_| 1) +
            self.os.as_ref().map_or(0, |_| 1) +
            self.overhead.as_ref().map_or(0, |_| 1) +
            self.preemption_policy.as_ref().map_or(0, |_| 1) +
            self.priority.as_ref().map_or(0, |_| 1) +
            self.priority_class_name.as_ref().map_or(0, |_| 1) +
            self.readiness_gates.as_ref().map_or(0, |_| 1) +
            self.resource_claims.as_ref().map_or(0, |_| 1) +
            self.resources.as_ref().map_or(0, |_| 1) +
            self.restart_policy.as_ref().map_or(0, |_| 1) +
            self.runtime_class_name.as_ref().map_or(0, |_| 1) +
            self.scheduler_name.as_ref().map_or(0, |_| 1) +
            self.scheduling_gates.as_ref().map_or(0, |_| 1) +
            self.security_context.as_ref().map_or(0, |_| 1) +
            self.service_account.as_ref().map_or(0, |_| 1) +
            self.service_account_name.as_ref().map_or(0, |_| 1) +
            self.set_hostname_as_fqdn.as_ref().map_or(0, |_| 1) +
            self.share_process_namespace.as_ref().map_or(0, |_| 1) +
            self.subdomain.as_ref().map_or(0, |_| 1) +
            self.termination_grace_period_seconds.as_ref().map_or(0, |_| 1) +
            self.tolerations.as_ref().map_or(0, |_| 1) +
            self.topology_spread_constraints.as_ref().map_or(0, |_| 1) +
            self.volumes.as_ref().map_or(0, |_| 1) +
            self.workload_ref.as_ref().map_or(0, |_| 1),
        )?;
        // Fields are emitted in the Kubernetes-conventional alphabetical wire
        // order; each optional field is skipped entirely when None.
        if let Some(value) = &self.active_deadline_seconds {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "activeDeadlineSeconds", value)?;
        }
        if let Some(value) = &self.affinity {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "affinity", value)?;
        }
        if let Some(value) = &self.automount_service_account_token {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "automountServiceAccountToken", value)?;
        }
        // Required field: always serialized, even when empty.
        crate::serde::ser::SerializeStruct::serialize_field(&mut state, "containers", &self.containers)?;
        if let Some(value) = &self.dns_config {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "dnsConfig", value)?;
        }
        if let Some(value) = &self.dns_policy {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "dnsPolicy", value)?;
        }
        if let Some(value) = &self.enable_service_links {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "enableServiceLinks", value)?;
        }
        if let Some(value) = &self.ephemeral_containers {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "ephemeralContainers", value)?;
        }
        if let Some(value) = &self.host_aliases {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "hostAliases", value)?;
        }
        if let Some(value) = &self.host_ipc {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "hostIPC", value)?;
        }
        if let Some(value) = &self.host_network {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "hostNetwork", value)?;
        }
        if let Some(value) = &self.host_pid {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "hostPID", value)?;
        }
        if let Some(value) = &self.host_users {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "hostUsers", value)?;
        }
        if let Some(value) = &self.hostname {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "hostname", value)?;
        }
        if let Some(value) = &self.hostname_override {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "hostnameOverride", value)?;
        }
        if let Some(value) = &self.image_pull_secrets {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "imagePullSecrets", value)?;
        }
        if let Some(value) = &self.init_containers {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "initContainers", value)?;
        }
        if let Some(value) = &self.node_name {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "nodeName", value)?;
        }
        if let Some(value) = &self.node_selector {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "nodeSelector", value)?;
        }
        if let Some(value) = &self.os {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "os", value)?;
        }
        if let Some(value) = &self.overhead {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "overhead", value)?;
        }
        if let Some(value) = &self.preemption_policy {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "preemptionPolicy", value)?;
        }
        if let Some(value) = &self.priority {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "priority", value)?;
        }
        if let Some(value) = &self.priority_class_name {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "priorityClassName", value)?;
        }
        if let Some(value) = &self.readiness_gates {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "readinessGates", value)?;
        }
        if let Some(value) = &self.resource_claims {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "resourceClaims", value)?;
        }
        if let Some(value) = &self.resources {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "resources", value)?;
        }
        if let Some(value) = &self.restart_policy {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "restartPolicy", value)?;
        }
        if let Some(value) = &self.runtime_class_name {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "runtimeClassName", value)?;
        }
        if let Some(value) = &self.scheduler_name {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "schedulerName", value)?;
        }
        if let Some(value) = &self.scheduling_gates {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "schedulingGates", value)?;
        }
        if let Some(value) = &self.security_context {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "securityContext", value)?;
        }
        if let Some(value) = &self.service_account {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "serviceAccount", value)?;
        }
        if let Some(value) = &self.service_account_name {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "serviceAccountName", value)?;
        }
        if let Some(value) = &self.set_hostname_as_fqdn {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "setHostnameAsFQDN", value)?;
        }
        if let Some(value) = &self.share_process_namespace {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "shareProcessNamespace", value)?;
        }
        if let Some(value) = &self.subdomain {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "subdomain", value)?;
        }
        if let Some(value) = &self.termination_grace_period_seconds {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "terminationGracePeriodSeconds", value)?;
        }
        if let Some(value) = &self.tolerations {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "tolerations", value)?;
        }
        if let Some(value) = &self.topology_spread_constraints {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "topologySpreadConstraints", value)?;
        }
        if let Some(value) = &self.volumes {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "volumes", value)?;
        }
        if let Some(value) = &self.workload_ref {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "workloadRef", value)?;
        }
        crate::serde::ser::SerializeStruct::end(state)
    }
}
747
// JSON Schema integration for `PodSpec`, compiled in only when the optional
// `schemars` feature is enabled. The schema content (property names,
// descriptions, types) mirrors the upstream Kubernetes OpenAPI specification
// for `io.k8s.api.core.v1.PodSpec`. NOTE(review): this file appears to be
// machine-generated from that spec, so hand edits here are likely to be
// overwritten on regeneration — confirm before modifying by hand.
#[cfg(feature = "schemars")]
impl crate::schemars::JsonSchema for PodSpec {
    /// The fully-qualified Kubernetes definition name, used as this schema's
    /// identifier when it is referenced from other generated schemas.
    fn schema_name() -> std::borrow::Cow<'static, str> {
        "io.k8s.api.core.v1.PodSpec".into()
    }

    /// Builds the object schema for `PodSpec` inline via the `json_schema!`
    /// macro. Property keys are the camelCase wire names (e.g.
    /// `activeDeadlineSeconds`), not the snake_case Rust field names.
    /// Fields typed as other named API objects are emitted through
    /// `__gen.subschema_for::<T>()`, which also registers `T`'s schema with
    /// the generator.
    fn json_schema(__gen: &mut crate::schemars::SchemaGenerator) -> crate::schemars::Schema {
        crate::schemars::json_schema!({
            "description": "PodSpec is a description of a pod.",
            "type": "object",
            "properties": {
                "activeDeadlineSeconds": {
                    "description": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.",
                    "type": "integer",
                    "format": "int64",
                },
                // Pattern used for every object-typed field below: fetch (and
                // register) the referenced type's subschema from the generator,
                // then attach this field's own description onto that schema.
                "affinity": ({
                    let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::Affinity>();
                    schema_obj.ensure_object().insert("description".into(), "If specified, the pod's scheduling constraints".into());
                    schema_obj
                }),
                "automountServiceAccountToken": {
                    "description": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.",
                    "type": "boolean",
                },
                "containers": {
                    "description": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.",
                    "type": "array",
                    "items": (__gen.subschema_for::<crate::api::core::v1::Container>()),
                },
                "dnsConfig": ({
                    let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::PodDNSConfig>();
                    schema_obj.ensure_object().insert("description".into(), "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.".into());
                    schema_obj
                }),
                "dnsPolicy": {
                    "description": "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.",
                    "type": "string",
                },
                "enableServiceLinks": {
                    "description": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.",
                    "type": "boolean",
                },
                "ephemeralContainers": {
                    "description": "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.",
                    "type": "array",
                    "items": (__gen.subschema_for::<crate::api::core::v1::EphemeralContainer>()),
                },
                "hostAliases": {
                    "description": "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified.",
                    "type": "array",
                    "items": (__gen.subschema_for::<crate::api::core::v1::HostAlias>()),
                },
                "hostIPC": {
                    "description": "Use the host's ipc namespace. Optional: Default to false.",
                    "type": "boolean",
                },
                "hostNetwork": {
                    "description": "Host networking requested for this pod. Use the host's network namespace. When using HostNetwork you should specify ports so the scheduler is aware. When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`, and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`. Default to false.",
                    "type": "boolean",
                },
                "hostPID": {
                    "description": "Use the host's pid namespace. Optional: Default to false.",
                    "type": "boolean",
                },
                "hostUsers": {
                    "description": "Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.",
                    "type": "boolean",
                },
                "hostname": {
                    "description": "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.",
                    "type": "string",
                },
                "hostnameOverride": {
                    "description": "HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod. This field only specifies the pod's hostname and does not affect its DNS records. When this field is set to a non-empty string: - It takes precedence over the values set in `hostname` and `subdomain`. - The Pod's hostname will be set to this value. - `setHostnameAsFQDN` must be nil or set to false. - `hostNetwork` must be set to false.\n\nThis field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters. Requires the HostnameOverride feature gate to be enabled.",
                    "type": "string",
                },
                "imagePullSecrets": {
                    "description": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod",
                    "type": "array",
                    "items": (__gen.subschema_for::<crate::api::core::v1::LocalObjectReference>()),
                },
                "initContainers": {
                    "description": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/",
                    "type": "array",
                    "items": (__gen.subschema_for::<crate::api::core::v1::Container>()),
                },
                "nodeName": {
                    "description": "NodeName indicates in which node this pod is scheduled. If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. This field should not be used to express a desire for the pod to be scheduled on a specific node. https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename",
                    "type": "string",
                },
                "nodeSelector": {
                    "description": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/",
                    "type": "object",
                    "additionalProperties": {
                        "type": "string",
                    },
                },
                "os": ({
                    let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::PodOS>();
                    schema_obj.ensure_object().insert("description".into(), "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.resources - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.securityContext.supplementalGroupsPolicy - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup".into());
                    schema_obj
                }),
                "overhead": {
                    "description": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md",
                    "type": "object",
                    "additionalProperties": (__gen.subschema_for::<crate::apimachinery::pkg::api::resource::Quantity>()),
                },
                "preemptionPolicy": {
                    "description": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.",
                    "type": "string",
                },
                "priority": {
                    "description": "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.",
                    "type": "integer",
                    "format": "int32",
                },
                "priorityClassName": {
                    "description": "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.",
                    "type": "string",
                },
                "readinessGates": {
                    "description": "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates",
                    "type": "array",
                    "items": (__gen.subschema_for::<crate::api::core::v1::PodReadinessGate>()),
                },
                "resourceClaims": {
                    "description": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is a stable field but requires that the DynamicResourceAllocation feature gate is enabled.\n\nThis field is immutable.",
                    "type": "array",
                    "items": (__gen.subschema_for::<crate::api::core::v1::PodResourceClaim>()),
                },
                "resources": ({
                    let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::ResourceRequirements>();
                    schema_obj.ensure_object().insert("description".into(), "Resources is the total amount of CPU and Memory resources required by all containers in the pod. It supports specifying Requests and Limits for \"cpu\", \"memory\" and \"hugepages-\" resource names only. ResourceClaims are not supported.\n\nThis field enables fine-grained control over resource allocation for the entire pod, allowing resource sharing among containers in a pod.\n\nThis is an alpha field and requires enabling the PodLevelResources feature gate.".into());
                    schema_obj
                }),
                "restartPolicy": {
                    "description": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy",
                    "type": "string",
                },
                "runtimeClassName": {
                    "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class",
                    "type": "string",
                },
                "schedulerName": {
                    "description": "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.",
                    "type": "string",
                },
                "schedulingGates": {
                    "description": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.",
                    "type": "array",
                    "items": (__gen.subschema_for::<crate::api::core::v1::PodSchedulingGate>()),
                },
                "securityContext": ({
                    let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::PodSecurityContext>();
                    schema_obj.ensure_object().insert("description".into(), "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.".into());
                    schema_obj
                }),
                "serviceAccount": {
                    "description": "DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.",
                    "type": "string",
                },
                "serviceAccountName": {
                    "description": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/",
                    "type": "string",
                },
                "setHostnameAsFQDN": {
                    "description": "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\\\SYSTEM\\\\CurrentControlSet\\\\Services\\\\Tcpip\\\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.",
                    "type": "boolean",
                },
                "shareProcessNamespace": {
                    "description": "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.",
                    "type": "boolean",
                },
                "subdomain": {
                    "description": "If specified, the fully qualified Pod hostname will be \"<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>\". If not specified, the pod will not have a domainname at all.",
                    "type": "string",
                },
                "terminationGracePeriodSeconds": {
                    "description": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.",
                    "type": "integer",
                    "format": "int64",
                },
                "tolerations": {
                    "description": "If specified, the pod's tolerations.",
                    "type": "array",
                    "items": (__gen.subschema_for::<crate::api::core::v1::Toleration>()),
                },
                "topologySpreadConstraints": {
                    "description": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.",
                    "type": "array",
                    "items": (__gen.subschema_for::<crate::api::core::v1::TopologySpreadConstraint>()),
                },
                "volumes": {
                    "description": "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes",
                    "type": "array",
                    "items": (__gen.subschema_for::<crate::api::core::v1::Volume>()),
                },
                "workloadRef": ({
                    let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::WorkloadReference>();
                    schema_obj.ensure_object().insert("description".into(), "WorkloadRef provides a reference to the Workload object that this Pod belongs to. This field is used by the scheduler to identify the PodGroup and apply the correct group scheduling policies. The Workload object referenced by this field may not exist at the time the Pod is created. This field is immutable, but a Workload object with the same name may be recreated with different policies. Doing this during pod scheduling may result in the placement not conforming to the expected policies.".into());
                    schema_obj
                }),
            },
            // `containers` is the only field on the struct that is not wrapped
            // in `Option`, so it is the only required property.
            "required": [
                "containers",
            ],
        })
    }
}