/// TopologySpreadConstraint specifies how to spread matching pods among the given topology.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct TopologySpreadConstraint {
    /// LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.
    pub label_selector: Option<crate::apimachinery::pkg::apis::meta::v1::LabelSelector>,

    /// MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to look up values from the incoming pod labels; those key-value labels are ANDed with `label_selector`. A null or empty list means only match against `label_selector`.
    pub match_label_keys: Option<std::vec::Vec<std::string::String>>,

    /// MaxSkew describes the degree to which pods may be unevenly distributed. Required; default value is 1 and 0 is not allowed.
    pub max_skew: i32,

    /// MinDomains indicates a minimum number of eligible domains. If nil, the constraint behaves as if MinDomains were 1. When set, `when_unsatisfiable` must be `DoNotSchedule`.
    pub min_domains: Option<i32>,

    /// NodeAffinityPolicy indicates how to treat the pod's nodeAffinity/nodeSelector when calculating skew. Options: `Honor`, `Ignore`. Nil is equivalent to `Honor`.
    pub node_affinity_policy: Option<std::string::String>,

    /// NodeTaintsPolicy indicates how to treat node taints when calculating skew. Options: `Honor`, `Ignore`. Nil is equivalent to `Ignore`.
    pub node_taints_policy: Option<std::string::String>,

    /// TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology (domain). Required.
    pub topology_key: std::string::String,

    /// WhenUnsatisfiable indicates how to deal with a pod that doesn't satisfy the spread constraint: `DoNotSchedule` (default) or `ScheduleAnyway`. Required.
    pub when_unsatisfiable: std::string::String,
}
41
42impl crate::DeepMerge for TopologySpreadConstraint {
43 fn merge_from(&mut self, other: Self) {
44 crate::DeepMerge::merge_from(&mut self.label_selector, other.label_selector);
45 crate::merge_strategies::list::atomic(&mut self.match_label_keys, other.match_label_keys);
46 crate::DeepMerge::merge_from(&mut self.max_skew, other.max_skew);
47 crate::DeepMerge::merge_from(&mut self.min_domains, other.min_domains);
48 crate::DeepMerge::merge_from(&mut self.node_affinity_policy, other.node_affinity_policy);
49 crate::DeepMerge::merge_from(&mut self.node_taints_policy, other.node_taints_policy);
50 crate::DeepMerge::merge_from(&mut self.topology_key, other.topology_key);
51 crate::DeepMerge::merge_from(&mut self.when_unsatisfiable, other.when_unsatisfiable);
52 }
53}
54
impl<'de> crate::serde::Deserialize<'de> for TopologySpreadConstraint {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
        // One variant per known serialized key; `Other` absorbs unrecognized keys
        // so deserialization stays forward-compatible with newer API revisions.
        #[allow(non_camel_case_types)]
        enum Field {
            Key_label_selector,
            Key_match_label_keys,
            Key_max_skew,
            Key_min_domains,
            Key_node_affinity_policy,
            Key_node_taints_policy,
            Key_topology_key,
            Key_when_unsatisfiable,
            Other,
        }

        impl<'de> crate::serde::Deserialize<'de> for Field {
            fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
                struct Visitor;

                impl crate::serde::de::Visitor<'_> for Visitor {
                    type Value = Field;

                    fn expecting(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
                        f.write_str("field identifier")
                    }

                    // Maps each camelCase JSON key to its Field variant; anything
                    // else becomes Field::Other and is later skipped.
                    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: crate::serde::de::Error {
                        Ok(match v {
                            "labelSelector" => Field::Key_label_selector,
                            "matchLabelKeys" => Field::Key_match_label_keys,
                            "maxSkew" => Field::Key_max_skew,
                            "minDomains" => Field::Key_min_domains,
                            "nodeAffinityPolicy" => Field::Key_node_affinity_policy,
                            "nodeTaintsPolicy" => Field::Key_node_taints_policy,
                            "topologyKey" => Field::Key_topology_key,
                            "whenUnsatisfiable" => Field::Key_when_unsatisfiable,
                            _ => Field::Other,
                        })
                    }
                }

                deserializer.deserialize_identifier(Visitor)
            }
        }

        // Visitor that builds a TopologySpreadConstraint from a serialized map.
        struct Visitor;

        impl<'de> crate::serde::de::Visitor<'de> for Visitor {
            type Value = TopologySpreadConstraint;

            fn expecting(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
                f.write_str("TopologySpreadConstraint")
            }

            fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: crate::serde::de::MapAccess<'de> {
                // Accumulators for every field; all optional while reading so key
                // order does not matter.
                let mut value_label_selector: Option<crate::apimachinery::pkg::apis::meta::v1::LabelSelector> = None;
                let mut value_match_label_keys: Option<std::vec::Vec<std::string::String>> = None;
                let mut value_max_skew: Option<i32> = None;
                let mut value_min_domains: Option<i32> = None;
                let mut value_node_affinity_policy: Option<std::string::String> = None;
                let mut value_node_taints_policy: Option<std::string::String> = None;
                let mut value_topology_key: Option<std::string::String> = None;
                let mut value_when_unsatisfiable: Option<std::string::String> = None;

                while let Some(key) = crate::serde::de::MapAccess::next_key::<Field>(&mut map)? {
                    match key {
                        Field::Key_label_selector => value_label_selector = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_match_label_keys => value_match_label_keys = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_max_skew => value_max_skew = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_min_domains => value_min_domains = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_node_affinity_policy => value_node_affinity_policy = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_node_taints_policy => value_node_taints_policy = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_topology_key => value_topology_key = crate::serde::de::MapAccess::next_value(&mut map)?,
                        // Unknown keys: consume and discard the value so the map
                        // stream stays aligned.
                        Field::Key_when_unsatisfiable => value_when_unsatisfiable = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Other => { let _: crate::serde::de::IgnoredAny = crate::serde::de::MapAccess::next_value(&mut map)?; },
                    }
                }

                // Required fields (maxSkew, topologyKey, whenUnsatisfiable) fall
                // back to their Default values when absent instead of erroring.
                Ok(TopologySpreadConstraint {
                    label_selector: value_label_selector,
                    match_label_keys: value_match_label_keys,
                    max_skew: value_max_skew.unwrap_or_default(),
                    min_domains: value_min_domains,
                    node_affinity_policy: value_node_affinity_policy,
                    node_taints_policy: value_node_taints_policy,
                    topology_key: value_topology_key.unwrap_or_default(),
                    when_unsatisfiable: value_when_unsatisfiable.unwrap_or_default(),
                })
            }
        }

        deserializer.deserialize_struct(
            "TopologySpreadConstraint",
            &[
                "labelSelector",
                "matchLabelKeys",
                "maxSkew",
                "minDomains",
                "nodeAffinityPolicy",
                "nodeTaintsPolicy",
                "topologyKey",
                "whenUnsatisfiable",
            ],
            Visitor,
        )
    }
}
162
163impl crate::serde::Serialize for TopologySpreadConstraint {
164 fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: crate::serde::Serializer {
165 let mut state = serializer.serialize_struct(
166 "TopologySpreadConstraint",
167 3 +
168 self.label_selector.as_ref().map_or(0, |_| 1) +
169 self.match_label_keys.as_ref().map_or(0, |_| 1) +
170 self.min_domains.as_ref().map_or(0, |_| 1) +
171 self.node_affinity_policy.as_ref().map_or(0, |_| 1) +
172 self.node_taints_policy.as_ref().map_or(0, |_| 1),
173 )?;
174 if let Some(value) = &self.label_selector {
175 crate::serde::ser::SerializeStruct::serialize_field(&mut state, "labelSelector", value)?;
176 }
177 if let Some(value) = &self.match_label_keys {
178 crate::serde::ser::SerializeStruct::serialize_field(&mut state, "matchLabelKeys", value)?;
179 }
180 crate::serde::ser::SerializeStruct::serialize_field(&mut state, "maxSkew", &self.max_skew)?;
181 if let Some(value) = &self.min_domains {
182 crate::serde::ser::SerializeStruct::serialize_field(&mut state, "minDomains", value)?;
183 }
184 if let Some(value) = &self.node_affinity_policy {
185 crate::serde::ser::SerializeStruct::serialize_field(&mut state, "nodeAffinityPolicy", value)?;
186 }
187 if let Some(value) = &self.node_taints_policy {
188 crate::serde::ser::SerializeStruct::serialize_field(&mut state, "nodeTaintsPolicy", value)?;
189 }
190 crate::serde::ser::SerializeStruct::serialize_field(&mut state, "topologyKey", &self.topology_key)?;
191 crate::serde::ser::SerializeStruct::serialize_field(&mut state, "whenUnsatisfiable", &self.when_unsatisfiable)?;
192 crate::serde::ser::SerializeStruct::end(state)
193 }
194}
195
#[cfg(feature = "schemars")]
impl crate::schemars::JsonSchema for TopologySpreadConstraint {
    /// Canonical Kubernetes OpenAPI definition name for this type.
    fn schema_name() -> std::borrow::Cow<'static, str> {
        "io.k8s.api.core.v1.TopologySpreadConstraint".into()
    }

    /// Builds the JSON schema for this type; descriptions mirror the upstream
    /// Kubernetes OpenAPI specification verbatim.
    fn json_schema(__gen: &mut crate::schemars::SchemaGenerator) -> crate::schemars::Schema {
        crate::schemars::json_schema!({
            "description": "TopologySpreadConstraint specifies how to spread matching pods among the given topology.",
            "type": "object",
            "properties": {
                // labelSelector delegates to the LabelSelector subschema and
                // overrides its description with field-specific text.
                "labelSelector": ({
                    let mut schema_obj = __gen.subschema_for::<crate::apimachinery::pkg::apis::meta::v1::LabelSelector>();
                    schema_obj.ensure_object().insert("description".into(), "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.".into());
                    schema_obj
                }),
                "matchLabelKeys": {
                    "description": "MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.\n\nThis is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).",
                    "type": "array",
                    "items": {
                        "type": "string",
                    },
                },
                "maxSkew": {
                    "description": "MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed.",
                    "type": "integer",
                    "format": "int32",
                },
                "minDomains": {
                    "description": "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule.\n\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew.",
                    "type": "integer",
                    "format": "int32",
                },
                "nodeAffinityPolicy": {
                    "description": "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\nIf this value is nil, the behavior is equivalent to the Honor policy.",
                    "type": "string",
                },
                "nodeTaintsPolicy": {
                    "description": "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included.\n\nIf this value is nil, the behavior is equivalent to the Ignore policy.",
                    "type": "string",
                },
                "topologyKey": {
                    "description": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field.",
                    "type": "string",
                },
                "whenUnsatisfiable": {
                    "description": "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location,\n but giving higher precedence to topologies that would help reduce the\n skew.\nA constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assignment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field.",
                    "type": "string",
                },
            },
            "required": [
                "maxSkew",
                "topologyKey",
                "whenUnsatisfiable",
            ],
        })
    }
}