/// JobSpec describes how the job execution will look.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct JobSpec {
    /// Specifies the duration in seconds, relative to the startTime, that the job may be continuously active before the system tries to terminate it; the value must be a positive integer.
    pub active_deadline_seconds: Option<i64>,

    /// Specifies the number of retries before marking this job failed. Defaults to 6.
    pub backoff_limit: Option<i32>,

    /// Specifies the limit for the number of retries within an index before marking this index as failed. Can only be set when the Job's completionMode is Indexed.
    pub backoff_limit_per_index: Option<i32>,

    /// Specifies how Pod completions are tracked. It can be `NonIndexed` (the default) or `Indexed`.
    pub completion_mode: Option<std::string::String>,

    /// Specifies the desired number of successfully finished pods the job should be run with.
    pub completions: Option<i32>,

    /// Indicates the controller that manages the Job. This field is immutable.
    pub managed_by: Option<std::string::String>,

    /// Controls generation of pod labels and pod selectors. Leave unset unless you are certain what you are doing.
    pub manual_selector: Option<bool>,

    /// Specifies the maximal number of failed indexes before marking the Job as failed; only meaningful when backoffLimitPerIndex is set.
    pub max_failed_indexes: Option<i32>,

    /// Specifies the maximum desired number of pods the job should run at any given time.
    pub parallelism: Option<i32>,

    /// Specifies the policy of handling failed pods.
    pub pod_failure_policy: Option<crate::api::batch::v1::PodFailurePolicy>,

    /// Specifies when to create replacement Pods: `TerminatingOrFailed` or `Failed`.
    pub pod_replacement_policy: Option<std::string::String>,

    /// A label query over pods that should match the pod count. Normally, the system sets this field for you.
    pub selector: Option<crate::apimachinery::pkg::apis::meta::v1::LabelSelector>,

    /// Specifies the policy for when the Job can be declared as succeeded.
    pub success_policy: Option<crate::api::batch::v1::SuccessPolicy>,

    /// Specifies whether the Job controller should create Pods or not. Defaults to false.
    pub suspend: Option<bool>,

    /// Describes the pod that will be created when executing the job.
    pub template: crate::api::core::v1::PodTemplateSpec,

    /// Limits the lifetime of a Job that has finished execution (either Complete or Failed).
    pub ttl_seconds_after_finished: Option<i32>,
}
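
// Illustrative sketch (not part of the generated API): `template` is the only
// non-`Option` field, so a minimal `JobSpec` can be built by setting a few knobs
// and letting the `Default` derive above fill in the rest.
#[cfg(test)]
mod job_spec_construction_sketch {
    #[test]
    fn builds_a_minimal_job_spec() {
        // `..Default::default()` also supplies an empty `PodTemplateSpec` for `template`.
        let spec = super::JobSpec {
            parallelism: Some(2),
            completions: Some(4),
            ..Default::default()
        };
        assert_eq!(spec.parallelism, Some(2));
        assert!(spec.suspend.is_none());
    }
}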

impl crate::DeepMerge for JobSpec {
    fn merge_from(&mut self, other: Self) {
        crate::DeepMerge::merge_from(&mut self.active_deadline_seconds, other.active_deadline_seconds);
        crate::DeepMerge::merge_from(&mut self.backoff_limit, other.backoff_limit);
        crate::DeepMerge::merge_from(&mut self.backoff_limit_per_index, other.backoff_limit_per_index);
        crate::DeepMerge::merge_from(&mut self.completion_mode, other.completion_mode);
        crate::DeepMerge::merge_from(&mut self.completions, other.completions);
        crate::DeepMerge::merge_from(&mut self.managed_by, other.managed_by);
        crate::DeepMerge::merge_from(&mut self.manual_selector, other.manual_selector);
        crate::DeepMerge::merge_from(&mut self.max_failed_indexes, other.max_failed_indexes);
        crate::DeepMerge::merge_from(&mut self.parallelism, other.parallelism);
        crate::DeepMerge::merge_from(&mut self.pod_failure_policy, other.pod_failure_policy);
        crate::DeepMerge::merge_from(&mut self.pod_replacement_policy, other.pod_replacement_policy);
        crate::DeepMerge::merge_from(&mut self.selector, other.selector);
        crate::DeepMerge::merge_from(&mut self.success_policy, other.success_policy);
        crate::DeepMerge::merge_from(&mut self.suspend, other.suspend);
        crate::DeepMerge::merge_from(&mut self.template, other.template);
        crate::DeepMerge::merge_from(&mut self.ttl_seconds_after_finished, other.ttl_seconds_after_finished);
    }
}
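
// Illustrative sketch (not part of the generated API): `merge_from` folds `other`
// into `self`; for the `Option` fields here, a value set on the patch replaces the
// base value, while a `None` on the patch leaves the base untouched.
#[cfg(test)]
mod job_spec_deep_merge_sketch {
    #[test]
    fn patch_overrides_only_the_fields_it_sets() {
        let mut base = super::JobSpec {
            parallelism: Some(1),
            backoff_limit: Some(6),
            ..Default::default()
        };
        let patch = super::JobSpec {
            parallelism: Some(3),
            ..Default::default()
        };
        crate::DeepMerge::merge_from(&mut base, patch);
        assert_eq!(base.parallelism, Some(3));
        // The patch left `backoff_limit` unset, so the base value survives.
        assert_eq!(base.backoff_limit, Some(6));
    }
}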

impl<'de> crate::serde::Deserialize<'de> for JobSpec {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
        #[allow(non_camel_case_types)]
        enum Field {
            Key_active_deadline_seconds,
            Key_backoff_limit,
            Key_backoff_limit_per_index,
            Key_completion_mode,
            Key_completions,
            Key_managed_by,
            Key_manual_selector,
            Key_max_failed_indexes,
            Key_parallelism,
            Key_pod_failure_policy,
            Key_pod_replacement_policy,
            Key_selector,
            Key_success_policy,
            Key_suspend,
            Key_template,
            Key_ttl_seconds_after_finished,
            Other,
        }

        impl<'de> crate::serde::Deserialize<'de> for Field {
            fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
                struct Visitor;

                impl crate::serde::de::Visitor<'_> for Visitor {
                    type Value = Field;

                    fn expecting(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
                        f.write_str("field identifier")
                    }

                    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: crate::serde::de::Error {
                        Ok(match v {
                            "activeDeadlineSeconds" => Field::Key_active_deadline_seconds,
                            "backoffLimit" => Field::Key_backoff_limit,
                            "backoffLimitPerIndex" => Field::Key_backoff_limit_per_index,
                            "completionMode" => Field::Key_completion_mode,
                            "completions" => Field::Key_completions,
                            "managedBy" => Field::Key_managed_by,
                            "manualSelector" => Field::Key_manual_selector,
                            "maxFailedIndexes" => Field::Key_max_failed_indexes,
                            "parallelism" => Field::Key_parallelism,
                            "podFailurePolicy" => Field::Key_pod_failure_policy,
                            "podReplacementPolicy" => Field::Key_pod_replacement_policy,
                            "selector" => Field::Key_selector,
                            "successPolicy" => Field::Key_success_policy,
                            "suspend" => Field::Key_suspend,
                            "template" => Field::Key_template,
                            "ttlSecondsAfterFinished" => Field::Key_ttl_seconds_after_finished,
                            _ => Field::Other,
                        })
                    }
                }

                deserializer.deserialize_identifier(Visitor)
            }
        }

        struct Visitor;

        impl<'de> crate::serde::de::Visitor<'de> for Visitor {
            type Value = JobSpec;

            fn expecting(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
                f.write_str("JobSpec")
            }

            fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: crate::serde::de::MapAccess<'de> {
                let mut value_active_deadline_seconds: Option<i64> = None;
                let mut value_backoff_limit: Option<i32> = None;
                let mut value_backoff_limit_per_index: Option<i32> = None;
                let mut value_completion_mode: Option<std::string::String> = None;
                let mut value_completions: Option<i32> = None;
                let mut value_managed_by: Option<std::string::String> = None;
                let mut value_manual_selector: Option<bool> = None;
                let mut value_max_failed_indexes: Option<i32> = None;
                let mut value_parallelism: Option<i32> = None;
                let mut value_pod_failure_policy: Option<crate::api::batch::v1::PodFailurePolicy> = None;
                let mut value_pod_replacement_policy: Option<std::string::String> = None;
                let mut value_selector: Option<crate::apimachinery::pkg::apis::meta::v1::LabelSelector> = None;
                let mut value_success_policy: Option<crate::api::batch::v1::SuccessPolicy> = None;
                let mut value_suspend: Option<bool> = None;
                let mut value_template: Option<crate::api::core::v1::PodTemplateSpec> = None;
                let mut value_ttl_seconds_after_finished: Option<i32> = None;

                while let Some(key) = crate::serde::de::MapAccess::next_key::<Field>(&mut map)? {
                    match key {
                        Field::Key_active_deadline_seconds => value_active_deadline_seconds = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_backoff_limit => value_backoff_limit = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_backoff_limit_per_index => value_backoff_limit_per_index = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_completion_mode => value_completion_mode = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_completions => value_completions = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_managed_by => value_managed_by = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_manual_selector => value_manual_selector = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_max_failed_indexes => value_max_failed_indexes = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_parallelism => value_parallelism = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_pod_failure_policy => value_pod_failure_policy = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_pod_replacement_policy => value_pod_replacement_policy = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_selector => value_selector = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_success_policy => value_success_policy = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_suspend => value_suspend = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_template => value_template = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_ttl_seconds_after_finished => value_ttl_seconds_after_finished = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Other => { let _: crate::serde::de::IgnoredAny = crate::serde::de::MapAccess::next_value(&mut map)?; },
                    }
                }

                Ok(JobSpec {
                    active_deadline_seconds: value_active_deadline_seconds,
                    backoff_limit: value_backoff_limit,
                    backoff_limit_per_index: value_backoff_limit_per_index,
                    completion_mode: value_completion_mode,
                    completions: value_completions,
                    managed_by: value_managed_by,
                    manual_selector: value_manual_selector,
                    max_failed_indexes: value_max_failed_indexes,
                    parallelism: value_parallelism,
                    pod_failure_policy: value_pod_failure_policy,
                    pod_replacement_policy: value_pod_replacement_policy,
                    selector: value_selector,
                    success_policy: value_success_policy,
                    suspend: value_suspend,
                    template: value_template.unwrap_or_default(),
                    ttl_seconds_after_finished: value_ttl_seconds_after_finished,
                })
            }
        }

        deserializer.deserialize_struct(
            "JobSpec",
            &[
                "activeDeadlineSeconds",
                "backoffLimit",
                "backoffLimitPerIndex",
                "completionMode",
                "completions",
                "managedBy",
                "manualSelector",
                "maxFailedIndexes",
                "parallelism",
                "podFailurePolicy",
                "podReplacementPolicy",
                "selector",
                "successPolicy",
                "suspend",
                "template",
                "ttlSecondsAfterFinished",
            ],
            Visitor,
        )
    }
}
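
// Illustrative round-trip sketch (not part of the generated API), assuming a JSON
// deserializer such as `serde_json` is available as a dev-dependency; unknown keys
// are ignored and a missing `template` falls back to `PodTemplateSpec::default()`:
//
//     let spec: JobSpec = serde_json::from_str(
//         r#"{"parallelism": 2, "completions": 4, "unknownKey": true}"#,
//     ).unwrap();
//     assert_eq!(spec.completions, Some(4));
//     assert_eq!(spec.template, Default::default());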

impl crate::serde::Serialize for JobSpec {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: crate::serde::Serializer {
        let mut state = serializer.serialize_struct(
            "JobSpec",
            1 +
            self.active_deadline_seconds.as_ref().map_or(0, |_| 1) +
            self.backoff_limit.as_ref().map_or(0, |_| 1) +
            self.backoff_limit_per_index.as_ref().map_or(0, |_| 1) +
            self.completion_mode.as_ref().map_or(0, |_| 1) +
            self.completions.as_ref().map_or(0, |_| 1) +
            self.managed_by.as_ref().map_or(0, |_| 1) +
            self.manual_selector.as_ref().map_or(0, |_| 1) +
            self.max_failed_indexes.as_ref().map_or(0, |_| 1) +
            self.parallelism.as_ref().map_or(0, |_| 1) +
            self.pod_failure_policy.as_ref().map_or(0, |_| 1) +
            self.pod_replacement_policy.as_ref().map_or(0, |_| 1) +
            self.selector.as_ref().map_or(0, |_| 1) +
            self.success_policy.as_ref().map_or(0, |_| 1) +
            self.suspend.as_ref().map_or(0, |_| 1) +
            self.ttl_seconds_after_finished.as_ref().map_or(0, |_| 1),
        )?;
        if let Some(value) = &self.active_deadline_seconds {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "activeDeadlineSeconds", value)?;
        }
        if let Some(value) = &self.backoff_limit {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "backoffLimit", value)?;
        }
        if let Some(value) = &self.backoff_limit_per_index {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "backoffLimitPerIndex", value)?;
        }
        if let Some(value) = &self.completion_mode {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "completionMode", value)?;
        }
        if let Some(value) = &self.completions {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "completions", value)?;
        }
        if let Some(value) = &self.managed_by {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "managedBy", value)?;
        }
        if let Some(value) = &self.manual_selector {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "manualSelector", value)?;
        }
        if let Some(value) = &self.max_failed_indexes {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "maxFailedIndexes", value)?;
        }
        if let Some(value) = &self.parallelism {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "parallelism", value)?;
        }
        if let Some(value) = &self.pod_failure_policy {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "podFailurePolicy", value)?;
        }
        if let Some(value) = &self.pod_replacement_policy {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "podReplacementPolicy", value)?;
        }
        if let Some(value) = &self.selector {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "selector", value)?;
        }
        if let Some(value) = &self.success_policy {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "successPolicy", value)?;
        }
        if let Some(value) = &self.suspend {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "suspend", value)?;
        }
        crate::serde::ser::SerializeStruct::serialize_field(&mut state, "template", &self.template)?;
        if let Some(value) = &self.ttl_seconds_after_finished {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "ttlSecondsAfterFinished", value)?;
        }
        crate::serde::ser::SerializeStruct::end(state)
    }
}
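
// Illustrative sketch (not part of the generated API), assuming `serde_json` is
// available: the field count passed to `serialize_struct` starts at 1 because
// `template` is always emitted, and every `None` field is skipped rather than
// written as `null`. A default `JobSpec` therefore serializes to something like:
//
//     let json = serde_json::to_string(&JobSpec::default()).unwrap();
//     assert_eq!(json, r#"{"template":{}}"#);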

#[cfg(feature = "schemars")]
impl crate::schemars::JsonSchema for JobSpec {
    fn schema_name() -> std::borrow::Cow<'static, str> {
        "io.k8s.api.batch.v1.JobSpec".into()
    }

    fn json_schema(__gen: &mut crate::schemars::SchemaGenerator) -> crate::schemars::Schema {
        crate::schemars::json_schema!({
            "description": "JobSpec describes how the job execution will look.",
            "type": "object",
            "properties": {
                "activeDeadlineSeconds": {
                    "description": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; the value must be a positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.",
                    "type": "integer",
                    "format": "int64",
                },
                "backoffLimit": {
                    "description": "Specifies the number of retries before marking this job failed. Defaults to 6, unless backoffLimitPerIndex (only Indexed Job) is specified. When backoffLimitPerIndex is specified, backoffLimit defaults to 2147483647.",
                    "type": "integer",
                    "format": "int32",
                },
                "backoffLimitPerIndex": {
                    "description": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled, the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable.",
                    "type": "integer",
                    "format": "int32",
                },
                "completionMode": {
                    "description": "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, the Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.",
                    "type": "string",
                },
                "completions": {
                    "description": "Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
                    "type": "integer",
                    "format": "int32",
                },
                "managedBy": {
                    "description": "ManagedBy field indicates the controller that manages a Job. The k8s Job controller reconciles jobs that don't have this field at all or whose field value is the reserved string `kubernetes.io/job-controller`, but skips reconciling Jobs with a custom value for this field. The value must be a valid domain-prefixed path (e.g. acme.io/foo) - all characters before the first \"/\" must be a valid subdomain as defined by RFC 1123. All characters trailing the first \"/\" must be valid HTTP Path characters as defined by RFC 3986. The value cannot exceed 63 characters. This field is immutable.",
                    "type": "string",
                },
                "manualSelector": {
                    "description": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system picks labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, you may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector",
                    "type": "boolean",
                },
                "maxFailedIndexes": {
                    "description": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number, the entire Job is marked as Failed and its execution is terminated. When left as null, the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when completions is greater than 10^5.",
                    "type": "integer",
                    "format": "int32",
                },
                "parallelism": {
                    "description": "Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
                    "type": "integer",
                    "format": "int32",
                },
                "podFailurePolicy": ({
                    let mut schema_obj = __gen.subschema_for::<crate::api::batch::v1::PodFailurePolicy>();
                    schema_obj.ensure_object().insert("description".into(), "Specifies the policy of handling failed pods. In particular, it allows specifying the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the job's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.".into());
                    schema_obj
                }),
                "podReplacementPolicy": {
                    "description": "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.",
                    "type": "string",
                },
                "selector": ({
                    let mut schema_obj = __gen.subschema_for::<crate::apimachinery::pkg::apis::meta::v1::LabelSelector>();
                    schema_obj.ensure_object().insert("description".into(), "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors".into());
                    schema_obj
                }),
                "successPolicy": ({
                    let mut schema_obj = __gen.subschema_for::<crate::api::batch::v1::SuccessPolicy>();
                    schema_obj.ensure_object().insert("description".into(), "successPolicy specifies the policy for when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals the completions. When the field is specified, it must be immutable and works only for Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.".into());
                    schema_obj
                }),
                "suspend": {
                    "description": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.",
                    "type": "boolean",
                },
                "template": ({
                    let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::PodTemplateSpec>();
                    schema_obj.ensure_object().insert("description".into(), "Describes the pod that will be created when executing a job. The only allowed template.spec.restartPolicy values are \"Never\" or \"OnFailure\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/".into());
                    schema_obj
                }),
                "ttlSecondsAfterFinished": {
                    "description": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.",
                    "type": "integer",
                    "format": "int32",
                },
            },
            "required": [
                "template",
            ],
        })
    }
}
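
// Illustrative sketch (not part of the generated API), only relevant with the
// `schemars` feature enabled; the generator entry point below is an assumption
// about the `schemars` version in use, not a guaranteed API:
//
//     #[cfg(feature = "schemars")]
//     {
//         let generator = &mut crate::schemars::SchemaGenerator::default();
//         let schema = <JobSpec as crate::schemars::JsonSchema>::json_schema(generator);
//         // The resulting schema lists `template` as the only required property.
//     }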