|
| 1 | +/* |
| 2 | +Copyright 2023. |
| 3 | +
|
| 4 | +Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | +you may not use this file except in compliance with the License. |
| 6 | +You may obtain a copy of the License at |
| 7 | +
|
| 8 | + http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | +
|
| 10 | +Unless required by applicable law or agreed to in writing, software |
| 11 | +distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | +See the License for the specific language governing permissions and |
| 14 | +limitations under the License. |
| 15 | +*/ |
| 16 | + |
| 17 | +package v1alpha1 |
| 18 | + |
| 19 | +import ( |
| 20 | + corev1 "k8s.io/api/core/v1" |
| 21 | + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" |
| 22 | +) |
| 23 | + |
const (
	// HadoopJobKind is the Kind name used in the TypeMeta of HadoopJob objects.
	HadoopJobKind = "HadoopJob"
	// HadoopJobPlural is the plural resource name for HadoopJob.
	// CRD resource names (plural) must be lowercase per Kubernetes API conventions,
	// and must match the +kubebuilder:resource path= marker ("hadoopjobs").
	HadoopJobPlural = "hadoopjobs"
	// HadoopJobSingular is the singular resource name for HadoopJob (lowercase,
	// per Kubernetes API conventions for CRD names).
	HadoopJobSingular = "hadoopjob"

	// JobNameLabel is the label key whose value is the HadoopJob name.
	// Fixed typo: was "kubeclusetr.org/job-name".
	JobNameLabel = "kubecluster.org/job-name"
)
| 35 | + |
// SparkApplicationType describes the type of a Spark application.
// NOTE(review): these Spark-specific declarations look copied from the
// spark-operator API types; nothing in this file references them. Confirm
// they are needed for HadoopJob, otherwise they should be removed.
type SparkApplicationType string

// Different types of Spark applications.
const (
	// JavaApplicationType marks an application whose main artifact is a Java JAR.
	JavaApplicationType SparkApplicationType = "Java"
	// ScalaApplicationType marks an application whose main artifact is a Scala JAR.
	ScalaApplicationType SparkApplicationType = "Scala"
	// PythonApplicationType marks a PySpark application.
	PythonApplicationType SparkApplicationType = "Python"
	// RApplicationType marks a SparkR application.
	RApplicationType SparkApplicationType = "R"
)
| 46 | + |
// HadoopJobSpec defines the desired state of HadoopJob.
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
type HadoopJobSpec struct {
	// MainApplicationFile is the path to a bundled JAR, Python, or R file of the application.
	MainApplicationFile string `json:"mainApplicationFile"`

	// Arguments is a list of arguments to be passed to the application.
	// +optional
	Arguments []string `json:"arguments,omitempty"`

	// ExecutorSpec describes the executor nodes of the job.
	// NOTE(review): presumably optional given the omitempty tag, but it lacks
	// a "+optional" marker unlike Arguments — confirm intended optionality.
	ExecutorSpec HadoopNodeSpec `json:"executorSpec,omitempty"`
}
| 59 | + |
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen=true
// JobCondition describes the current state of a job.
type JobCondition struct {
	// Type of job condition.
	Type JobConditionType `json:"type"`
	// Status of the condition, one of True, False, Unknown.
	Status corev1.ConditionStatus `json:"status"`
	// The reason for the condition's last transition.
	Reason string `json:"reason,omitempty"`
	// A human readable message indicating details about the transition.
	Message string `json:"message,omitempty"`
	// The last time this condition was updated.
	LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"`
	// Last time the condition transitioned from one status to another.
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
}
| 77 | + |
// JobConditionType is the type of a JobCondition, describing one phase of
// a job's lifecycle (Created, Submitted, Running, Succeeded, Failed).
type JobConditionType string

const (
	// JobCreated means the job has been accepted by the system,
	// but one or more of the pods/services has not been started.
	// This includes time before pods being scheduled and launched.
	JobCreated JobConditionType = "Created"

	// JobSubmitted means all sub-resources (e.g. services/pods) of this job
	// have been successfully submitted.
	JobSubmitted JobConditionType = "Submitted"

	// JobRunning means all sub-resources (e.g. services/pods) of this job
	// have been successfully scheduled and launched.
	// The job is running without error.
	JobRunning JobConditionType = "Running"

	// JobSucceeded means all sub-resources (e.g. services/pods) of this job
	// have terminated in success.
	// The job completed without error.
	JobSucceeded JobConditionType = "Succeeded"

	// JobFailed means one or more sub-resources (e.g. services/pods) of this job
	// reached phase failed with no restarting.
	// The job has failed its execution.
	JobFailed JobConditionType = "Failed"
)
| 105 | + |
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen=true
// HadoopJobStatus defines the observed state of HadoopJob.
type HadoopJobStatus struct {
	// Conditions is an array of current observed job conditions.
	Conditions []JobCondition `json:"conditions"`

	// StartTime represents the time when the job was acknowledged by the job controller.
	// It is not guaranteed to be set in happens-before order across separate operations.
	// It is represented in RFC3339 form and is in UTC.
	StartTime *metav1.Time `json:"startTime,omitempty"`

	// CompletionTime represents the time when the job was completed. It is not guaranteed to
	// be set in happens-before order across separate operations.
	// It is represented in RFC3339 form and is in UTC.
	CompletionTime *metav1.Time `json:"completionTime,omitempty"`
}
| 125 | + |
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +resource:path=hadoopjobs
// +kubebuilder:object:root=true
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="Age",type=date
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[-1:].type`,name="State",type=string
// +kubebuilder:subresource:status
// +kubebuilder:resource:scope=Namespaced,path=hadoopjobs,shortName={"hdj","hdjs"}
// HadoopJob is the Schema for the hadoopjobs API.
type HadoopJob struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Spec is the desired state of the HadoopJob.
	Spec HadoopJobSpec `json:"spec,omitempty"`
	// Status is the most recently observed state of the HadoopJob.
	Status HadoopJobStatus `json:"status,omitempty"`
}
| 142 | + |
// Markers must sit in the comment group directly attached to the type for
// controller-gen / deepcopy-gen to pick them up (the original had a blank
// line separating them, and a typo "apimzxachinery" in the runtime.Object
// interface path — both fixed here).
// +kubebuilder:object:root=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +resource:path=hadoopjobs
// HadoopJobList contains a list of HadoopJob.
type HadoopJobList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []HadoopJob `json:"items"`
}
| 153 | + |
// init registers the HadoopJob and HadoopJobList types with the package's
// SchemeBuilder so the generated AddToScheme can add them to a runtime.Scheme.
func init() {
	SchemeBuilder.Register(&HadoopJob{}, &HadoopJobList{})
}
0 commit comments