diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS deleted file mode 100644 index 2c08bc6..0000000 --- a/.github/CODEOWNERS +++ /dev/null @@ -1 +0,0 @@ -* @bestgopher diff --git a/crdyaml/tenant-crd.yaml b/crdyaml/tenant-crd.yaml new file mode 100644 index 0000000..39b2444 --- /dev/null +++ b/crdyaml/tenant-crd.yaml @@ -0,0 +1,1274 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: tenants.rustfs.com +spec: + group: rustfs.com + names: + categories: [] + kind: Tenant + plural: tenants + shortNames: + - tenant + singular: tenant + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.currentState + name: State + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Auto-generated derived type for TenantSpec via `CustomResource` + properties: + spec: + properties: + createServiceAccountRbac: + nullable: true + type: boolean + credsSecret: + description: |- + Optional reference to a Secret containing RustFS credentials. + The Secret must contain 'accesskey' and 'secretkey' keys (both required, minimum 8 characters each). + If not specified, credentials can be provided via environment variables in 'env'. + Priority: Secret credentials > Environment variables > RustFS built-in defaults. + For production use, always configure credentials via Secret or environment variables. + nullable: true + properties: + name: + description: 'Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + env: + items: + description: EnvVar represents an environment variable present in a Container. 
+ properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + - name + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. 
+ type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + description: Specifies the output format of the exposed resources, defaults to "1" + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + - name + type: object + type: object + required: + - name + type: object + type: array + image: + nullable: true + type: string + imagePullPolicy: + description: |- + Image pull policy for containers. + - Always: Always pull the image + - Never: Never pull the image + - IfNotPresent: Pull the image if not present locally (default) + + https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy + enum: + - Always + - Never + - IfNotPresent + - null + nullable: true + type: string + imagePullSecret: + description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. 
+ nullable: true + properties: + name: + description: 'Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + lifecycle: + description: Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted. + nullable: true + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod''s termination grace period countdown begins before the PreStop hook is executed. 
Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. 
+ type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + mountPath: + default: /data + nullable: true + type: string + podDeletionPolicyWhenNodeIsDown: + description: |- + Controls how the operator handles Pods when the node hosting them is down (NotReady/Unknown). + + Typical use-case: a StatefulSet Pod gets stuck in Terminating when the node goes down. + Setting this to `ForceDelete` allows the operator to force delete the Pod object so the + StatefulSet controller can recreate it elsewhere. 
+ + Values: DoNothing | Delete | ForceDelete | DeleteStatefulSetPod | DeleteDeploymentPod | DeleteBothStatefulSetAndDeploymentPod + enum: + - DoNothing + - Delete + - ForceDelete + - DeleteStatefulSetPod + - DeleteDeploymentPod + - DeleteBothStatefulSetAndDeploymentPod + - null + nullable: true + type: string + podManagementPolicy: + description: |- + Pod management policy for StatefulSets + - OrderedReady: respect the StatefulSet ordering guarantees, launching or terminating Pods one at a time + - Parallel: launch or terminate all Pods in parallel, without waiting for Pods to become Running + and Ready or completely terminated prior to launching or terminating another Pod + + https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy + enum: + - OrderedReady + - Parallel + - null + nullable: true + type: string + pools: + items: + description: |- + Kubernetes scheduling and placement configuration for pools. + Groups related scheduling fields for better code organization. + Uses #[serde(flatten)] to maintain flat YAML structure. + properties: + affinity: + description: Affinity is a group of affinity scheduling rules. + nullable: true + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). 
A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. 
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. 
The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. 
If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. 
Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. 
The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
+ items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. 
all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. 
The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + name: + type: string + x-kubernetes-validations: + - message: pool name must be not empty + rule: self != '' + nodeSelector: + additionalProperties: + type: string + description: NodeSelector is a selector which must be true for the pod to fit on a node. + nullable: true + type: object + persistence: + properties: + annotations: + additionalProperties: + type: string + nullable: true + type: object + labels: + additionalProperties: + type: string + nullable: true + type: object + path: + nullable: true + type: string + x-kubernetes-validations: + - message: path must be not empty when specified + rule: self != '' + volumeClaimTemplate: + description: PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes + nullable: true + properties: + accessModes: + description: 'accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. 
If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . 
::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + storageClassName: + description: 'storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeAttributesClassName: + description: 'volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it''s not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.' + type: string + volumeMode: + description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + volumesPerServer: + format: int32 + type: integer + x-kubernetes-validations: + - message: volumesPerServer must be greater than 0 + rule: self > 0 + required: + - volumesPerServer + type: object + priorityClassName: + description: PriorityClassName indicates the pod's priority. Overrides tenant-level priority class. 
+ nullable: true + type: string + resources: + description: Resources describes the compute resource requirements for the pool's containers. + nullable: true + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array + limits: + additionalProperties: + description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) 
This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . 
::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + servers: + format: int32 + type: integer + x-kubernetes-validations: + - message: servers must be gather than 0 + rule: self > 0 + tolerations: + description: Tolerations allow pods to schedule onto nodes with matching taints. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. 
+ type: string + type: object + nullable: true + type: array + topologySpreadConstraints: + description: TopologySpreadConstraints describes how pods should spread across topology domains. + items: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. 
The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + maxSkew: + description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. 
And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. 
This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field. 
+ type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + nullable: true + type: array + required: + - name + - persistence + - servers + type: object + x-kubernetes-validations: + - messageExpression: '"pool " + self.name + " with 2 servers must have at least 4 volumes in total"' + reason: FieldValueInvalid + rule: '!(self.servers * self.persistence.volumesPerServer < 4 && self.servers == 2)' + - messageExpression: '"pool " + self.name + " with 3 servers must have at least 6 volumes in total"' + reason: FieldValueInvalid + rule: '!(self.servers * self.persistence.volumesPerServer < 4 && self.servers == 3)' + type: array + x-kubernetes-validations: + - message: pools must be configured + rule: self.size() > 0 + priorityClassName: + nullable: true + type: string + scheduler: + nullable: true + type: string + serviceAccountName: + nullable: true + type: string + required: + - pools + type: object + status: + nullable: true + properties: + availableReplicas: + format: int32 + type: integer + conditions: + description: Kubernetes standard conditions + items: + description: Kubernetes standard condition for Tenant resources + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status to another + nullable: true + type: string + message: + description: Human-readable message indicating details about the transition + type: string + observedGeneration: + description: The generation of the Tenant resource that this condition reflects + format: int64 + nullable: true + type: integer + reason: + description: One-word CamelCase reason for the condition's last transition + type: string + status: + description: Status of the condition (True, False, Unknown) + type: string + type: + description: Type of condition (Ready, Progressing, Degraded) + type: string + required: + - message + - reason + - status + - type + type: object + type: array + currentState: + type: string + observedGeneration: + 
description: The generation observed by the operator + format: int64 + nullable: true + type: integer + pools: + items: + properties: + currentReplicas: + description: Number of pods with current revision + format: int32 + nullable: true + type: integer + currentRevision: + description: Current revision hash of the StatefulSet + nullable: true + type: string + lastUpdateTime: + description: Last time the pool status was updated + nullable: true + type: string + readyReplicas: + description: Number of pods with Ready condition + format: int32 + nullable: true + type: integer + replicas: + description: Total number of non-terminated pods targeted by this pool's StatefulSet + format: int32 + nullable: true + type: integer + ssName: + description: Name of the StatefulSet for this pool + type: string + state: + description: Current state of the pool + type: string + updateRevision: + description: Update revision hash of the StatefulSet (different from current during rollout) + nullable: true + type: string + updatedReplicas: + description: Number of pods with updated revision + format: int32 + nullable: true + type: integer + required: + - ssName + - state + type: object + type: array + required: + - availableReplicas + - currentState + - pools + type: object + required: + - spec + title: Tenant + type: object + served: true + storage: true + subresources: + status: {} diff --git a/docs/DEVELOPMENT.md b/docs/DEVELOPMENT.md new file mode 100644 index 0000000..55386aa --- /dev/null +++ b/docs/DEVELOPMENT.md @@ -0,0 +1,986 @@ +# RustFS Operator Development Guide + +This guide will help you set up a local development environment for the RustFS Kubernetes Operator. + +--- + +## ๐Ÿ“‹ Prerequisites + +### Required Tools + +1. **Rust Toolchain** (1.91+) + - Project uses Rust Edition 2024 + - Required components: `rustfmt`, `clippy`, `rust-src`, `rust-analyzer` + +2. 
**Kubernetes Cluster** + - Kubernetes v1.27+ (current target: v1.30) + - For local development, use: + - [kind](https://kind.sigs.k8s.io/) (recommended) + - [minikube](https://minikube.sigs.k8s.io/) + - [k3s](https://k3s.io/) + - Docker Desktop (built-in Kubernetes) + +3. **kubectl** + - For interacting with Kubernetes clusters + +4. **Optional Tools** + - `just` - Task runner (project includes Justfile) + - `cargo-nextest` - Faster test runner + - `docker` - For building container images + - `OpenLens` - Kubernetes cluster management GUI + +--- + +## ๐Ÿš€ Quick Start + +### 1. Install Rust Toolchain + +The project uses `rust-toolchain.toml` to automatically manage the Rust version: + +```bash +# If Rust is not installed yet +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh + +# Navigate to project directory (Rust will auto-install correct toolchain version) +cd ~/operator + +# Verify installation +rustc --version +cargo --version +``` + +The toolchain will automatically install: +- `rustfmt` - Code formatter +- `clippy` - Code linter +- `rust-src` - Rust source code +- `rust-analyzer` - IDE support + +### 2. Install Optional Development Tools + +```bash +# Install cargo-nextest (faster test runner) +cargo install cargo-nextest + +# Install just (task runner) +# macOS +brew install just + +# Linux +# Download from https://github.com/casey/just/releases +# Or use package manager +``` + +### 3. Clone the Project (if not already done) + +```bash +git clone https://github.com/rustfs/operator.git +cd operator +``` + +### 4. 
Verify Project Setup + +```bash +# Check Rust toolchain +rustc --version # Should be 1.91+ + +# Check project dependencies +cargo check + +# Run formatting check +cargo fmt --all --check + +# Run clippy check +cargo clippy --all-targets --all-features -- -D warnings +``` + +--- + +## ๐Ÿ”จ Building the Operator + +### How to Compile the Operator + +The operator can be built using Cargo (standard Rust build tool) or the Justfile task runner. + +#### Method 1: Using Cargo (Standard) + +```bash +# Debug build (faster compilation, larger binary, slower runtime) +cargo build + +# Release build (slower compilation, smaller binary, faster runtime) +cargo build --release + +# Binary locations: +# Debug: target/debug/operator +# Release: target/release/operator +``` + +#### Method 2: Using Justfile (Recommended) + +```bash +# Build Debug binary +just build + +# Build Release binary +just build MODE=release +``` + +#### Build Output + +After building, the operator binary will be located at: +- **Debug**: `target/debug/operator` +- **Release**: `target/release/operator` + +You can run it directly: +```bash +# Run debug binary +./target/debug/operator --help + +# Run release binary +./target/release/operator --help +``` + +#### Build Options + +```bash +# Format code before building +just fmt && just build + +# Run all checks before building +just pre-commit && just build MODE=release + +# Clean and rebuild +cargo clean && cargo build --release +``` + +--- + +## ๐Ÿณ Installing kind + +kind (Kubernetes in Docker) is the recommended tool for local Kubernetes development. 
+ +### Installation + +#### macOS + +```bash +# Using Homebrew (recommended) +brew install kind + +# Verify installation +kind --version +``` + +#### Linux + +```bash +# Download binary from releases +curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.20.0/kind-linux-amd64 +chmod +x ./kind +sudo mv ./kind /usr/local/bin/kind + +# Or using package manager (if available) +# Verify installation +kind --version +``` + +#### Windows + +```bash +# Using Chocolatey +choco install kind + +# Or download from: https://kind.sigs.k8s.io/docs/user/quick-start/ +``` + +### Creating a kind Cluster + +```bash +# Create a cluster named 'rustfs-dev' +kind create cluster --name rustfs-dev + +# Verify cluster is running +kubectl cluster-info --context kind-rustfs-dev + +# List clusters +kind get clusters + +# Check cluster nodes +kubectl get nodes +``` + +### kind Cluster Management + +#### Starting a Cluster + +```bash +# If cluster exists but is stopped, restart it +# Note: kind clusters run in Docker containers, so they persist until deleted +# To "restart", you may need to recreate if Docker was restarted + +# Check if cluster containers are running +docker ps | grep rustfs-dev + +# If containers are stopped, restart Docker or recreate cluster +kind create cluster --name rustfs-dev +``` + +#### Stopping a Cluster + +```bash +# kind clusters run in Docker containers +# To stop, you can stop Docker or delete the cluster + +# Stop Docker Desktop (macOS/Windows) +# Or stop Docker daemon (Linux) +sudo systemctl stop docker + +# Note: Stopping Docker will stop all kind clusters +``` + +#### Restarting a Cluster + +```bash +# If Docker was restarted, kind clusters may need to be recreated +# Check cluster status +kind get clusters + +# If cluster exists but kubectl can't connect, recreate it +kind delete cluster --name rustfs-dev +kind create cluster --name rustfs-dev + +# Restore kubectl context +kubectl cluster-info --context kind-rustfs-dev +``` + +#### Deleting a Cluster + +```bash +# 
Delete a specific cluster +kind delete cluster --name rustfs-dev + +# Delete all kind clusters +kind delete cluster --all + +# Verify deletion +kind get clusters +``` + +#### Advanced kind Configuration + +Create a custom kind configuration file `kind-config.yaml`: + +```yaml +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + kubeadmConfigPatches: + - | + kind: InitConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "ingress-ready=true" + extraPortMappings: + - containerPort: 80 + hostPort: 80 + protocol: TCP + - containerPort: 443 + hostPort: 443 + protocol: TCP +``` + +Create cluster with custom config: +```bash +kind create cluster --name rustfs-dev --config kind-config.yaml +``` + +--- + +## ๐Ÿ–ฅ๏ธ Installing OpenLens + +OpenLens is a powerful Kubernetes IDE for managing clusters visually. + +### Installation + +#### macOS + +```bash +# Using Homebrew +brew install --cask openlens + +# Or download from: https://github.com/MuhammedKalkan/OpenLens/releases +``` + +#### Linux + +```bash +# Download AppImage from releases +wget https://github.com/MuhammedKalkan/OpenLens/releases/latest/download/OpenLens-.AppImage +chmod +x OpenLens-.AppImage +./OpenLens-.AppImage + +# Or install via Snap +snap install openlens +``` + +#### Windows + +```bash +# Using Chocolatey +choco install openlens + +# Or download installer from: https://github.com/MuhammedKalkan/OpenLens/releases +``` + +### Connecting OpenLens to kind Cluster + +1. **Get kubeconfig path**: + ```bash + # kind stores kubeconfig in ~/.kube/config + # Or get specific context + kubectl config view --minify --context kind-rustfs-dev + ``` + +2. **Open OpenLens**: + - Click "Add Cluster" or "+" button + - Select "Add from kubeconfig" + - Navigate to `~/.kube/config` (or paste kubeconfig content) + - Select context: `kind-rustfs-dev` + - Click "Add" + +3. 
**Verify Connection**: + - You should see your kind cluster in the cluster list + - Click on it to view nodes, pods, services, etc. + +### Using OpenLens for Development + +- **View Resources**: Browse Tenants, Pods, StatefulSets, Services +- **View Logs**: Click on any Pod to see logs +- **Terminal Access**: Open terminal in Pods directly +- **Resource Editor**: Edit YAML files directly +- **Event Viewer**: Monitor Kubernetes events in real-time + +--- + +## ๐Ÿƒ Installing and Running the Operator + +### Step 1: Install CRD (Custom Resource Definition) + +The operator requires the Tenant CRD to be installed in your cluster: + +```bash +# Generate CRD YAML +cargo run -- crd > tenant-crd.yaml + +# Or output directly to file +cargo run -- crd -f tenant-crd.yaml + +# Install CRD +kubectl apply -f tenant-crd.yaml + +# Verify CRD is installed +kubectl get crd tenants.rustfs.com + +# View CRD details +kubectl describe crd tenants.rustfs.com +``` + +### Step 2: Configure kubectl Access + +Ensure `kubectl` can access your cluster: + +```bash +# Check current context +kubectl config current-context + +# List all contexts +kubectl config get-contexts + +# Switch to correct context (if needed) +kubectl config use-context kind-rustfs-dev + +# Verify cluster connection +kubectl cluster-info +kubectl get nodes +``` + +### Step 3: Run Operator Locally (Development Mode) + +#### Option A: Run from Source (Recommended for Development) + +```bash +# Set log level (optional) +export RUST_LOG=debug +export RUST_LOG=rustfs_operator=debug,kube=info + +# Run operator in debug mode +cargo run -- server + +# Or run in release mode (faster) +cargo run --release -- server +``` + +The operator will: +- Connect to your Kubernetes cluster +- Watch for Tenant CRD changes +- Reconcile resources (StatefulSets, Services, RBAC) + +#### Option B: Run Pre-built Binary + +```bash +# Build the binary first +cargo build --release + +# Run the binary +./target/release/operator server +``` + +#### Option 
C: Deploy as Pod in Cluster + +```bash +# Build Docker image +docker build -t rustfs/operator:dev . + +# Load image into kind cluster +kind load docker-image rustfs/operator:dev --name rustfs-dev + +# Deploy using Helm (see deploy/README.md) +helm install rustfs-operator deploy/rustfs-operator/ \ + --namespace rustfs-system \ + --create-namespace \ + --set image.tag=dev \ + --set image.pullPolicy=Never +``` + +### Step 4: Test the Operator + +In another terminal: + +```bash +# Create a test Tenant +kubectl apply -f examples/minimal-dev-tenant.yaml + +# Watch Tenant status +kubectl get tenant dev-minimal -w + +# View created resources +kubectl get pods -l rustfs.tenant=dev-minimal +kubectl get statefulset -l rustfs.tenant=dev-minimal +kubectl get svc -l rustfs.tenant=dev-minimal +kubectl get pvc -l rustfs.tenant=dev-minimal +``` + +--- + +## ๐Ÿ› Debugging the Operator + +### Debugging Methods + +#### 1. Local Development Debugging + +**Run with verbose logging**: +```bash +# Set detailed log levels +export RUST_LOG=debug +export RUST_LOG=rustfs_operator=debug,kube=info,tracing=debug + +# Run operator +cargo run -- server +``` + +**Use a debugger** (VS Code): +1. Install "CodeLLDB" extension +2. Create `.vscode/launch.json`: +```json +{ + "version": "0.2.0", + "configurations": [ + { + "type": "lldb", + "request": "launch", + "name": "Debug Operator", + "cargo": { + "args": ["build", "--bin", "operator"], + "filter": { + "name": "operator", + "kind": "bin" + } + }, + "args": ["server"], + "cwd": "${workspaceFolder}", + "env": { + "RUST_LOG": "debug" + } + } + ] +} +``` +3. Set breakpoints and press F5 + +#### 2. 
Cluster-based Debugging + +**View operator logs** (if deployed in cluster): +```bash +# Get operator pod name +kubectl get pods -n rustfs-system + +# View logs +kubectl logs -f -n rustfs-system -l app.kubernetes.io/name=rustfs-operator + +# View logs with timestamps +kubectl logs -f -n rustfs-system -l app.kubernetes.io/name=rustfs-operator --timestamps + +# View previous logs (if pod restarted) +kubectl logs -f -n rustfs-system -l app.kubernetes.io/name=rustfs-operator --previous +``` + +**Debug operator pod**: +```bash +# Exec into operator pod +kubectl exec -it -n rustfs-system -- /bin/sh + +# Check environment variables +kubectl exec -n rustfs-system -- env +``` + +#### 3. Resource Debugging + +**Check reconciliation status**: +```bash +# View Tenant status +kubectl get tenant -o yaml + +# View Tenant events +kubectl describe tenant + +# View all events +kubectl get events --sort-by='.lastTimestamp' --all-namespaces + +# Watch events in real-time +kubectl get events --watch --all-namespaces +``` + +**Check created resources**: +```bash +# View StatefulSet details +kubectl get statefulset -l rustfs.tenant= -o yaml + +# View Pod status +kubectl get pods -l rustfs.tenant= -o wide + +# View Pod logs +kubectl logs -f -l rustfs.tenant= +``` + +--- + +## ๐Ÿ“‹ Logging and Log Locations + +### Log Levels + +The operator uses the `tracing` crate for structured logging. 
Log levels: + +- `ERROR` - Errors that need attention +- `WARN` - Warnings about potential issues +- `INFO` - General informational messages +- `DEBUG` - Detailed debugging information +- `TRACE` - Very detailed tracing (very verbose) + +### Setting Log Levels + +#### Environment Variables + +```bash +# Set global log level +export RUST_LOG=debug + +# Set per-module log levels +export RUST_LOG=rustfs_operator=debug,kube=info,tracing=warn + +# Common configurations: +# Development +export RUST_LOG=rustfs_operator=debug,kube=info + +# Production +export RUST_LOG=rustfs_operator=info,kube=warn + +# Troubleshooting +export RUST_LOG=rustfs_operator=trace,kube=debug +``` + +#### Log Location + +**When running locally**: +- Logs are output to **stdout/stderr** +- View in terminal where operator is running +- Can redirect to file: `cargo run -- server 2>&1 | tee operator.log` + +**When deployed in cluster**: +- Logs are stored in **Pod logs** +- View with: `kubectl logs -f -n rustfs-system` +- Logs persist until Pod is deleted +- Use log aggregation tools (e.g., Loki, Fluentd) for long-term storage + +### Viewing Logs + +#### Local Development + +```bash +# Terminal 1: Run operator with logging +export RUST_LOG=debug +cargo run -- server + +# Terminal 2: View logs in real-time (if redirected to file) +tail -f operator.log + +# Or use system log viewer (macOS) +log stream --predicate 'process == "operator"' +``` + +#### Cluster Deployment + +```bash +# View current logs +kubectl logs -f -n rustfs-system -l app.kubernetes.io/name=rustfs-operator + +# View logs with timestamps +kubectl logs -f -n rustfs-system -l app.kubernetes.io/name=rustfs-operator --timestamps + +# View last 100 lines +kubectl logs --tail=100 -n rustfs-system -l app.kubernetes.io/name=rustfs-operator + +# View logs since specific time +kubectl logs --since=10m -n rustfs-system -l app.kubernetes.io/name=rustfs-operator + +# View logs from previous container (if pod restarted) +kubectl logs --previous -n 
rustfs-system -l app.kubernetes.io/name=rustfs-operator + +# Export logs to file +kubectl logs -n rustfs-system -l app.kubernetes.io/name=rustfs-operator > operator.log +``` + +#### Using OpenLens + +1. Open OpenLens +2. Select your cluster +3. Navigate to **Workloads** โ†’ **Pods** +4. Find operator pod in `rustfs-system` namespace +5. Click on pod โ†’ **Logs** tab +6. View real-time logs with filtering options + +### Common Log Patterns + +**Successful reconciliation**: +``` +INFO reconcile: reconciled successful, object: +``` + +**Reconciliation errors**: +``` +ERROR reconcile: reconcile failed: +WARN error_policy: +``` + +**Resource creation**: +``` +DEBUG Creating StatefulSet +INFO StatefulSet created successfully +``` + +**Status updates**: +``` +DEBUG Updating tenant status: +``` + +--- + +## ๐Ÿงช Running Tests + +```bash +# Run all tests +cargo test + +# Use nextest (faster) +cargo nextest run + +# Or use just +just test + +# Run specific test +cargo test test_statefulset_no_update_needed + +# Run ignored tests (includes TLS tests) +cargo test -- --ignored + +# Run tests with output +cargo test -- --nocapture + +# Run tests in single thread (for debugging) +cargo test -- --test-threads=1 +``` + +--- + +## ๐Ÿ› ๏ธ Development Workflow + +### Daily Development Process + +1. **Create feature branch** + ```bash + git checkout -b feature/your-feature-name + ``` + +2. **Write code** + +3. **Format code** + ```bash + cargo fmt --all + # or + just fmt + ``` + +4. **Run checks** + ```bash + just pre-commit + # This runs: + # - fmt-check + # - clippy + # - check + # - test + ``` + +5. **Run tests** + ```bash + cargo test + # or + just test + ``` + +6. **Test operator locally** + ```bash + # Terminal 1: Run operator + cargo run -- server + + # Terminal 2: Create test resources + kubectl apply -f examples/minimal-dev-tenant.yaml + kubectl get tenant -w + ``` + +7. **Commit code** + ```bash + git add . 
+ git commit -m "feat: your feature description" + ``` + +### Code Quality Checks + +The project enforces strict code quality standards: + +```bash +# Run all checks +just pre-commit + +# Run individual checks +just fmt-check # Check formatting +just clippy # Code linting +just check # Compilation check +just test # Tests +``` + +**Note**: The project has `deny`-level clippy rules: +- `unwrap_used = "deny"` - Prohibits `unwrap()` +- `expect_used = "deny"` - Prohibits `expect()` + +--- + +## ๐Ÿงน Cleaning Up + +### Clean Test Resources + +```bash +# Delete test Tenant (automatically deletes all related resources) +kubectl delete tenant dev-minimal + +# Delete all Tenants +kubectl delete tenant --all +``` + +### Clean Cluster + +```bash +# Delete kind cluster +kind delete cluster --name rustfs-dev + +# Delete all kind clusters +kind delete cluster --all + +# minikube +minikube delete +``` + +### Clean Build Artifacts + +```bash +# Clean target directory +cargo clean + +# Clean and rebuild +cargo clean && cargo build +``` + +--- + +## ๐Ÿ› Troubleshooting + +### 1. Rust Version Mismatch + +**Problem**: `error: toolchain 'stable' is not installed` + +**Solution**: +```bash +# Navigate to project directory, rustup will auto-install correct toolchain +cd /Users/hongwei/my/operator +rustup show +``` + +### 2. Cannot Connect to Kubernetes Cluster + +**Problem**: `Failed to connect to Kubernetes API` + +**Solution**: +```bash +# Check kubectl configuration +kubectl config current-context +kubectl cluster-info + +# Ensure cluster is running +kubectl get nodes + +# For kind: check if cluster containers are running +docker ps | grep rustfs-dev +``` + +### 3. CRD Not Found + +**Problem**: `the server could not find the requested resource` + +**Solution**: +```bash +# Reinstall CRD +cargo run -- crd | kubectl apply -f - + +# Verify CRD is installed +kubectl get crd tenants.rustfs.com +``` + +### 4. 
Clippy Errors + +**Problem**: Clippy reports `unwrap_used` or `expect_used` errors + +**Solution**: +- Use `Result` and `?` operator +- Use `match` or `if let` to handle `Option` +- Use `snafu` for error handling + +### 5. Test Failures + +**Problem**: Tests cannot run or fail + +**Solution**: +```bash +# Run single test with detailed output +cargo test -- --nocapture test_name + +# Run all tests (including ignored) +cargo test -- --include-ignored +``` + +### 6. kind Cluster Issues + +**Problem**: Cannot connect to kind cluster after Docker restart + +**Solution**: +```bash +# Recreate cluster +kind delete cluster --name rustfs-dev +kind create cluster --name rustfs-dev + +# Restore kubectl context +kubectl cluster-info --context kind-rustfs-dev +``` + +--- + +## ๐Ÿ“š Useful Command Reference + +### Cargo Commands + +```bash +# Build +cargo build # Debug build +cargo build --release # Release build + +# Check +cargo check # Quick compilation check +cargo clippy # Code linting + +# Test +cargo test # Run tests +cargo test -- --ignored # Run ignored tests +cargo nextest run # Use nextest + +# Format +cargo fmt # Format code +cargo fmt --all --check # Check formatting + +# Documentation +cargo doc --open # Generate and open docs +``` + +### kubectl Commands + +```bash +# CRD operations +kubectl get crd # List all CRDs +kubectl get tenant # List all Tenants +kubectl describe tenant # View Tenant details + +# Resource operations +kubectl get pods -l rustfs.tenant= +kubectl get statefulset -l rustfs.tenant= +kubectl get svc -l rustfs.tenant= + +# Logs +kubectl logs -f +kubectl logs -f -l rustfs.tenant= + +# Events +kubectl get events --sort-by='.lastTimestamp' +``` + +### kind Commands + +```bash +# Cluster management +kind create cluster --name # Create cluster +kind delete cluster --name # Delete cluster +kind get clusters # List clusters +kind get nodes --name # List nodes + +# Image management +kind load docker-image --name # Load image +``` + +--- + +## ๐ŸŽฏ Next 
Steps + +- View [CONTRIBUTING.md](../CONTRIBUTING.md) for contribution guidelines +- View [DEVELOPMENT-NOTES.md](./DEVELOPMENT-NOTES.md) for development notes +- View [architecture-decisions.md](./architecture-decisions.md) for architecture decisions +- View [../examples/](../examples/) for usage examples + +--- + +**Happy coding!** ๐Ÿš€ diff --git a/docs/POOL-STATUS-EXPLANATION.md b/docs/POOL-STATUS-EXPLANATION.md new file mode 100644 index 0000000..a7dfed9 --- /dev/null +++ b/docs/POOL-STATUS-EXPLANATION.md @@ -0,0 +1,285 @@ +# Pool Status Structure Explanation + +This document explains what the `Pool` structure in `src/types/v1alpha1/status/pool.rs` represents. + +--- + +## ๐Ÿ“‹ Overview + +The `Pool` struct in `src/types/v1alpha1/status/pool.rs` represents the **runtime status** of a storage pool in a RustFS Tenant. It is part of the Tenant's status field and tracks the actual state of the StatefulSet that manages the pool's Pods. + +--- + +## ๐Ÿ” Key Concepts + +### Two Different `Pool` Types + +There are **two different** `Pool` structures in the codebase: + +1. **`src/types/v1alpha1/pool.rs::Pool`** - **Spec (Desired State)** + - User-defined configuration + - Part of `TenantSpec` + - Defines what the user wants (e.g., `servers: 4`, `volumesPerServer: 2`) + +2. 
**`src/types/v1alpha1/status/pool.rs::Pool`** - **Status (Actual State)** + - Runtime status information + - Part of `TenantStatus` + - Tracks what actually exists (e.g., `replicas: 4`, `ready_replicas: 3`) + +### Relationship + +``` +Tenant CRD +├── spec.pools[] ← User configuration (pool.rs::Pool) +│ └── name: "pool-0" +│ servers: 4 +│ volumesPerServer: 2 +│ +└── status.pools[] ← Runtime status (status/pool.rs::Pool) + └── ss_name: "tenant-pool-0" + state: "RolloutComplete" + replicas: 4 + ready_replicas: 4 +``` + +--- + +## 📊 Pool Status Structure + +### Fields Explained + +```rust +pub struct Pool { + /// Name of the StatefulSet for this pool + pub ss_name: String, + + /// Current state of the pool + pub state: PoolState, + + /// Total number of non-terminated pods targeted by this pool's StatefulSet + pub replicas: Option<i32>, + + /// Number of pods with Ready condition + pub ready_replicas: Option<i32>, + + /// Number of pods with current revision + pub current_replicas: Option<i32>, + + /// Number of pods with updated revision + pub updated_replicas: Option<i32>, + + /// Current revision hash of the StatefulSet + pub current_revision: Option<String>, + + /// Update revision hash of the StatefulSet (different from current during rollout) + pub update_revision: Option<String>, + + /// Last time the pool status was updated + pub last_update_time: Option<String>, +} +``` + +### Field Details + +#### `ss_name: String` +- **Meaning**: The name of the StatefulSet that manages this pool +- **Format**: `{tenant-name}-{pool-name}` +- **Example**: `dev-minimal-dev-pool` +- **Purpose**: Used to identify and query the StatefulSet resource + +#### `state: PoolState` +- **Meaning**: Current operational state of the pool +- **Possible Values**: See `PoolState` enum below +- **Purpose**: Quick status indicator for monitoring and debugging + +#### `replicas: Option<i32>` +- **Meaning**: Total number of Pods that should exist (desired replicas) +- **Source**: 
`StatefulSet.status.replicas` +- **Example**: `4` means 4 Pods should exist +- **Purpose**: Track desired vs actual Pod count + +#### `ready_replicas: Option<i32>` +- **Meaning**: Number of Pods that are Ready (passing readiness probe) +- **Source**: `StatefulSet.status.readyReplicas` +- **Example**: `3` means 3 out of 4 Pods are ready +- **Purpose**: Determine if pool is fully operational + +#### `current_replicas: Option<i32>` +- **Meaning**: Number of Pods running the current (old) revision +- **Source**: `StatefulSet.status.currentReplicas` +- **Example**: During update, `2` means 2 Pods still on old version +- **Purpose**: Track rollout progress + +#### `updated_replicas: Option<i32>` +- **Meaning**: Number of Pods running the updated (new) revision +- **Source**: `StatefulSet.status.updatedReplicas` +- **Example**: During update, `2` means 2 Pods on new version +- **Purpose**: Track rollout progress + +#### `current_revision: Option<String>` +- **Meaning**: Revision hash of the current StatefulSet template +- **Source**: `StatefulSet.status.currentRevision` +- **Example**: `"tenant-pool-0-abc123"` +- **Purpose**: Identify which template version Pods are running + +#### `update_revision: Option<String>` +- **Meaning**: Revision hash of the updated StatefulSet template (during rollout) +- **Source**: `StatefulSet.status.updateRevision` +- **Example**: `"tenant-pool-0-def456"` +- **Purpose**: Identify which template version is being rolled out + +#### `last_update_time: Option<String>` +- **Meaning**: Timestamp when this status was last updated +- **Format**: RFC3339 timestamp +- **Example**: `"2025-01-15T10:30:00Z"` +- **Purpose**: Track when status was last refreshed + +--- + +## 🎯 PoolState Enum + +The `PoolState` enum represents the operational state of a pool: + +```rust +pub enum PoolState { + Created, // PoolCreated - StatefulSet exists + NotCreated, // PoolNotCreated - StatefulSet doesn't exist or has 0 replicas + Initialized, // PoolInitialized - Pool is initialized but not all replicas 
ready + Updating, // PoolUpdating - Rollout in progress + RolloutComplete, // PoolRolloutComplete - All replicas ready and updated + RolloutFailed, // PoolRolloutFailed - Rollout failed + Degraded, // PoolDegraded - Some replicas not ready +} +``` + +### State Determination Logic + +The state is determined based on StatefulSet status: + +```rust +if desired == 0 { + PoolState::NotCreated +} else if ready == desired && updated == desired { + PoolState::RolloutComplete // All good! +} else if updated < desired || current < desired { + PoolState::Updating // Rollout in progress +} else if ready < desired { + PoolState::Degraded // Some Pods not ready +} else { + PoolState::Initialized // Initialized but not fully ready +} +``` + +--- + +## ๐Ÿ”„ How It's Used + +### 1. Status Collection + +During reconciliation, the operator: + +1. **Queries StatefulSets** for each pool in `spec.pools` +2. **Extracts status** from each StatefulSet +3. **Builds Pool status** using `build_pool_status()` method +4. **Aggregates** all pool statuses into `TenantStatus.pools[]` + +### 2. Status Update Flow + +``` +Reconciliation Loop + โ†“ +For each pool in spec.pools: + โ†“ +Get StatefulSet: {tenant-name}-{pool-name} + โ†“ +Extract StatefulSet.status + โ†“ +Build Pool status object + โ†“ +Add to TenantStatus.pools[] + โ†“ +Update Tenant.status +``` + +### 3. Example Status Output + +```yaml +apiVersion: rustfs.com/v1alpha1 +kind: Tenant +metadata: + name: dev-minimal +status: + currentState: "Ready" + availableReplicas: 4 + pools: + - ssName: "dev-minimal-dev-pool" + state: "PoolRolloutComplete" + replicas: 4 + readyReplicas: 4 + currentReplicas: 4 + updatedReplicas: 4 + currentRevision: "dev-minimal-dev-pool-abc123" + updateRevision: "dev-minimal-dev-pool-abc123" + lastUpdateTime: "2025-01-15T10:30:00Z" +``` + +--- + +## ๐Ÿ’ก Use Cases + +### 1. 
Monitoring Pool Health + +```bash +# Check pool status +kubectl get tenant dev-minimal -o jsonpath='{.status.pools[*].state}' + +# Check ready replicas +kubectl get tenant dev-minimal -o jsonpath='{.status.pools[*].readyReplicas}' +``` + +### 2. Detecting Rollout Progress + +```bash +# Check if pool is updating +kubectl get tenant dev-minimal -o jsonpath='{.status.pools[?(@.state=="PoolUpdating")]}' + +# Compare current vs updated replicas +kubectl get tenant dev-minimal -o jsonpath='{.status.pools[*].currentReplicas}' +kubectl get tenant dev-minimal -o jsonpath='{.status.pools[*].updatedReplicas}' +``` + +### 3. Debugging Issues + +```bash +# Check if pool is degraded +kubectl get tenant dev-minimal -o jsonpath='{.status.pools[?(@.state=="PoolDegraded")]}' + +# View full pool status +kubectl get tenant dev-minimal -o jsonpath='{.status.pools[*]}' | jq +``` + +--- + +## ๐Ÿ”— Related Code + +- **Status Collection**: `src/types/v1alpha1/tenant.rs::build_pool_status()` +- **Status Aggregation**: `src/reconcile.rs` (reconciliation loop) +- **Status Definition**: `src/types/v1alpha1/status.rs::Status` +- **Pool Spec**: `src/types/v1alpha1/pool.rs::Pool` + +--- + +## Summary + +**`status/pool.rs::Pool`** represents: +- โœ… **Runtime status** of a storage pool +- โœ… **StatefulSet status** information +- โœ… **Pod replica counts** and readiness +- โœ… **Rollout progress** during updates +- โœ… **Operational state** (Ready, Updating, Degraded, etc.) + +**Key Distinction**: +- `spec.pools[]` = What you want (configuration) +- `status.pools[]` = What actually exists (runtime status) + +This separation allows the operator to track the difference between desired and actual state, enabling proper reconciliation and status reporting. 
diff --git a/docs/RUSTFS-K8S-INTEGRATION.md b/docs/RUSTFS-K8S-INTEGRATION.md new file mode 100644 index 0000000..ef4133e --- /dev/null +++ b/docs/RUSTFS-K8S-INTEGRATION.md @@ -0,0 +1,405 @@ +# RustFS Encapsulation in Kubernetes + +This document explains in detail how the RustFS Kubernetes Operator encapsulates RustFS into Kubernetes and how it handles RustFS's dependency on system paths. + +--- + +## ๐Ÿ“‹ Project Overview + +### What Does This Project Do? + +**RustFS Kubernetes Operator** is a Kubernetes Operator that: + +1. **Automates RustFS Deployment**: Automatically creates and manages RustFS storage clusters through declarative configuration (CRD) +2. **Encapsulates Complexity**: Hides the complexity of Kubernetes resource creation (StatefulSet, Service, PVC, RBAC, etc.) +3. **Lifecycle Management**: Automatically handles creation, updates, scaling, and deletion of RustFS clusters +4. **Configuration Management**: Automatically generates environment variables and configurations required by RustFS + +### Core Value + +**Without Operator**, deploying RustFS requires manually creating: +- StatefulSet (managing Pods) +- PersistentVolumeClaim (storage volumes) +- Service (service discovery) +- RBAC (permissions) +- ConfigMap/Secret (configuration) +- Manually configuring `RUSTFS_VOLUMES` environment variable + +**With Operator**, you only need: +```yaml +apiVersion: rustfs.com/v1alpha1 +kind: Tenant +metadata: + name: my-rustfs +spec: + pools: + - name: primary + servers: 2 + persistence: + volumesPerServer: 2 +``` + +The Operator automatically creates all necessary resources! + +--- + +## ๐Ÿ” RustFS Path Dependency Problem + +### How Does RustFS Work? + +RustFS is a distributed object storage system that requires: + +1. **Local Storage Paths**: Each node needs to access local disk paths to store data + - Example: `/data/rustfs0`, `/data/rustfs1`, `/data/rustfs2`, `/data/rustfs3` + - These paths must exist and be writable + +2. 
**Network Communication**: Nodes need to communicate over the network to coordinate data distribution + - RustFS uses the `RUSTFS_VOLUMES` environment variable to discover other nodes + - Format: `http://node1:9000/data/rustfs{0...N} http://node2:9000/data/rustfs{0...N} ...` + +3. **Path Convention**: RustFS follows a specific path naming convention + - Base path + `/rustfs{index}` + - Example: `/data/rustfs0`, `/data/rustfs1` + +### Problems with Traditional Deployment + +Deploying RustFS on traditional servers: + +```bash +# 1. Create directories +mkdir -p /data/rustfs{0..3} + +# 2. Set permissions +chown -R rustfs:rustfs /data + +# 3. Configure environment variables +export RUSTFS_VOLUMES="http://node1:9000/data/rustfs{0...3} http://node2:9000/data/rustfs{0...3}" + +# 4. Start RustFS +rustfs server +``` + +**Problems**: +- โŒ Paths are hardcoded and inflexible +- โŒ Requires manual management of multiple nodes +- โŒ Difficult to use in container environments (container filesystems are ephemeral) +- โŒ Cannot leverage Kubernetes storage abstractions + +--- + +## โœ… Kubernetes Solution + +### Core Idea: Use PersistentVolume + VolumeMount + +Kubernetes solves the path dependency problem through the following mechanisms: + +1. **PersistentVolumeClaim (PVC)**: Abstracts storage without caring about the underlying implementation +2. **VolumeMount**: Mounts PVCs to specified paths in containers +3. **StatefulSet**: Ensures stable network identity and storage for Pods + +### Implementation Principles + +#### 1. 
Create PersistentVolumeClaim Templates + +The Operator creates PVCs for each volume: + +```rust +// Code location: src/types/v1alpha1/tenant/workloads.rs + +fn volume_claim_templates(&self, pool: &Pool) -> Result<Vec<PersistentVolumeClaim>> { + // Create PVC template for each volume + // Example: vol-0, vol-1, vol-2, vol-3 + let templates: Vec<_> = (0..pool.persistence.volumes_per_server) + .map(|i| PersistentVolumeClaim { + metadata: ObjectMeta { + name: Some(format!("vol-{}", i)), // vol-0, vol-1, ... + .. + }, + spec: Some(PersistentVolumeClaimSpec { + access_modes: Some(vec!["ReadWriteOnce".to_string()]), + resources: Some(VolumeResourceRequirements { + requests: Some(resources), + .. + }), + .. + }), + .. + }) + .collect(); +} +``` + +**Generated PVCs**: +```yaml +# StatefulSet automatically creates these PVCs for each Pod +# Pod 0: dev-minimal-dev-pool-0-vol-0, dev-minimal-dev-pool-0-vol-1, ... +# Pod 1: dev-minimal-dev-pool-1-vol-0, dev-minimal-dev-pool-1-vol-1, ... +``` + +#### 2. Mount PVCs to Container Paths + +The Operator creates VolumeMounts to mount PVCs to paths expected by RustFS: + +```rust +// Code location: src/types/v1alpha1/tenant/workloads.rs + +let base_path = pool.persistence.path.as_deref().unwrap_or("/data"); +let mut volume_mounts: Vec<VolumeMount> = (0..pool.persistence.volumes_per_server) + .map(|i| VolumeMount { + name: format!("vol-{}", i), // Corresponds to PVC name + mount_path: format!("{}/rustfs{}", base_path, i), // /data/rustfs0, /data/rustfs1, ... + .. + }) + .collect(); +``` + +**Result**: +- PVC `vol-0` → mounted to `/data/rustfs0` +- PVC `vol-1` → mounted to `/data/rustfs1` +- PVC `vol-2` → mounted to `/data/rustfs2` +- PVC `vol-3` → mounted to `/data/rustfs3` + +#### 3. 
Automatically Generate RUSTFS_VOLUMES Environment Variable + +The Operator automatically generates `RUSTFS_VOLUMES` to tell RustFS how to find other nodes: + +```rust +// Code location: src/types/v1alpha1/tenant/workloads.rs + +fn rustfs_volumes_env_value(&self) -> Result { + // Generated format: + // http://{tenant}-{pool}-{0...servers-1}.{service}.{namespace}.svc.cluster.local:9000{path}/rustfs{0...volumes-1} + + format!( + "http://{tenant}-{pool}-{{0...{}}}.{service}.{namespace}.svc.cluster.local:9000{}/rustfs{{0...{}}}", + servers - 1, + base_path, // /data + volumes_per_server - 1 + ) +} +``` + +**Example Output** (2 servers, 2 volumes each): +``` +http://dev-minimal-dev-pool-{0...1}.dev-minimal-hl.default.svc.cluster.local:9000/data/rustfs{0...1} +``` + +**Expanded**: +``` +http://dev-minimal-dev-pool-0.dev-minimal-hl.default.svc.cluster.local:9000/data/rustfs0 +http://dev-minimal-dev-pool-0.dev-minimal-hl.default.svc.cluster.local:9000/data/rustfs1 +http://dev-minimal-dev-pool-1.dev-minimal-hl.default.svc.cluster.local:9000/data/rustfs0 +http://dev-minimal-dev-pool-1.dev-minimal-hl.default.svc.cluster.local:9000/data/rustfs1 +``` + +--- + +## ๐Ÿ—๏ธ Complete Architecture Diagram + +``` +User creates Tenant CRD + โ†“ +Operator reconciliation loop + โ†“ +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ 1. Create RBAC Resources โ”‚ +โ”‚ - Role โ”‚ +โ”‚ - ServiceAccount โ”‚ +โ”‚ - RoleBinding โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ†“ +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ 2. 
Create Services โ”‚ +โ”‚ - IO Service (port 9000) โ”‚ +โ”‚ - Console Service (port 9001) โ”‚ +โ”‚ - Headless Service (DNS) โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ†“ +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ 3. Create StatefulSet for each Pool โ”‚ +โ”‚ โ”œโ”€ Pod Template โ”‚ +โ”‚ โ”‚ โ”œโ”€ Container: rustfs/rustfs โ”‚ +โ”‚ โ”‚ โ”œโ”€ VolumeMounts: โ”‚ +โ”‚ โ”‚ โ”‚ โ”œโ”€ vol-0 โ†’ /data/rustfs0 โ”‚ +โ”‚ โ”‚ โ”‚ โ”œโ”€ vol-1 โ†’ /data/rustfs1 โ”‚ +โ”‚ โ”‚ โ”‚ โ”œโ”€ vol-2 โ†’ /data/rustfs2 โ”‚ +โ”‚ โ”‚ โ”‚ โ””โ”€ vol-3 โ†’ /data/rustfs3 โ”‚ +โ”‚ โ”‚ โ””โ”€ Env: โ”‚ +โ”‚ โ”‚ โ””โ”€ RUSTFS_VOLUMES=... โ”‚ +โ”‚ โ””โ”€ VolumeClaimTemplates: โ”‚ +โ”‚ โ”œโ”€ vol-0 (10Gi) โ”‚ +โ”‚ โ”œโ”€ vol-1 (10Gi) โ”‚ +โ”‚ โ”œโ”€ vol-2 (10Gi) โ”‚ +โ”‚ โ””โ”€ vol-3 (10Gi) โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ†“ +Kubernetes creates resources + โ†“ +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ StatefulSet Controller creates Pods โ”‚ +โ”‚ โ”œโ”€ Pod: dev-minimal-dev-pool-0 โ”‚ +โ”‚ โ”‚ โ”œโ”€ PVC: dev-minimal-dev-pool-0-vol-0 +โ”‚ โ”‚ โ”œโ”€ PVC: dev-minimal-dev-pool-0-vol-1 +โ”‚ โ”‚ โ”œโ”€ PVC: dev-minimal-dev-pool-0-vol-2 +โ”‚ โ”‚ โ””โ”€ PVC: dev-minimal-dev-pool-0-vol-3 +โ”‚ โ””โ”€ Pod: dev-minimal-dev-pool-1 โ”‚ +โ”‚ โ”œโ”€ PVC: dev-minimal-dev-pool-1-vol-0 +โ”‚ โ”œโ”€ PVC: dev-minimal-dev-pool-1-vol-1 +โ”‚ โ”œโ”€ PVC: dev-minimal-dev-pool-1-vol-2 +โ”‚ โ””โ”€ PVC: dev-minimal-dev-pool-1-vol-3 +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ†“ +Storage Provider (StorageClass) creates PV + โ†“ +Pod starts, RustFS accesses 
mounted paths +``` + +--- + +## ๐Ÿ”„ Data Persistence Flow + +### Data Persists Across Pod Restarts + +1. **StatefulSet Guarantees**: + - Stable Pod names: `dev-minimal-dev-pool-0` + - Stable PVC names: `dev-minimal-dev-pool-0-vol-0` + - Even if Pod restarts, PVCs remain unchanged + +2. **Storage Persistence**: + ``` + Pod deleted โ†’ PVC retained โ†’ Pod recreated โ†’ PVC remounted โ†’ Data restored + ``` + +3. **Path Consistency**: + - PVCs are always mounted to the same paths (`/data/rustfs0`) + - RustFS doesn't need to know what the underlying storage is (local disk, network storage, cloud storage) + +--- + +## ๐Ÿ’ก Key Design Decisions + +### 1. Why Use StatefulSet? + +- โœ… **Stable Network Identity**: Pods have stable DNS names for `RUSTFS_VOLUMES` +- โœ… **Ordered Deployment**: Can control Pod startup order +- โœ… **Stable Storage**: Each Pod has independent PVCs, data persists when Pod is recreated + +### 2. Why Use VolumeClaimTemplates? + +- โœ… **Automation**: No need to manually create PVCs +- โœ… **Dynamic Creation**: StatefulSet automatically creates PVCs for each Pod +- โœ… **Naming Convention**: PVC names are associated with Pod names + +### 3. Why Are Paths `/data/rustfs{0...N}`? 
+ +- โœ… **RustFS Convention**: Follows RustFS path naming conventions +- โœ… **Configurable**: Users can customize the base path via `persistence.path` +- โœ… **Clear**: Path names clearly indicate the volume's purpose + +--- + +## ๐Ÿ“ Practical Example + +### User Configuration + +```yaml +apiVersion: rustfs.com/v1alpha1 +kind: Tenant +metadata: + name: my-rustfs +spec: + pools: + - name: primary + servers: 2 + persistence: + volumesPerServer: 2 + path: /data # Optional, defaults to /data +``` + +### Resources Generated by Operator + +#### StatefulSet + +```yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: my-rustfs-primary +spec: + replicas: 2 + serviceName: my-rustfs-hl + template: + spec: + containers: + - name: rustfs + image: rustfs/rustfs:latest + env: + - name: RUSTFS_VOLUMES + value: "http://my-rustfs-primary-{0...1}.my-rustfs-hl.default.svc.cluster.local:9000/data/rustfs{0...1}" + volumeMounts: + - name: vol-0 + mountPath: /data/rustfs0 + - name: vol-1 + mountPath: /data/rustfs1 + volumeClaimTemplates: + - metadata: + name: vol-0 + spec: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 10Gi + - metadata: + name: vol-1 + spec: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 10Gi +``` + +#### Actually Created Pods and PVCs + +**Pod 0**: +- Pod name: `my-rustfs-primary-0` +- PVC: `my-rustfs-primary-0-vol-0` โ†’ mounted to `/data/rustfs0` +- PVC: `my-rustfs-primary-0-vol-1` โ†’ mounted to `/data/rustfs1` + +**Pod 1**: +- Pod name: `my-rustfs-primary-1` +- PVC: `my-rustfs-primary-1-vol-0` โ†’ mounted to `/data/rustfs0` +- PVC: `my-rustfs-primary-1-vol-1` โ†’ mounted to `/data/rustfs1` + +--- + +## ๐ŸŽฏ Summary + +### Solution to RustFS Path Dependency + +| Problem | Traditional Approach | Kubernetes Approach | +|---------|---------------------|---------------------| +| **Path Management** | Manually create directories | VolumeMount automatically mounts | +| **Storage Abstraction** | Direct use of local 
disk | PVC abstraction, supports multiple storage backends | +| **Data Persistence** | Depends on physical disk | PVC ensures data persistence | +| **Multi-node Coordination** | Manually configure IPs | Headless Service + DNS | +| **Configuration Management** | Manually set environment variables | Operator automatically generates | + +### Core Advantages + +1. **Declarative Configuration**: Users only declare "what they want", Operator handles "how to do it" +2. **Storage Abstraction**: Doesn't care if the underlying storage is local disk, NFS, cloud storage, or others +3. **Automation**: Automatically creates, configures, and manages all resources +4. **Portability**: Same configuration can run on any Kubernetes cluster +5. **Scalability**: Easily add nodes and scale storage + +--- + +## ๐Ÿ”— Related Documentation + +- [Architecture Decisions](./architecture-decisions.md) +- [Development Notes](./DEVELOPMENT-NOTES.md) +- [Usage Examples](../examples/README.md) + +--- + +**Key Understanding**: RustFS does depend on system paths, but Kubernetes uses the VolumeMount mechanism to "disguise" persistent storage as filesystem paths, making RustFS think it's accessing local disk when it's actually accessing Kubernetes-managed persistent storage. This is the core idea of containerized storage systems! diff --git a/docs/RUSTFS-OBJECT-STORAGE-USAGE.md b/docs/RUSTFS-OBJECT-STORAGE-USAGE.md new file mode 100644 index 0000000..488dc82 --- /dev/null +++ b/docs/RUSTFS-OBJECT-STORAGE-USAGE.md @@ -0,0 +1,664 @@ +# RustFS Object Storage Configuration and Usage Guide + +This document explains in detail the meaning of RustFS configuration parameters and how to use RustFS as an object storage system. 
+ +--- + +## ๐Ÿ“‹ Configuration Parameters Explained + +### Example Configuration + +```yaml +pools: + - name: dev-pool + servers: 1 # Number of server nodes + persistence: + volumesPerServer: 4 # Number of storage volumes per server +``` + +### Parameter Meanings + +#### `servers: 1` + +**Meaning**: Number of server nodes in the RustFS cluster + +- **Purpose**: Determines how many Pods to create (each Pod represents a RustFS server node) +- **Examples**: + - `servers: 1` โ†’ Creates 1 Pod (single node, suitable for development) + - `servers: 4` โ†’ Creates 4 Pods (4-node cluster, suitable for production) + - `servers: 16` โ†’ Creates 16 Pods (large-scale cluster) + +**Actual Effect**: +- Operator creates a StatefulSet with replicas = `servers` +- Each Pod runs a RustFS server instance +- Pod naming format: `{tenant-name}-{pool-name}-{0...servers-1}` + +#### `volumesPerServer: 4` + +**Meaning**: Number of storage volumes on each server node + +- **Purpose**: Determines how many persistent storage volumes each Pod mounts +- **Examples**: + - `volumesPerServer: 4` โ†’ Each Pod has 4 storage volumes + - `volumesPerServer: 8` โ†’ Each Pod has 8 storage volumes + +**Actual Effect**: +- Operator creates `volumesPerServer` PVCs for each Pod +- Each PVC is mounted to container paths: `/data/rustfs0`, `/data/rustfs1`, `/data/rustfs2`, `/data/rustfs3` +- PVC naming format: `{pod-name}-vol-0`, `{pod-name}-vol-1`, ... + +#### Total Storage Volume Count + +**Calculation Formula**: `Total volumes = servers ร— volumesPerServer` + +**Total volumes for example configuration**: +``` +servers: 1 +volumesPerServer: 4 +โ†’ Total volumes = 1 ร— 4 = 4 storage volumes +``` + +**Minimum Requirement**: `servers ร— volumesPerServer >= 4` + +This is RustFS's Erasure Coding requirement, which needs at least 4 storage volumes to function properly. + +--- + +## ๐Ÿ—๏ธ Actually Created Resources + +### What Does the Example Configuration Create? 
+ +```yaml +pools: + - name: dev-pool + servers: 1 + persistence: + volumesPerServer: 4 +``` + +#### 1. StatefulSet + +```yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: dev-minimal-dev-pool +spec: + replicas: 1 # servers: 1 + template: + spec: + containers: + - name: rustfs + image: rustfs/rustfs:latest + env: + - name: RUSTFS_VOLUMES + value: "http://dev-minimal-dev-pool-{0...0}.dev-minimal-hl.default.svc.cluster.local:9000/data/rustfs{0...3}" + volumeMounts: + - name: vol-0 + mountPath: /data/rustfs0 + - name: vol-1 + mountPath: /data/rustfs1 + - name: vol-2 + mountPath: /data/rustfs2 + - name: vol-3 + mountPath: /data/rustfs3 + volumeClaimTemplates: + - metadata: + name: vol-0 + spec: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 10Gi + - metadata: + name: vol-1 + spec: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 10Gi + - metadata: + name: vol-2 + spec: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 10Gi + - metadata: + name: vol-3 + spec: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 10Gi +``` + +#### 2. PersistentVolumeClaims (PVCs) + +``` +dev-minimal-dev-pool-0-vol-0 (10Gi) +dev-minimal-dev-pool-0-vol-1 (10Gi) +dev-minimal-dev-pool-0-vol-2 (10Gi) +dev-minimal-dev-pool-0-vol-3 (10Gi) +``` + +**Total Storage Capacity**: 4 ร— 10Gi = 40Gi (default 10Gi per volume) + +#### 3. Pod + +``` +dev-minimal-dev-pool-0 +``` + +Paths mounted inside the Pod: +- `/data/rustfs0` โ† PVC `dev-minimal-dev-pool-0-vol-0` +- `/data/rustfs1` โ† PVC `dev-minimal-dev-pool-0-vol-1` +- `/data/rustfs2` โ† PVC `dev-minimal-dev-pool-0-vol-2` +- `/data/rustfs3` โ† PVC `dev-minimal-dev-pool-0-vol-3` + +--- + +## ๐Ÿ’พ How RustFS Object Storage Works + +### 1. 
Data Distribution Mechanism + +RustFS uses **Erasure Coding** to distribute data: + +``` +User uploads object + โ†“ +RustFS splits object into data shards + โ†“ +Calculates parity shards + โ†“ +Distributes data and parity shards across all storage volumes + โ†“ +Data redundantly stored, can recover even if some volumes fail +``` + +**Example** (with 4 volumes): +- Object is split into 2 data shards + 2 parity shards +- Each shard is stored on a different volume +- Even if 2 volumes fail, data can still be recovered from the remaining 2 volumes + +### 2. Role of Storage Volumes + +Each storage volume (`/data/rustfs0`, `/data/rustfs1`, ...): +- **Stores data shards**: Part of the object's data +- **Stores metadata**: Object metadata, indexes, etc. +- **Participates in erasure coding**: Works with other volumes to provide data redundancy + +### 3. Why Are At Least 4 Volumes Required? + +RustFS's erasure coding algorithm requires: +- **Minimum data shards**: At least 2 data shards +- **Minimum parity shards**: At least 2 parity shards +- **Total**: At least 4 shards โ†’ At least 4 storage volumes + +**Configuration Examples**: +- โœ… `servers: 1, volumesPerServer: 4` โ†’ 4 volumes (minimum configuration) +- โœ… `servers: 2, volumesPerServer: 2` โ†’ 4 volumes (minimum configuration) +- โœ… `servers: 4, volumesPerServer: 1` โ†’ 4 volumes (minimum configuration) +- โŒ `servers: 1, volumesPerServer: 2` โ†’ 2 volumes (insufficient, won't work) +- โŒ `servers: 2, volumesPerServer: 1` โ†’ 2 volumes (insufficient, won't work) + +--- + +## ๐Ÿš€ How to Use RustFS Object Storage + +### 1. Deploy RustFS Cluster + +```bash +# Apply configuration +kubectl apply -f examples/minimal-dev-tenant.yaml + +# Wait for Pods to be ready +kubectl wait --for=condition=ready pod -l rustfs.tenant=dev-minimal --timeout=300s + +# Check status +kubectl get tenant dev-minimal +kubectl get pods -l rustfs.tenant=dev-minimal +``` + +### 2. 
Access S3 API + +RustFS provides S3-compatible object storage API. The Service type created by the Operator is `ClusterIP`, which means: + +- **Inside cluster**: Can directly use Service DNS names to access (**no port-forward needed**) +- **Outside cluster**: Requires port forwarding, Ingress, or LoadBalancer + +#### Method 1: Cluster-Internal Access (Recommended for Production) + +**Use Case**: Applications running inside Kubernetes cluster accessing RustFS + +**Service DNS Name Format**: +- S3 API: `http://rustfs.{namespace}.svc.cluster.local:9000` +- Console UI: `http://{tenant-name}-console.{namespace}.svc.cluster.local:9001` + +**Example** (in Pod or cluster-internal application): + +```bash +# Use Service DNS name (no port-forward needed) +# S3 API endpoint +http://rustfs.default.svc.cluster.local:9000 + +# Console UI endpoint +http://dev-minimal-console.default.svc.cluster.local:9001 +``` + +**Using MinIO Client (Cluster-Internal)**: +```bash +# Execute in Pod inside cluster +mc alias set rustfs http://rustfs.default.svc.cluster.local:9000 rustfsadmin rustfsadmin +mc mb rustfs/my-bucket +mc cp file.txt rustfs/my-bucket/ +``` + +**Using AWS CLI (Cluster-Internal)**: +```bash +# Execute in Pod inside cluster +aws --endpoint-url http://rustfs.default.svc.cluster.local:9000 s3 ls +aws --endpoint-url http://rustfs.default.svc.cluster.local:9000 s3 mb s3://my-bucket +``` + +**Using Python SDK (Cluster-Internal)**: +```python +import boto3 +from botocore.client import Config + +# Use Service DNS (no port-forward needed) +s3 = boto3.client( + 's3', + endpoint_url='http://rustfs.default.svc.cluster.local:9000', # Cluster-internal DNS + aws_access_key_id='rustfsadmin', + aws_secret_access_key='rustfsadmin', + config=Config(signature_version='s3v4'), + region_name='us-east-1' +) +``` + +#### Method 2: Port Forwarding (Local Development/Testing) + +**Use Case**: Accessing RustFS in cluster from local machine (development, testing, debugging) + +โš ๏ธ **Note**: This 
method requires keeping the `kubectl port-forward` command running + +```bash +# Terminal 1: Forward S3 API port (9000) +kubectl port-forward svc/rustfs 9000:9000 + +# Terminal 2: Use localhost to access (requires port forwarding) +mc alias set devlocal http://localhost:9000 rustfsadmin rustfsadmin +mc mb devlocal/my-bucket +mc cp file.txt devlocal/my-bucket/ +``` + +**Using MinIO Client (Requires port-forward)**: +```bash +# Must execute port forwarding first +kubectl port-forward svc/rustfs 9000:9000 + +# Then use localhost +mc alias set devlocal http://localhost:9000 rustfsadmin rustfsadmin +mc mb devlocal/my-bucket +mc cp /path/to/file.txt devlocal/my-bucket/ +mc ls devlocal/my-bucket +``` + +**Using AWS CLI (Requires port-forward)**: +```bash +# Must execute port forwarding first +kubectl port-forward svc/rustfs 9000:9000 + +# Then use localhost +export AWS_ACCESS_KEY_ID=rustfsadmin +export AWS_SECRET_ACCESS_KEY=rustfsadmin +aws --endpoint-url http://localhost:9000 s3 ls +aws --endpoint-url http://localhost:9000 s3 mb s3://my-bucket +aws --endpoint-url http://localhost:9000 s3 cp file.txt s3://my-bucket/ +``` + +**Using Python SDK (Requires port-forward)**: +```python +import boto3 +from botocore.client import Config + +# Must execute first: kubectl port-forward svc/rustfs 9000:9000 +s3 = boto3.client( + 's3', + endpoint_url='http://localhost:9000', # Requires port forwarding + aws_access_key_id='rustfsadmin', + aws_secret_access_key='rustfsadmin', + config=Config(signature_version='s3v4'), + region_name='us-east-1' +) +``` + +#### Method 3: Using Ingress (Recommended for Production) + +**Use Case**: Production environment, requires HTTPS and domain name access + +Create Ingress resource: + +```yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: rustfs-ingress + namespace: default +spec: + rules: + - host: rustfs.example.com + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: rustfs + port: + number: 9000 +``` + 
+Then access using domain name: +```bash +mc alias set production https://rustfs.example.com rustfsadmin rustfsadmin +``` + +#### Method 4: Using LoadBalancer (Cloud Environments) + +**Use Case**: Cloud environments (AWS, GCP, Azure), requires external IP + +Modify Service type (requires manual modification or Helm values): + +```yaml +# Note: Operator creates ClusterIP by default, need to manually change to LoadBalancer +apiVersion: v1 +kind: Service +metadata: + name: rustfs +spec: + type: LoadBalancer # Change to LoadBalancer + ports: + - port: 9000 +``` + +Then access using external IP: +```bash +# Get external IP +kubectl get svc rustfs + +# Use external IP (shown in the EXTERNAL-IP column of `kubectl get svc`) +mc alias set production http://<EXTERNAL-IP>:9000 rustfsadmin rustfsadmin +``` + +--- + +### Access Method Comparison + +| Access Method | Requires port-forward? | Use Case | Endpoint Example | +|--------------|----------------------|----------|------------------| +| **Cluster-Internal** | ❌ **No** | Cluster-internal applications | `http://rustfs.default.svc.cluster.local:9000` | +| **Port Forwarding** | ✅ **Yes** | Local development/testing | `http://localhost:9000` | +| **Ingress** | ❌ No | Production environment (HTTPS) | `https://rustfs.example.com` | +| **LoadBalancer** | ❌ No | Cloud environments | `http://<EXTERNAL-IP>:9000` | + +--- + +### 3. 
Access Web Console + +#### Cluster-Internal Access (No port-forward needed) + +```bash +# In Pod inside cluster +curl http://dev-minimal-console.default.svc.cluster.local:9001 +``` + +#### Port Forwarding Access (Requires port-forward) + +```bash +# Forward console port (9001) +kubectl port-forward svc/dev-minimal-console 9001:9001 + +# Open in browser +open http://localhost:9001 +``` + +**Default Credentials**: +- Username: `rustfsadmin` +- Password: `rustfsadmin` + +โš ๏ธ **Must change default credentials in production!** + +--- + +## ๐Ÿ“Š Configuration Examples Comparison + +### Development Environment (Minimal Configuration) + +```yaml +pools: + - name: dev-pool + servers: 1 # 1 node + persistence: + volumesPerServer: 4 # 4 volumes per node +``` + +**Result**: +- 1 Pod +- 4 PVCs (10Gi each) +- Total storage: 40Gi +- **Use Case**: Local development, testing, learning + +### Production Environment (High Availability) + +```yaml +pools: + - name: production + servers: 8 # 8 nodes + persistence: + volumesPerServer: 4 # 4 volumes per node + volumeClaimTemplate: + resources: + requests: + storage: 100Gi # 100Gi per volume +``` + +**Result**: +- 8 Pods (distributed across multiple nodes) +- 32 PVCs (100Gi each) +- Total storage: 3.2Ti +- **Use Case**: Production environment, high availability, large capacity + +### Multi-Pool Configuration (Scaling Storage) + +```yaml +pools: + - name: pool-0 + servers: 4 + persistence: + volumesPerServer: 4 # 16 volumes + + - name: pool-1 + servers: 4 + persistence: + volumesPerServer: 4 # 16 volumes +``` + +**Result**: +- 8 Pods (2 StatefulSets) +- 32 PVCs +- **All pools form a unified cluster** +- **Use Case**: Need to scale storage capacity + +--- + +## ๐Ÿ” Data Storage Flow + +### Write Data Flow + +``` +1. Client uploads object to S3 API (port 9000) + โ†“ +2. RustFS receives object + โ†“ +3. RustFS uses erasure coding algorithm: + - Splits object into data shards + - Calculates parity shards + โ†“ +4. 
Distributes shards across multiple storage volumes: + - /data/rustfs0 โ† Data shard 1 + - /data/rustfs1 โ† Data shard 2 + - /data/rustfs2 โ† Parity shard 1 + - /data/rustfs3 โ† Parity shard 2 + โ†“ +5. Data persisted to PVC (underlying storage) +``` + +### Read Data Flow + +``` +1. Client requests object + โ†“ +2. RustFS locates object shard positions + โ†“ +3. Reads shards from multiple storage volumes: + - /data/rustfs0 โ†’ Data shard 1 + - /data/rustfs1 โ†’ Data shard 2 + - /data/rustfs2 โ†’ Parity shard 1 (if needed) + โ†“ +4. Uses erasure coding algorithm to reconstruct complete object + โ†“ +5. Returns object to client +``` + +### Failure Recovery Flow + +``` +Scenario: /data/rustfs0 volume failure + โ†“ +1. RustFS detects volume unavailable + โ†“ +2. Reads data and parity shards from other volumes: + - /data/rustfs1 โ†’ Data shard 2 + - /data/rustfs2 โ†’ Parity shard 1 + - /data/rustfs3 โ†’ Parity shard 2 + โ†“ +3. Uses erasure coding algorithm to reconstruct lost data shard + โ†“ +4. 
When volume recovers, automatically rebuilds data +``` + +--- + +## ๐Ÿ“ˆ Capacity Planning + +### Storage Capacity Calculation + +**Formula**: `Total capacity = servers ร— volumesPerServer ร— single volume capacity` + +**Example**: +```yaml +servers: 4 +volumesPerServer: 4 +volumeClaimTemplate: + resources: + requests: + storage: 100Gi +``` + +**Calculation**: +- Total volumes: 4 ร— 4 = 16 volumes +- Total capacity: 16 ร— 100Gi = 1.6Ti + +### Usable Capacity + +Due to erasure coding redundancy, **usable capacity < total capacity**: + +- **EC:2** (2 data shards + 2 parity shards): Usable capacity = Total capacity ร— 50% +- **EC:4** (4 data shards + 4 parity shards): Usable capacity = Total capacity ร— 50% + +**Example**: +- Total capacity: 1.6Ti +- Usable capacity: Approximately 800Gi (50%) + +### Performance Considerations + +- **More volumes**: Better parallel I/O, higher throughput +- **More nodes**: Better load distribution, higher availability +- **Storage type**: SSD > HDD (performance) + +--- + +## ๐ŸŽฏ Use Cases + +### 1. Application Data Storage + +```yaml +# Use RustFS as object storage backend in application configuration +apiVersion: v1 +kind: ConfigMap +metadata: + name: app-config +data: + S3_ENDPOINT: "http://rustfs.default.svc.cluster.local:9000" + S3_BUCKET: "app-data" + S3_ACCESS_KEY: "rustfsadmin" + S3_SECRET_KEY: "rustfsadmin" +``` + +### 2. Backup Storage + +```yaml +# Velero backup to RustFS +apiVersion: velero.io/v1 +kind: BackupStorageLocation +metadata: + name: rustfs-backup +spec: + provider: aws + objectStorage: + bucket: velero-backups + prefix: backups + config: + region: us-east-1 + s3ForcePathStyle: "true" + s3Url: "http://rustfs.default.svc.cluster.local:9000" +``` + +### 3. 
CI/CD Build Artifact Storage + +```yaml +# GitLab CI configuration +build: + script: + - aws s3 cp build.tar.gz s3://artifacts/myapp/ --endpoint-url http://rustfs:9000 +``` + +--- + +## ๐Ÿ”— Related Documentation + +- [RustFS Kubernetes Integration](./RUSTFS-K8S-INTEGRATION.md) +- [Development Environment Setup](./DEVELOPMENT.md) +- [Usage Examples](../examples/README.md) + +--- + +## Summary + +**Configuration Meanings**: +- `servers: 1` โ†’ 1 RustFS server node (Pod) +- `volumesPerServer: 4` โ†’ 4 storage volumes per node +- **Total volumes** = 1 ร— 4 = 4 volumes (meets minimum requirement) + +**Object Storage Usage**: +1. RustFS provides S3-compatible API (port 9000) +2. Data is distributed across all storage volumes via erasure coding +3. Supports standard S3 clients and SDKs +4. Provides Web console (port 9001) for management + +**Key Understanding**: +- Storage volumes are RustFS's physical storage units +- Multiple volumes provide data redundancy and performance +- At least 4 volumes are required for normal operation (erasure coding requirement) diff --git a/src/types/v1alpha1/k8s.rs b/src/types/v1alpha1/k8s.rs index 9e9dfda..00230bb 100644 --- a/src/types/v1alpha1/k8s.rs +++ b/src/types/v1alpha1/k8s.rs @@ -15,7 +15,9 @@ //! Common Kubernetes enum types used across the operator use k8s_openapi::schemars::JsonSchema; +use schemars::{Schema, SchemaGenerator, json_schema}; use serde::{Deserialize, Serialize}; +use std::borrow::Cow; use strum::Display; /// Pod management policy for StatefulSets @@ -64,9 +66,8 @@ pub enum ImagePullPolicy { /// /// WARNING: Force-deleting pods can have data consistency implications depending on /// your storage backend and workload semantics. 
-#[derive(Default, Deserialize, Serialize, Clone, Debug, JsonSchema, Display, PartialEq, Eq)] +#[derive(Default, Deserialize, Serialize, Clone, Debug, Display, PartialEq, Eq)] #[serde(rename_all = "PascalCase")] -#[schemars(rename_all = "PascalCase")] pub enum PodDeletionPolicyWhenNodeIsDown { /// Do not delete pods automatically. #[strum(to_string = "DoNothing")] @@ -94,3 +95,34 @@ pub enum PodDeletionPolicyWhenNodeIsDown { #[strum(to_string = "DeleteBothStatefulSetAndDeploymentPod")] DeleteBothStatefulSetAndDeploymentPod, } + +impl JsonSchema for PodDeletionPolicyWhenNodeIsDown { + fn schema_name() -> Cow<'static, str> { + Cow::Borrowed("PodDeletionPolicyWhenNodeIsDown") + } + + fn schema_id() -> Cow<'static, str> { + Cow::Borrowed(concat!( + module_path!(), + "::", + "PodDeletionPolicyWhenNodeIsDown" + )) + } + + fn json_schema(_gen: &mut SchemaGenerator) -> Schema { + json_schema! { + { + "type": "string", + "enum": [ + "DoNothing", + "Delete", + "ForceDelete", + "DeleteStatefulSetPod", + "DeleteDeploymentPod", + "DeleteBothStatefulSetAndDeploymentPod" + ], + "description": "Pod deletion policy when the node hosting the Pod is down (NotReady/Unknown). Values: DoNothing | Delete | ForceDelete | DeleteStatefulSetPod | DeleteDeploymentPod | DeleteBothStatefulSetAndDeploymentPod" + } + } + } +}