Initial commit

commit 7d418deea9 by Your Name, 2025-09-19 11:54:43 +08:00
1547 changed files with 623906 additions and 0 deletions

@@ -0,0 +1,24 @@
{
"image": "mcr.microsoft.com/devcontainers/go:1.24-bookworm",
"features": {
"ghcr.io/devcontainers/features/docker-in-docker:2": {},
"ghcr.io/mpriscella/features/kind:1": {},
"ghcr.io/rjfmachado/devcontainer-features/cloud-native:1": {
"kubectl": "latest",
"helm": "latest",
"kubelogin": "none",
"azwi": "none",
"flux": "none",
"cilium": "none"
},
"ghcr.io/guiyomh/features/golangci-lint:0": {},
"ghcr.io/devcontainers-contrib/features/kubectx-kubens:1": {},
"ghcr.io/dhoeric/features/stern:1": {}
},
// Needed by kind to enable kube-proxy's ipvs mode
"mounts":["type=bind,source=/lib/modules,target=/lib/modules"],
// Enable the kubectl short alias with completion and install bash-git-prompt
"postCreateCommand": "echo 'alias k=kubectl; complete -F __start_kubectl k' >> ~/.bash_aliases; git clone https://github.com/magicmonty/bash-git-prompt.git ~/.bash-git-prompt --depth=1; echo 'if [ -f \"$HOME/.bash-git-prompt/gitprompt.sh\" ]; then . \"$HOME/.bash-git-prompt/gitprompt.sh\"; fi' >> ~/.bashrc"
}

.dockerignore (new file, 4 lines)

@@ -0,0 +1,4 @@
/tests/
/bin/
/.git/
/_*/

.github/ISSUE_TEMPLATE/bug.yml (new file, 124 lines)

@@ -0,0 +1,124 @@
name: Bug Report
description: File a bug report
title: "[Bug]: "
labels: ["triage", "bug"]
projects: ["cloudnative-pg/cloudnative-pg"]
assignees:
- gbartolini
body:
- type: markdown
attributes:
value: |
Thanks for taking the time to fill out this bug report for CloudNativePG!
- type: checkboxes
id: search
attributes:
label: Is there an existing issue already for this bug?
description: Before you submit a bug, make sure you have searched whether a similar one already exists
options:
- label: I have searched for an existing issue, and could not find anything. I believe this is a new bug.
required: true
- type: checkboxes
id: troubleshooting
attributes:
label: I have read the troubleshooting guide
description: Before you submit a bug, make sure you have read the ["Common issues" section in the Troubleshooting guide](https://cloudnative-pg.io/documentation/current/troubleshooting/#some-common-issues).
options:
- label: I have read the troubleshooting guide and I think this is a new bug.
required: true
- type: checkboxes
id: supported
attributes:
label: I am running a supported version of CloudNativePG
description: Before you submit a bug, make sure you have read ["Supported releases"](https://cloudnative-pg.io/documentation/current/supported_releases/) and that you are running a supported version of CloudNativePG with the latest patch/security fixes, or you are working on the current trunk (`main` branch)
options:
- label: I am running a supported version of CloudNativePG and I think this is a new bug.
required: true
- type: input
id: contact
attributes:
label: Contact Details
description: How can we get in touch with you if we need more info?
placeholder: ex. email@example.com
validations:
required: false
- type: dropdown
id: version
attributes:
label: Version
description: What is the version of CloudNativePG you are running?
options:
- "1.25 (latest patch)"
- "1.24 (latest patch)"
- "trunk (main)"
- "older in 1.24.x"
- "older minor (unsupported)"
validations:
required: true
- type: dropdown
id: k8s_version
attributes:
label: What version of Kubernetes are you using?
options:
- "1.32"
- "1.31"
- "1.30"
- "1.29"
- "other (unsupported)"
validations:
required: true
- type: dropdown
id: k8s_environment
attributes:
label: What is your Kubernetes environment?
options:
- "Self-managed: kind (evaluation)"
- "Self-managed: k0s"
- "Self-managed: k3s"
- "Self-managed: RKE"
- "Cloud: Amazon EKS"
- "Cloud: Google GKE"
- "Cloud: Azure AKS"
- "Cloud: Other"
- "Other"
validations:
required: true
- type: dropdown
id: installation_method
attributes:
label: How did you install the operator?
options:
- "YAML manifest"
- "Helm"
- "OLM"
- "Other"
validations:
required: true
- type: textarea
id: what-happened
attributes:
label: What happened?
description: Also tell us, what did you expect to happen?
placeholder: Tell us what you see!
validations:
required: true
- type: textarea
id: spec
attributes:
label: Cluster resource
description: Please copy and paste the CR of the cluster
render: shell
- type: textarea
id: logs
attributes:
label: Relevant log output
description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks.
render: shell
- type: checkboxes
id: terms
attributes:
label: Code of Conduct
description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/cloudnative-pg/governance/blob/main/CODE_OF_CONDUCT.md)
options:
- label: I agree to follow this project's Code of Conduct
required: true

.github/ISSUE_TEMPLATE/config.yml (new file, 8 lines)

@@ -0,0 +1,8 @@
blank_issues_enabled: false
contact_links:
- name: Open discussions
url: https://github.com/cloudnative-pg/cloudnative-pg/discussions
about: Please ask and answer questions here.
- name: Slack chat
url: https://github.com/cloudnative-pg/cloudnative-pg?tab=readme-ov-file#communications
about: Please join the Slack channel and interact with our community

@@ -0,0 +1,74 @@
name: Documentation
description: Issues or improvement ideas for CloudNativePG documentation
title: "[Docs]: "
labels: ["triage", "documentation"]
projects: ["cloudnative-pg/cloudnative-pg"]
assignees:
- gbartolini
body:
- type: markdown
attributes:
value: |
Thanks for taking the time to improve the documentation of CloudNativePG!
- type: checkboxes
id: search
attributes:
label: Is there an existing issue already for your request/idea?
description: Before you submit a new issue, make sure you have searched whether a similar one already exists
options:
- label: I have searched for an existing issue, and could not find anything. I believe this is a new documentation enhancement to be evaluated.
required: true
- type: textarea
id: problem
attributes:
label: What problem in the existing documentation does this issue aim to solve?
description: A clear and concise description of what the problem is (e.g. I'm always frustrated when [...])
validations:
required: true
- type: textarea
id: doc_add
attributes:
label: Describe what additions need to be done to the documentation
description: A clear and concise description of what sections/pages you want to add to the current documentation.
validations:
required: false
- type: textarea
id: doc_change
attributes:
label: Describe what pages need to change in the documentation, if any
description: Please provide links to the pages in the current documentation
validations:
required: false
- type: textarea
id: doc_remove
attributes:
label: Describe what pages need to be removed from the documentation, if any
description: Please provide links to the pages in the current documentation that you want to be removed
validations:
required: false
- type: textarea
id: additional_context
attributes:
label: Additional context
description: Add any other context about this issue.
validations:
required: false
- type: dropdown
id: backport
attributes:
label: Backport?
description: Do you suggest backporting this change or not?
options:
- "Yes"
- "No"
- "N/A"
validations:
required: true
- type: checkboxes
id: terms
attributes:
label: Code of Conduct
description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/cloudnative-pg/governance/blob/main/CODE_OF_CONDUCT.md)
options:
- label: I agree to follow this project's Code of Conduct
required: true

.github/ISSUE_TEMPLATE/feature.yml (new file, 77 lines)

@@ -0,0 +1,77 @@
name: Feature request
description: Suggest an idea or request a new feature for CloudNativePG
title: "[Feature]: "
labels: ["triage", "enhancement"]
projects: ["cloudnative-pg/cloudnative-pg"]
assignees:
- gbartolini
body:
- type: markdown
attributes:
value: |
Thanks for taking the time to propose new ideas and feature requests to CloudNativePG
- type: checkboxes
id: search
attributes:
label: Is there an existing issue already for this feature request/idea?
description: Before you submit a new feature request, make sure you have searched whether a similar one already exists
options:
- label: I have searched for an existing issue, and could not find anything. I believe this is a new feature request to be evaluated.
required: true
- type: textarea
id: problem
attributes:
label: What problem is this feature going to solve? Why should it be added?
description: A clear and concise description of what the problem is (e.g. I'm always frustrated when [...])
validations:
required: true
- type: textarea
id: solution
attributes:
label: Describe the solution you'd like
description: A clear and concise description of what you want to happen.
validations:
required: true
- type: textarea
id: alternatives
attributes:
label: Describe alternatives you've considered
description: A clear and concise description of any alternative solutions or features you've considered.
validations:
required: true
- type: textarea
id: additional_context
attributes:
label: Additional context
description: Add any other context or screenshots about the feature request here.
validations:
required: false
- type: dropdown
id: backport
attributes:
label: Backport?
description: Do you suggest backporting this feature or not?
options:
- "Yes"
- "No"
- "N/A"
validations:
required: true
- type: dropdown
id: contribute
attributes:
label: Are you willing to actively contribute to this feature?
description: Let us know if you are interested in developing this feature.
options:
- "No"
- "Yes"
validations:
required: true
- type: checkboxes
id: terms
attributes:
label: Code of Conduct
description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/cloudnative-pg/governance/blob/main/CODE_OF_CONDUCT.md)
options:
- label: I agree to follow this project's Code of Conduct
required: true

@@ -0,0 +1,48 @@
name: Release Notes
description: Release notes for a new version of CloudNativePG
title: "[Release Notes]: CloudNativePG 1.XX.Y and 1.XX-1.Z"
labels: ["triage", "documentation"]
projects: ["cloudnative-pg/cloudnative-pg"]
assignees:
- gbartolini
body:
- type: markdown
attributes:
value: |
Make sure that the correct versions are reported in the title of the ticket.
- type: checkboxes
id: search
attributes:
label: Is there an existing issue already for this task?
description: Before you submit a new issue, make sure you have searched whether a similar one already exists
options:
- label: I have searched for an existing issue, and could not find anything. I believe this is a new request.
required: true
- type: dropdown
id: minor
attributes:
label: Is this a new minor release?
description: Is this a new minor release for CloudNativePG? If so, make sure you check the `contribute/release-notes-template.md` file.
options:
- "No"
- "Yes"
validations:
required: true
- type: dropdown
id: preview
attributes:
label: Is this a preview release?
description: Is this a preview release for CloudNativePG? If so, make sure you add `-RC1` to the version and update the `preview_version.md` file.
options:
- "No"
- "Yes"
validations:
required: true
- type: checkboxes
id: terms
attributes:
label: Code of Conduct
description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/cloudnative-pg/governance/blob/main/CODE_OF_CONDUCT.md)
options:
- label: I agree to follow this project's Code of Conduct
required: true

.github/aks_versions.json (new file, 6 lines)

@@ -0,0 +1,6 @@
[
"1.32.0",
"1.31.5",
"1.30.9",
"1.29.9"
]

.github/e2e-matrix-generator.py (new file, 382 lines)

@@ -0,0 +1,382 @@
#
# Copyright © contributors to CloudNativePG, established as
# CloudNativePG a Series of LF Projects, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
#
import argparse
import json
import os
import re
import sys
from operator import itemgetter
from typing import Dict, List
POSTGRES_REPO = "ghcr.io/cloudnative-pg/postgresql"
PG_VERSIONS_FILE = ".github/pg_versions.json"
AKS_VERSIONS_FILE = ".github/aks_versions.json"
EKS_VERSIONS_FILE = ".github/eks_versions.json"
GKE_VERSIONS_FILE = ".github/gke_versions.json"
OPENSHIFT_VERSIONS_FILE = ".github/openshift_versions.json"
KIND_VERSIONS_FILE = ".github/kind_versions.json"
VERSION_SCOPE_FILE = ".github/k8s_versions_scope.json"
E2E_TEST_TIMEOUT = ".github/e2e_test_timeout.json"
class VersionList(list):
"""List of versions"""
def __init__(self, versions: List[str]):
super().__init__(versions)
@property
def latest(self):
return self[0]
@property
def oldest(self):
return self[-1]
class MajorVersionList(dict):
"""List of major versions, with multiple patch levels"""
def __init__(self, version_lists: Dict[str, List[str]]):
sorted_versions = {
k: VersionList(version_lists[k]) for k in version_lists.keys()
}
super().__init__(sorted_versions)
self.versions = list(self.keys())
@property
def latest(self):
if "beta" in self[self.versions[0]][0]:
return self.get(self.versions[1])
return self.get(self.versions[0])
@property
def oldest(self):
return self.get(self.versions[-1])
# Go through versions_list and keep only the Kubernetes versions that fall within the supported [min, max] range
def filter_version(versions_list, version_range):
min_version = version_range["min"]
max_version = version_range["max"] or "99.99"
return list(
filter(
lambda x: max_version >= re.sub(r"v", "", x)[0:4] >= min_version,
versions_list,
)
)
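# Illustrative example (made-up values): with
#   versions_list = ["v1.32.2", "v1.31.6", "v1.26.3"]
#   version_range = {"min": "1.27", "max": ""}
# the comparison keeps "v1.32.2" and "v1.31.6" and drops "v1.26.3",
# since "1.26" < "1.27" in the string ordering used here.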
# Default timeout for the e2e test
try:
with open(E2E_TEST_TIMEOUT) as json_file:
timeout_list = json.load(json_file)
TIMEOUT_LIST = timeout_list
except:
print(f"Failed opening file: {E2E_TEST_TIMEOUT}")
exit(1)
# Minimum supported Kubernetes version (inclusive) for each cloud vendor
try:
with open(VERSION_SCOPE_FILE) as json_file:
version_list = json.load(json_file)
SUPPORT_K8S_VERSION = version_list["e2e_test"]
print(SUPPORT_K8S_VERSION)
except:
print(f"Failed opening file: {VERSION_SCOPE_FILE}")
exit(1)
# Kubernetes versions on kind to use during the tests
try:
with open(KIND_VERSIONS_FILE) as json_file:
version_list = json.load(json_file)
kind_versions = filter_version(version_list, SUPPORT_K8S_VERSION["KIND"])
KIND_K8S = VersionList(kind_versions)
except:
print(f"Failed opening file: {KIND_VERSIONS_FILE}")
exit(1)
# Kubernetes versions on EKS to use during the tests
try:
with open(EKS_VERSIONS_FILE) as json_file:
version_list = json.load(json_file)
eks_versions = filter_version(version_list, SUPPORT_K8S_VERSION["EKS"])
EKS_K8S = VersionList(eks_versions)
except:
print(f"Failed opening file: {EKS_VERSIONS_FILE}")
exit(1)
# Kubernetes versions on AKS to use during the tests
try:
with open(AKS_VERSIONS_FILE) as json_file:
version_list = json.load(json_file)
aks_versions = filter_version(version_list, SUPPORT_K8S_VERSION["AKS"])
AKS_K8S = VersionList(aks_versions)
except:
print(f"Failed opening file: {AKS_VERSIONS_FILE}")
exit(1)
# Kubernetes versions on GKE to use during the tests
try:
with open(GKE_VERSIONS_FILE) as json_file:
version_list = json.load(json_file)
gke_versions = filter_version(version_list, SUPPORT_K8S_VERSION["GKE"])
GKE_K8S = VersionList(gke_versions)
except:
print(f"Failed opening file: {GKE_VERSIONS_FILE}")
exit(1)
# OpenShift version to use during the tests
try:
with open(OPENSHIFT_VERSIONS_FILE) as json_file:
version_list = json.load(json_file)
openshift_versions = filter_version(
version_list, SUPPORT_K8S_VERSION["OPENSHIFT"]
)
OPENSHIFT_K8S = VersionList(openshift_versions)
except:
print(f"Failed opening file: {OPENSHIFT_VERSIONS_FILE}")
exit(1)
# PostgreSQL versions to use during the tests
# Entries are expected to be ordered from newest to oldest
# First entry is used as default testing version
# Entries format:
# MAJOR: [VERSION, PRE_ROLLING_UPDATE_VERSION],
try:
with open(PG_VERSIONS_FILE, "r") as json_file:
postgres_versions = json.load(json_file)
POSTGRES = MajorVersionList(postgres_versions)
except:
print(f"Failed opening file: {PG_VERSIONS_FILE}")
exit(1)
class E2EJob(dict):
"""Build a single job of the matrix"""
def __init__(self, k8s_version, postgres_version_list, flavor):
postgres_version = postgres_version_list.latest
postgres_version_pre = postgres_version_list.oldest
if flavor == "pg":
name = f"{k8s_version}-PostgreSQL-{postgres_version}"
repo = POSTGRES_REPO
kind = "PostgreSQL"
super().__init__(
{
"id": name,
"k8s_version": k8s_version,
"postgres_version": postgres_version,
"postgres_kind": kind,
"postgres_img": f"{repo}:{postgres_version}",
"postgres_pre_img": f"{repo}:{postgres_version_pre}",
}
)
def __hash__(self):
return hash(self["id"])
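# For illustration, with the pg_versions.json in this commit,
# E2EJob("v1.32.2", POSTGRES["17"], "pg") builds roughly:
#   {"id": "v1.32.2-PostgreSQL-17.4", "k8s_version": "v1.32.2",
#    "postgres_version": "17.4", "postgres_kind": "PostgreSQL",
#    "postgres_img": "ghcr.io/cloudnative-pg/postgresql:17.4",
#    "postgres_pre_img": "ghcr.io/cloudnative-pg/postgresql:17.2"}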
def build_push_include_local():
"""Build the list of tests running on push"""
return {
E2EJob(KIND_K8S.latest, POSTGRES.latest, "pg"),
E2EJob(KIND_K8S.oldest, POSTGRES.oldest, "pg"),
}
def build_pull_request_include_local():
"""Build the list of tests running on pull request"""
result = build_push_include_local()
# Iterate over K8S versions
for k8s_version in KIND_K8S:
result |= {
E2EJob(k8s_version, POSTGRES.latest, "pg"),
}
# Iterate over PostgreSQL versions
for postgres_version in POSTGRES.values():
result |= {E2EJob(KIND_K8S.latest, postgres_version, "pg")}
return result
def build_main_include_local():
"""Build the list tests running on main"""
result = build_pull_request_include_local()
# Iterate over K8S versions
for k8s_version in KIND_K8S:
result |= {
E2EJob(k8s_version, POSTGRES.latest, "pg"),
}
# Iterate over PostgreSQL versions
for postgres_version in POSTGRES.values():
result |= {E2EJob(KIND_K8S.latest, postgres_version, "pg")}
return result
def build_schedule_include_local():
"""Build the list of tests running on schedule"""
# For the moment scheduled tests are identical to main
return build_main_include_local()
def build_push_include_cloud(engine_version_list):
return {}
def build_pull_request_include_cloud(engine_version_list):
return {
E2EJob(engine_version_list.latest, POSTGRES.latest, "pg"),
}
def build_main_include_cloud(engine_version_list):
return {
E2EJob(engine_version_list.latest, POSTGRES.latest, "pg"),
}
def build_schedule_include_cloud(engine_version_list):
"""Build the list of tests running on schedule"""
result = set()
# Iterate over K8S versions
for k8s_version in engine_version_list:
result |= {
E2EJob(k8s_version, POSTGRES.latest, "pg"),
}
return result
ENGINE_MODES = {
"local": {
"push": build_push_include_local,
"pull_request": build_pull_request_include_local,
"issue_comment": build_pull_request_include_local,
"workflow_dispatch": build_pull_request_include_local,
"main": build_main_include_local,
"schedule": build_schedule_include_local,
},
"eks": {
"push": lambda: build_push_include_cloud(EKS_K8S),
"pull_request": lambda: build_pull_request_include_cloud(EKS_K8S),
"issue_comment": lambda: build_pull_request_include_cloud(EKS_K8S),
"workflow_dispatch": lambda: build_pull_request_include_cloud(EKS_K8S),
"main": lambda: build_main_include_cloud(EKS_K8S),
"schedule": lambda: build_schedule_include_cloud(EKS_K8S),
},
"aks": {
"push": lambda: build_push_include_cloud(AKS_K8S),
"pull_request": lambda: build_pull_request_include_cloud(AKS_K8S),
"issue_comment": lambda: build_pull_request_include_cloud(AKS_K8S),
"workflow_dispatch": lambda: build_pull_request_include_cloud(AKS_K8S),
"main": lambda: build_main_include_cloud(AKS_K8S),
"schedule": lambda: build_schedule_include_cloud(AKS_K8S),
},
"gke": {
"push": lambda: build_push_include_cloud(GKE_K8S),
"pull_request": lambda: build_pull_request_include_cloud(GKE_K8S),
"issue_comment": lambda: build_pull_request_include_cloud(GKE_K8S),
"workflow_dispatch": lambda: build_pull_request_include_cloud(GKE_K8S),
"main": lambda: build_main_include_cloud(GKE_K8S),
"schedule": lambda: build_schedule_include_cloud(GKE_K8S),
},
"openshift": {
"push": lambda: build_push_include_cloud(OPENSHIFT_K8S),
"pull_request": lambda: build_pull_request_include_cloud(OPENSHIFT_K8S),
"issue_comment": lambda: build_pull_request_include_cloud(OPENSHIFT_K8S),
"workflow_dispatch": lambda: build_pull_request_include_cloud(OPENSHIFT_K8S),
"main": lambda: build_main_include_cloud(OPENSHIFT_K8S),
"schedule": lambda: build_schedule_include_cloud(OPENSHIFT_K8S),
},
}
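# Dispatch is a plain dict lookup; for example,
# ENGINE_MODES["local"]["pull_request"]() returns a set of E2EJob entries
# covering every kind version with the latest PostgreSQL, plus the latest
# kind version with every PostgreSQL major.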
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create the job matrix")
parser.add_argument(
"-m",
"--mode",
type=str,
choices={
"push",
"pull_request",
"issue_comment",
"workflow_dispatch",
"main",
"schedule",
},
default="push",
help="set of tests to run",
)
parser.add_argument(
"-l",
"--limit",
type=str,
default="",
help="limit to a list of engines",
)
args = parser.parse_args()
engines = set(ENGINE_MODES.keys())
if args.limit:
required_engines = set(re.split(r"[, ]+", args.limit.strip()))
if len(wrong_engines := required_engines - engines):
raise SystemExit(
f"Limit contains unknown engines {wrong_engines}. Available engines: {engines}"
)
engines = required_engines
matrix = {}
for engine in ENGINE_MODES:
include = {}
if engine in engines:
include = list(
sorted(ENGINE_MODES[engine][args.mode](), key=itemgetter("id"))
)
for job in include:
job["id"] = engine + "-" + job["id"]
print(f"Generating {engine}: {job['id']}", file=sys.stderr)
try:
with open(os.getenv("GITHUB_OUTPUT"), "a") as github_output:
print(
f"{engine}Matrix=" + json.dumps({"include": include}),
file=github_output,
)
print(f"{engine}Enabled=" + str(len(include) > 0), file=github_output)
print(
f"{engine}E2ETimeout=" + json.dumps(TIMEOUT_LIST.get(engine, {})),
file=github_output,
)
except:
print(
"Output file GITHUB_OUTPUT is not defined, can't write output matrix"
)
exit(1)
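As a rough local sketch (not part of this commit) of how the generator can be exercised, assuming a checkout of this tree with Python 3 available:

import os, subprocess, tempfile

# Run the matrix generator the way a workflow step would: GITHUB_OUTPUT
# points at a file the script appends its outputs to.
with tempfile.NamedTemporaryFile(mode="r", suffix=".txt") as out:
    env = dict(os.environ, GITHUB_OUTPUT=out.name)
    subprocess.run(
        ["python", ".github/e2e-matrix-generator.py", "-m", "pull_request", "-l", "local"],
        env=env, check=True,
    )
    print(out.read())  # e.g. localMatrix={"include": [...]}, localEnabled=True, ...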

.github/e2e_test_timeout.json (new file, 87 lines)

@@ -0,0 +1,87 @@
{
"local": {
"failover": 240,
"namespaceCreation": 30,
"clusterIsReady": 600,
"clusterIsReadyQuick": 300,
"clusterIsReadySlow": 800,
"newPrimaryAfterSwitchover": 45,
"newPrimaryAfterFailover": 30,
"newTargetOnFailover": 120,
"operatorIsReady": 120,
"largeObject": 300,
"walsInMinio": 60,
"minioInstallation": 300,
"backupIsReady": 180,
"drainNode": 900,
"short": 5
},
"aks": {
"failover": 240,
"namespaceCreation": 30,
"clusterIsReady": 600,
"clusterIsReadyQuick": 300,
"clusterIsReadySlow": 800,
"newPrimaryAfterSwitchover": 45,
"newPrimaryAfterFailover": 30,
"newTargetOnFailover": 120,
"operatorIsReady": 120,
"largeObject": 300,
"walsInMinio": 60,
"minioInstallation": 300,
"backupIsReady": 180,
"drainNode": 900,
"short": 10
},
"eks": {
"failover": 240,
"namespaceCreation": 30,
"clusterIsReady": 600,
"clusterIsReadyQuick": 300,
"clusterIsReadySlow": 800,
"newPrimaryAfterSwitchover": 45,
"newPrimaryAfterFailover": 30,
"newTargetOnFailover": 120,
"operatorIsReady": 120,
"largeObject": 300,
"walsInMinio": 60,
"minioInstallation": 300,
"backupIsReady": 180,
"drainNode": 900,
"short": 10
},
"gke": {
"failover": 240,
"namespaceCreation": 30,
"clusterIsReady": 600,
"clusterIsReadyQuick": 300,
"clusterIsReadySlow": 800,
"newPrimaryAfterSwitchover": 45,
"newPrimaryAfterFailover": 30,
"newTargetOnFailover": 120,
"operatorIsReady": 120,
"largeObject": 300,
"walsInMinio": 60,
"minioInstallation": 300,
"backupIsReady": 180,
"drainNode": 900,
"short": 10
},
"openshift": {
"failover": 240,
"namespaceCreation": 30,
"clusterIsReady": 600,
"clusterIsReadyQuick": 300,
"clusterIsReadySlow": 800,
"newPrimaryAfterSwitchover": 45,
"newPrimaryAfterFailover": 30,
"newTargetOnFailover": 120,
"operatorIsReady": 120,
"largeObject": 300,
"walsInMinio": 60,
"minioInstallation": 300,
"backupIsReady": 180,
"drainNode": 900,
"short": 10
}
}

.github/eks_versions.json (new file, 6 lines)

@@ -0,0 +1,6 @@
[
"1.32",
"1.31",
"1.30",
"1.29"
]

.github/generate-test-artifacts.py (new file, 285 lines)

@@ -0,0 +1,285 @@
#
# Copyright © contributors to CloudNativePG, established as
# CloudNativePG a Series of LF Projects, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
#
import argparse
import json
import re
import os
import hashlib
from datetime import datetime
def flatten(arr):
"""flatten an array of arrays"""
out = []
for l in arr:
if isinstance(l, list):
for item in l:
out.append(item)
else:
print("unexpected hierarchy labels")
print(arr)
return out
def env_to_json():
"""Convert a set of environment variables into a valid JSON with the following format:
{
"runner": , # e.g. local, aks, eks, gke
"id": , # the matrix ID e.g. local-v1.22.2-PostgreSQL-13.5
"postgres": , # version of PostgreSQL e.g. 13.5
"postgres_kind": , # flavor of PostgreSQL
"kubernetes": , # version of K8s e.g. v1.22.2
"runid": , # the GH Action run-id -> ${{ github.run_id }}
"branch": , # dev/xxxx-1666 -> you get this with "${{ github.head_ref }}" ... EXCEPT
"refname": , # it may be blank, and then we want: "${{ github.ref_name }}"
"repo": , # cloudnative-pg/cloudnative-pg -> you get this from GH with ${{ github.repository }}
}
"""
runner = os.getenv("RUNNER")
postgres = os.getenv("POSTGRES_VERSION")
postgres_kind = os.getenv("POSTGRES_KIND")
kubernetes_version = os.getenv("K8S_VERSION")
runid = os.getenv("RUN_ID")
id = os.getenv("MATRIX")
repo = os.getenv("REPOSITORY")
branch = os.getenv("BRANCH_NAME")
refname = os.getenv("GIT_REF")
matrix = f"""
{{
"runner": "{runner}",
"postgres": "{postgres}",
"postgres_kind": "{postgres_kind}",
"kubernetes": "{kubernetes_version}",
"runid": "{runid}",
"id": "{id}",
"repo": "{repo}",
"branch": "{branch}",
"refname": "{refname}"
}}
"""
return matrix
def is_user_spec(spec):
"""Checks if the spec contains the fields used to build the test name.
The JSON report produced by Ginkgo may contain
SpecReports entries that are for internal Ginkgo purposes and will not
reflect user-defined Specs. For these entries, ContainerHierarchyTexts may
be null or the LeafNodeText may be blank
"""
if spec["LeafNodeText"] == "":
return False
try:
_ = " - ".join(spec["ContainerHierarchyTexts"])
return True
except TypeError:
return False
def convert_ginkgo_test(test, matrix):
"""Converts a test spec in ginkgo JSON format into a normalized JSON object.
The matrix arg will be passed from the GH Actions, and is expected to be
a JSON of the form:
{
"runner": , # e.g. local, aks, eks, gke
"id": , # the matrix ID e.g. local-v1.22.2-PostgreSQL-13.5
"postgres": , # version of PostgreSQL e.g. 13.5
"postgres_kind": , # flavor of PostgreSQL
"kubernetes": , # version of K8s e.g. v1.22.2
"runid": , # the GH Action run-id -> ${{ github.run_id }}
"branch": , # dev/xxxx-1666 -> you get this with "${{ github.head_ref }}" ... EXCEPT
"refname": , # it may be blank, and then we want: "${{ github.ref_name }}"
"repo": , # cloudnative-pg/cloudnative-pg -> you get this from GH with ${{ github.repository }}
}
"""
err = ""
err_file = ""
err_line = 0
if "Failure" in test:
err = test["Failure"]["Message"]
err_file = test["Failure"]["Location"]["FileName"]
err_line = test["Failure"]["Location"]["LineNumber"]
state = test["State"]
branch = matrix["branch"]
if branch == "":
branch = matrix["refname"]
ginkgo_format = {
"name": " - ".join(test["ContainerHierarchyTexts"])
+ " -- "
+ test["LeafNodeText"],
"state": state,
"start_time": test["StartTime"],
"end_time": test[
"EndTime"
], # NOTE: Grafana will need a default timestamp field. This is a good candidate
"error": err,
"error_file": err_file,
"error_line": err_line,
"platform": matrix["runner"],
"postgres_kind": matrix["postgres_kind"],
"matrix_id": matrix["id"],
"postgres_version": matrix["postgres"],
"k8s_version": matrix["kubernetes"],
"workflow_id": matrix["runid"],
"repo": matrix["repo"],
"branch": branch,
}
return ginkgo_format
def write_artifact(artifact, artifact_dir, matrix):
"""writes an artifact to local storage as a JSON file
The computed filename will be used as the ID to introduce the payload into
Elastic for the E2E tests. It should be unique across the current GH run.
So: MatrixID + Test
Because we may run this on MSFT Azure, where filename length limits still
exist, we HASH the test name.
The platform team's scraping script will add the GH Run ID to this, and the
Repository, and with Repo + Run ID + MatrixID + Test Hash, gives a unique
ID in Elastic to each object.
"""
whitespace = re.compile(r"\s")
slug = whitespace.sub("_", artifact["name"])
h = hashlib.sha224(slug.encode("utf-8")).hexdigest()
filename = matrix["id"] + "_" + h + ".json"
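# e.g. (illustrative): "local-v1.32.2-PostgreSQL-17.4_" + 56 hex chars + ".json"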
if artifact_dir != "":
filename = artifact_dir + "/" + filename
try:
with open(filename, "w") as f:
f.write(json.dumps(artifact))
except (FileNotFoundError, PermissionError) as e:
print(f"Error: {e}")
def create_artifact(matrix, name, state, error):
"""creates an artifact with a given name, state and error,
with the metadata provided by the `matrix` argument.
Useful to generate artifacts that signal failures outside the Test Suite,
for example if the suite never executed
"""
branch = matrix["branch"]
if branch == "":
branch = matrix["refname"]
return {
"name": name,
"state": state,
"start_time": datetime.now().isoformat(),
"end_time": datetime.now().isoformat(), # NOTE: Grafana will need a default timestamp field. This is a good candidate
"error": error,
"error_file": "no-file",
"error_line": 0,
"platform": matrix["runner"],
"matrix_id": matrix["id"],
"postgres_kind": matrix["postgres_kind"],
"postgres_version": matrix["postgres"],
"k8s_version": matrix["kubernetes"],
"workflow_id": matrix["runid"],
"repo": matrix["repo"],
"branch": branch,
}
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Create JSON artifacts from E2E JSON report"
)
parser.add_argument(
"-f",
"--file",
type=str,
help="report JSON file with test run, as produce by ginkgo",
)
parser.add_argument(
"-o",
"--outdir",
type=str,
default="",
help="directory where we write the artifacts",
)
parser.add_argument(
"-m", "--matrix", type=str, help="the matrix with GH execution variables"
)
parser.add_argument(
"-e",
"--environment",
type=bool,
help="get the matrix arguments from environment variables. "
"Variables defined with -m/--matrix take priority",
)
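# NOTE: argparse's type=bool is a known pitfall: any non-empty string,
# including "false", parses as True, so -e effectively acts as a flag.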
args = parser.parse_args()
print("test matrix: ")
matrix = {}
# First, try to gather the matrix from env variables
if args.environment:
matrix = json.loads(env_to_json())
# If defined, user provided arguments will take priority
if args.matrix:
args_matrix = json.loads(args.matrix)
matrix.update(args_matrix)
print(matrix)
outputDir = ""
if args.outdir:
outputDir = args.outdir
if not os.path.exists(outputDir):
os.makedirs(outputDir)
print("Directory ", outputDir, " Created ")
# If the ginkgo report file is not found, produce a "failed" artifact
if not os.path.exists(args.file):
print("Report ", args.file, " not found ")
# We still want an entry in the E2E Dashboard even for workflows that
# failed to run the ginkgo suite or failed to produce a JSON report.
# We create a custom Artifact with a `failed` status for the Dashboard
artifact = create_artifact(
matrix,
"Open Ginkgo report",
"failed",
"ginkgo Report Not Found: " + args.file,
)
write_artifact(artifact, outputDir, matrix)
exit(0)
# MAIN LOOP: go over each `SpecReport` in the Ginkgo JSON output, convert
# each to the normalized JSON format and create a JSON file for each of those
try:
with open(args.file) as json_file:
testResults = json.load(json_file)
for t in testResults[0]["SpecReports"]:
if (t["State"] != "skipped") and is_user_spec(t):
test1 = convert_ginkgo_test(t, matrix)
write_artifact(test1, outputDir, matrix)
except Exception as e:
# Reflect any unexpected failure in an artifact
artifact = create_artifact(
matrix, "Generate artifacts from Ginkgo report", "failed", f"{e}"
)
write_artifact(artifact, outputDir, matrix)
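A hypothetical invocation of this converter (the report path follows the e2e layout used elsewhere in this commit; all matrix values below are made up):

import json, subprocess

# Metadata normally supplied by the workflow; every value here is illustrative.
matrix = {
    "runner": "local", "id": "local-v1.32.2-PostgreSQL-17.4",
    "postgres": "17.4", "postgres_kind": "PostgreSQL",
    "kubernetes": "v1.32.2", "runid": "123",
    "repo": "cloudnative-pg/cloudnative-pg", "branch": "dev/demo", "refname": "",
}
subprocess.run(
    ["python", ".github/generate-test-artifacts.py",
     "-f", "tests/e2e/out/report.json",
     "-o", "test-artifacts",
     "-m", json.dumps(matrix)],
    check=True,
)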

.github/gke_versions.json (new file, 6 lines)

@@ -0,0 +1,6 @@
[
"1.32",
"1.31",
"1.30",
"1.29"
]

.github/k8s_versions_scope.json (new file, 10 lines)

@@ -0,0 +1,10 @@
{
"e2e_test": {
"KIND": {"min": "1.27", "max": ""},
"AKS": {"min": "1.28", "max": ""},
"EKS": {"min": "1.29", "max": ""},
"GKE": {"min": "1.29", "max": ""},
"OPENSHIFT": {"min": "4.12", "max": ""}
},
"unit_test": {"min": "1.27", "max": "1.32"}
}

.github/kind_versions.json (new file, 8 lines)

@@ -0,0 +1,8 @@
[
"v1.32.2",
"v1.31.6",
"v1.30.10",
"v1.29.14",
"v1.28.15",
"v1.27.16"
]

.github/openshift_versions.json (new file, 8 lines)

@@ -0,0 +1,8 @@
[
"4.18",
"4.17",
"4.16",
"4.15",
"4.14",
"4.12"
]

.github/pg_versions.json (new file, 22 lines)

@@ -0,0 +1,22 @@
{
"17": [
"17.4",
"17.2"
],
"16": [
"16.8",
"16.6"
],
"15": [
"15.12",
"15.10"
],
"14": [
"14.17",
"14.15"
],
"13": [
"13.20",
"13.18"
]
}

.github/postgres-versions-update.py (new file, 100 lines)

@@ -0,0 +1,100 @@
#
# Copyright © contributors to CloudNativePG, established as
# CloudNativePG a Series of LF Projects, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
#
import re
import pprint
import urllib.request
import json
from packaging import version
from subprocess import check_output
min_supported_major = 13
pg_repo_name = "cloudnative-pg/postgresql"
pg_version_re = re.compile(r"^(\d+)(?:\.\d+|beta\d+|rc\d+|alpha\d+)(-\d+)?$")
pg_versions_file = ".github/pg_versions.json"
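# The regexp accepts tags such as "17.4" (major "17", no extra part),
# "17.4-3" (extra "-3"), and pre-releases like "17beta1" or "17rc2",
# while rejecting bare majors such as "17".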
def get_json(repo_name):
data = check_output([
"docker",
"run",
"--rm",
"quay.io/skopeo/stable",
"list-tags",
"docker://ghcr.io/{}".format(pg_repo_name)])
repo_json = json.loads(data.decode("utf-8"))
return repo_json
def is_pre_release(v):
return version.Version(v).is_prerelease
def write_json(repo_url, version_re, output_file):
repo_json = get_json(repo_url)
tags = repo_json["Tags"]
# Filter out all the tags which do not match the version regexp
tags = [item for item in tags if version_re.search(item)]
# Sort the tags according to semantic versioning
tags.sort(key=version.Version, reverse=True)
results = {}
extra_results = {}
for item in tags:
match = version_re.search(item)
if not match:
continue
major = match.group(1)
# Skip too old versions
if int(major) < min_supported_major:
continue
# We normally want to handle only versions without the '-' inside
extra = match.group(2)
if not extra:
if major not in results:
results[major] = [item]
elif len(results[major]) < 2:
results[major].append(item)
# But we keep the highest version with the '-' in case we do not have enough other versions
else:
if major not in extra_results:
extra_results[major] = item
# If there are not enough versions without '-' inside, we add the one we kept
for major in results:
if len(results[major]) < 2:
results[major].append(extra_results[major])
# You cannot update between pre-release versions. If either of the two
# selected values is a pre-release, pair the most recent version with the
# extra tag we kept (which might be a release)
elif is_pre_release(results[major][0]) or is_pre_release(results[major][1]):
results[major] = [results[major][0], extra_results[major]]
with open(output_file, "w") as json_file:
json.dump(results, json_file, indent=2)
if __name__ == "__main__":
# PostgreSQL JSON file generator with Versions like x.y
write_json(pg_repo_name, pg_version_re, pg_versions_file)

.github/renovate.json5 (new file, 467 lines)

@@ -0,0 +1,467 @@
{
$schema: 'https://docs.renovatebot.com/renovate-schema.json',
extends: [
'config:recommended',
],
rebaseWhen: 'never',
prConcurrentLimit: 5,
baseBranches: [
'main',
'release-1.22',
'release-1.24',
'release-1.25'
],
ignorePaths: [
'docs/**',
'releases/**',
'contribute/**',
'licenses/**',
'pkg/versions/**',
],
postUpdateOptions: [
'gomodTidy',
],
semanticCommits: 'enabled',
labels: [
'automated',
'do not backport',
'no-issue',
],
customManagers: [
{
customType: 'regex',
fileMatch: [
'^Makefile$',
],
matchStrings: [
'KUSTOMIZE_VERSION \\?= (?<currentValue>.*?)\\n',
],
datasourceTemplate: 'go',
depNameTemplate: 'sigs.k8s.io/kustomize/kustomize/v5',
},
{
customType: 'regex',
fileMatch: [
'^Makefile$',
],
matchStrings: [
'CONTROLLER_TOOLS_VERSION \\?= (?<currentValue>.*?)\\n',
],
datasourceTemplate: 'go',
depNameTemplate: 'sigs.k8s.io/controller-tools',
},
{
customType: 'regex',
fileMatch: [
'^Makefile$',
],
matchStrings: [
'GENREF_VERSION \\?= (?<currentValue>.*?)\\n',
],
datasourceTemplate: 'go',
depNameTemplate: 'github.com/kubernetes-sigs/reference-docs/genref',
},
{
customType: 'regex',
fileMatch: [
'^Makefile$',
],
matchStrings: [
'GORELEASER_VERSION \\?= (?<currentValue>.*?)\\n',
],
datasourceTemplate: 'go',
versioningTemplate: 'loose',
depNameTemplate: 'github.com/goreleaser/goreleaser',
},
{
customType: 'regex',
fileMatch: [
'^.github/workflows/continuous-delivery.yml',
'^hack/setup-cluster.sh$',
],
matchStrings: [
'EXTERNAL_SNAPSHOTTER_VERSION: "(?<currentValue>.*?)"',
'EXTERNAL_SNAPSHOTTER_VERSION=(?<currentValue>.*?)\\n',
],
datasourceTemplate: 'github-releases',
versioningTemplate: 'loose',
depNameTemplate: 'kubernetes-csi/external-snapshotter',
extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)',
},
{
customType: 'regex',
fileMatch: [
'^hack/setup-cluster.sh$',
],
matchStrings: [
'EXTERNAL_PROVISIONER_VERSION=(?<currentValue>.*?)\\n',
],
datasourceTemplate: 'github-releases',
versioningTemplate: 'loose',
depNameTemplate: 'kubernetes-csi/external-provisioner',
extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)',
},
{
customType: 'regex',
fileMatch: [
'^hack/setup-cluster.sh$',
],
matchStrings: [
'EXTERNAL_RESIZER_VERSION=(?<currentValue>.*?)\\n',
],
datasourceTemplate: 'github-releases',
versioningTemplate: 'loose',
depNameTemplate: 'kubernetes-csi/external-resizer',
extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)',
},
{
customType: 'regex',
fileMatch: [
'^hack/setup-cluster.sh$',
],
matchStrings: [
'EXTERNAL_ATTACHER_VERSION=(?<currentValue>.*?)\\n',
],
datasourceTemplate: 'github-releases',
versioningTemplate: 'loose',
depNameTemplate: 'kubernetes-csi/external-attacher',
extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)',
},
{
customType: 'regex',
fileMatch: [
'^hack/setup-cluster.sh$',
],
matchStrings: [
'CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=(?<currentValue>.*?)\\n',
],
datasourceTemplate: 'github-releases',
versioningTemplate: 'loose',
depNameTemplate: 'kubernetes-csi/csi-driver-host-path',
extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)',
},
{
customType: 'regex',
fileMatch: [
'^.github/workflows/continuous-delivery.yml',
],
matchStrings: [
'ROOK_VERSION: "(?<currentValue>.*?)"',
],
datasourceTemplate: 'github-releases',
versioningTemplate: 'loose',
depNameTemplate: 'rook/rook',
extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)',
},
{
customType: 'regex',
fileMatch: [
'^.github/workflows/continuous-delivery.yml',
'^.github/workflows/continuous-integration.yml',
],
matchStrings: [
'KIND_VERSION: "(?<currentValue>.*?)"',
],
datasourceTemplate: 'github-tags',
depNameTemplate: 'kubernetes-sigs/kind',
},
{
customType: 'regex',
fileMatch: [
'^hack/setup-cluster.sh$',
'^hack/e2e/run-e2e-kind.sh$',
],
matchStrings: [
'KIND_NODE_DEFAULT_VERSION=(?<currentValue>.*?)\\n',
],
datasourceTemplate: 'docker',
versioningTemplate: 'loose',
depNameTemplate: 'kindest/node',
},
{
customType: 'regex',
fileMatch: [
'^Makefile$',
],
matchStrings: [
'SPELLCHECK_VERSION \\?= (?<currentValue>.*?)\\n',
],
datasourceTemplate: 'docker',
versioningTemplate: 'loose',
depNameTemplate: 'jonasbn/github-action-spellcheck',
},
{
customType: 'regex',
fileMatch: [
'^Makefile$',
],
matchStrings: [
'WOKE_VERSION \\?= (?<currentValue>.*?)\\n',
],
datasourceTemplate: 'docker',
versioningTemplate: 'loose',
depNameTemplate: 'getwoke/woke',
},
{
customType: 'regex',
fileMatch: [
'^Makefile$',
],
matchStrings: [
'OPERATOR_SDK_VERSION \\?= (?<currentValue>.*?)\\n',
],
datasourceTemplate: 'github-releases',
depNameTemplate: 'operator-framework/operator-sdk',
versioningTemplate: 'loose',
extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)',
},
{
customType: 'regex',
fileMatch: [
'^Makefile$',
],
matchStrings: [
'OPM_VERSION \\?= (?<currentValue>.*?)\\n',
],
datasourceTemplate: 'github-releases',
depNameTemplate: 'operator-framework/operator-registry',
versioningTemplate: 'loose',
extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)',
},
{
customType: 'regex',
fileMatch: [
'^Makefile$',
],
matchStrings: [
'PREFLIGHT_VERSION \\?= (?<currentValue>.*?)\\n',
],
datasourceTemplate: 'github-releases',
depNameTemplate: 'redhat-openshift-ecosystem/openshift-preflight',
versioningTemplate: 'loose',
extractVersionTemplate: '^(?<version>\\d+\\.\\d+\\.\\d+)',
},
{
customType: 'regex',
fileMatch: [
'^config\\/olm-scorecard\\/patches\\/basic\\.config\\.yaml$',
'^config\\/olm-scorecard\\/patches\\/olm\\.config\\.yaml$',
],
matchStrings: [
'image: quay.io/operator-framework/scorecard-test:(?<currentValue>.*?)\\n',
],
datasourceTemplate: 'docker',
versioningTemplate: 'loose',
depNameTemplate: 'quay.io/operator-framework/scorecard-test',
extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)',
},
{
customType: 'regex',
fileMatch: [
'^pkg\\/versions\\/versions\\.go$',
'^pkg\\/specs\\/pgbouncer\\/deployments\\.go$',
],
matchStrings: [
'DefaultImageName = "(?<depName>.+?):(?<currentValue>.*?)"\\n',
'DefaultPgbouncerImage = "(?<depName>.+?):(?<currentValue>.*?)"\\n',
],
datasourceTemplate: 'docker',
versioningTemplate: 'loose',
},
{
customType: 'regex',
fileMatch: [
'^\\.github\\/workflows\\/[^/]+\\.ya?ml$',
],
matchStrings: [
'GOLANG_VERSION: "(?<currentValue>.*?)\\.x"',
],
datasourceTemplate: 'golang-version',
depNameTemplate: 'golang',
versioningTemplate: 'loose',
extractVersionTemplate: '^(?<version>\\d+\\.\\d+)',
},
{
customType: 'regex',
fileMatch: [
'^\\.github\\/workflows\\/[^/]+\\.ya?ml$',
],
matchStrings: [
'GOLANGCI_LINT_VERSION: "v(?<currentValue>.*?)"',
],
datasourceTemplate: 'github-releases',
depNameTemplate: 'golangci/golangci-lint',
versioningTemplate: 'loose',
extractVersionTemplate: '^v(?<version>\\d+\\.\\d+\\.\\d+)',
},
{
customType: 'regex',
fileMatch: [
'^.github/workflows/continuous-delivery.yml',
],
matchStrings: [
'VELERO_VERSION: "v(?<currentValue>.*?)"',
],
datasourceTemplate: 'github-releases',
depNameTemplate: 'vmware-tanzu/velero',
versioningTemplate: 'loose',
extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)',
},
{
customType: 'regex',
fileMatch: [
'^.github/workflows/continuous-delivery.yml',
],
matchStrings: [
'VELERO_AWS_PLUGIN_VERSION: "v(?<currentValue>.*?)"',
],
datasourceTemplate: 'github-releases',
depNameTemplate: 'vmware-tanzu/velero-plugin-for-aws',
versioningTemplate: 'loose',
extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)',
},
],
packageRules: [
{
matchDatasources: [
'docker',
],
allowedVersions: '!/alpha/',
},
{
matchDatasources: [
'go',
],
matchDepNames: [
'k8s.io/client-go',
],
allowedVersions: '<1.0',
},
{
matchDatasources: [
'go',
],
groupName: 'kubernetes patches',
matchUpdateTypes: [
'patch',
'digest',
],
matchPackageNames: [
'k8s.io{/,}**',
'sigs.k8s.io{/,}**',
'github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring{/,}**',
],
matchDepNames: [
'!sigs.k8s.io/kustomize/kustomize/v5',
'!sigs.k8s.io/controller-tools',
],
},
{
matchDatasources: [
'go',
],
matchUpdateTypes: [
'major',
'minor',
],
matchPackageNames: [
'k8s.io{/,}**',
'sigs.k8s.io{/,}**',
'github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring{/,}**',
],
},
{
matchDatasources: [
'go',
],
matchUpdateTypes: [
'major',
],
matchPackageNames: [
'*',
'!k8s.io{/,}**',
'!sigs.k8s.io{/,}**',
'!github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring{/,}**',
],
},
{
matchDatasources: [
'go',
],
matchUpdateTypes: [
'minor',
'patch',
'digest',
],
groupName: 'all non-major go dependencies',
matchPackageNames: [
'*',
'!k8s.io{/,}**',
'!sigs.k8s.io{/,}**',
'!github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring{/,}**',
'!github.com/cloudnative-pg/{/,}**',
],
},
{
matchDepTypes: [
'action',
],
matchUpdateTypes: [
'minor',
'patch',
],
groupName: 'all non-major github action',
pinDigests: false,
},
{
matchDepTypes: [
'action',
],
pinDigests: false,
},
{
groupName: 'kubernetes CSI',
separateMajorMinor: false,
pinDigests: false,
matchPackageNames: [
'kubernetes-csi{/,}**',
'rook{/,}**',
],
},
{
groupName: 'backup test tools',
separateMajorMinor: false,
pinDigests: false,
matchPackageNames: [
'vmware-tanzu{/,}**',
],
},
{
groupName: 'operator framework',
separateMajorMinor: false,
pinDigests: false,
matchPackageNames: [
'operator-framework{/,}**',
'redhat-openshift-ecosystem{/,}**',
'quay.io/operator-framework{/,}**',
],
},
{
groupName: 'spellcheck',
separateMajorMinor: false,
pinDigests: false,
matchPackageNames: [
'jonasbn/github-action-spellcheck{/,}**',
'rojopolis/spellcheck-github-actions{/,}**',
],
},
{
groupName: 'cnpg',
matchPackageNames: [
'github.com/cloudnative-pg/',
],
separateMajorMinor: false,
pinDigests: false,
},
],
}
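To illustrate how the first custom manager above matches, here is a quick, hypothetical check in Python. Note that Python spells the named group (?P<...>) where Renovate's JS-flavored regex uses (?<...>), and the Makefile line is made up:

import re

line = "KUSTOMIZE_VERSION ?= v5.4.3\n"  # hypothetical Makefile content
m = re.search(r"KUSTOMIZE_VERSION \?= (?P<currentValue>.*?)\n", line)
print(m.group("currentValue"))  # -> v5.4.3, looked up as sigs.k8s.io/kustomize/kustomize/v5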

.github/report-failed-test.sh (new file, 86 lines)

@@ -0,0 +1,86 @@
#!/usr/bin/env bash
##
## Copyright © contributors to CloudNativePG, established as
## CloudNativePG a Series of LF Projects, LLC.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
## SPDX-License-Identifier: Apache-2.0
##
echo '::echo::off'
colorBoldRed='\033[1;31m'
colorWhite='\033[37m'
colorBoldWhite='\033[1;37m'
colorGreen='\033[0;32m'
indent() {
local indent=1
if [ -n "$1" ]; then indent=$1; fi
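# pr -t suppresses page headers/footers; -o (combined here as -to "${indent}") shifts each line right by that many spaces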
pr -to "${indent}"
}
function failure_summary {
# Number of failures
cnt=0
highlight_color=${colorBoldRed}
printf "${highlight_color}%s\n\n" "Summarizing Failure(s):"
filter_file="hack/e2e/filter-failures.jq"
summary="Failure(s) Found!"
for ff in "tests/e2e/out/upgrade_report.json" "tests/e2e/out/report.json"
do
# the upgrade_report.json file may not exist depending on the test level
if [ ! -f "$ff" ] && [ "$ff" = "tests/e2e/out/upgrade_report.json" ]; then
continue
fi
while read -rs failure; do
desc=$(printf "%s" "${failure}" | jq -r -C '. | .Test')
err=$(printf "%s" "${failure}" | jq -r -C '. | .Error')
indented_err=$(echo "${err}" | indent 20)
location=$(printf "%s" "${failure}" | jq -r -C '. | (.File + ":" + .Line)')
stack=$(printf "%s" "${failure}" | jq -r .Stack)
indented_stack=$(echo "${stack}" | indent 18)
printf "${colorGreen}%-20s" "Spec Description: "
printf "${colorBoldWhite}%s\n" "${desc}"
printf "${colorGreen}%-20s\n" "Error Description:"
printf "${highlight_color}%s${highlight_color}\n" "${indented_err}"
printf "${colorGreen}%-20s" "Code Location:"
printf "${colorWhite}%s\n" "${location}"
echo
## The below line will print an annotation
## on the relevant source code line of the
## test that has failed. The annotation will
## be printed in the "changed files" tab of
## the Pull Request. We are commenting this
## to avoid generating noise when tests fail
## during workflows of PRs unrelated to that
## specific test.
# echo "$failure" | jq -r '. | "::notice file=" + .File + ",line=" + .Line + "::" + (.Error | @json )'
echo "::group::Stack Trace:"
echo "${indented_stack}"
echo "::endgroup::"
(( cnt+=1 ))
echo
echo "-----"
done < <(jq -c -f "${filter_file}" "$ff")
done
printf "${highlight_color}%d ${summary}\n\n" "${cnt}"
echo "------------------------------"
echo
}
failure_summary

.github/vpc_destroy.py (new file, 265 lines)

@@ -0,0 +1,265 @@
#!/usr/bin/env python
# description : Delete a specific AWS VPC with boto3
# author : Jeff Loughridge
# e-mail : jeffl@konekti.us
# create date : Mar 1, 2019
# modify date : Nov 02, 2021
# credits to https://github.com/jeffbrl/aws-vpc-destroy
import argparse
import logging
import os
import time
import sys
import boto3
from botocore.exceptions import ClientError
logger = logging.getLogger("root")
FORMAT = "[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s"
logging.basicConfig(format=FORMAT, level=logging.INFO)
def destroy_ec2(vpc_id, aws_region):
logger.debug(f"{vpc_id}")
ec2 = boto3.resource("ec2", region_name=aws_region)
ec2client = ec2.meta.client
# test for valid credentials
try:
ec2client.describe_vpcs()
except ClientError as e:
logging.info(e)
print(
"Either your credentials are invalid or your IAM user doesn't have permissions to list VPCs"
)
sys.exit(1)
if not vpc_exists(ec2client, vpc_id):
print(f"VPC {vpc_id} does not exist in {aws_region}")
return
vpc = ec2.Vpc(vpc_id)
# disassociate EIPs and release EIPs from EC2 instances
for subnet in vpc.subnets.all():
for instance in subnet.instances.all():
filters = [{"Name": "instance-id", "Values": [instance.id]}]
eips = ec2client.describe_addresses(Filters=filters)["Addresses"]
for eip in eips:
ec2client.disassociate_address(AssociationId=eip["AssociationId"])
ec2client.release_address(AllocationId=eip["AllocationId"])
# delete instances
filters = [
{"Name": "instance-state-name", "Values": ["running"]},
{"Name": "vpc-id", "Values": [vpc_id]},
]
ec2_instances = ec2client.describe_instances(Filters=filters)
instance_ids = []
for reservation in ec2_instances["Reservations"]:
instance_ids += [
instance["InstanceId"] for instance in reservation["Instances"]
]
logger.info(f"instance deletion list: {instance_ids}")
if instance_ids:
logging.info("Waiting for instances to terminate")
waiter = ec2client.get_waiter("instance_terminated")
response = ec2client.terminate_instances(InstanceIds=instance_ids)
logging.info(f"Response: {response}")
waiter.wait(InstanceIds=instance_ids)
def destroy_services(vpc_id, aws_region, services):
services_map = {"ec2": destroy_ec2}
for service in services.split(","):
try:
services_map[service](vpc_id, aws_region)
except KeyError:
logger.error(f"destroying {service} not implemented")
def vpc_exists(ec2client, vpc_id):
try:
ec2client.describe_vpcs(VpcIds=[vpc_id])
except ClientError as e:
logging.info(e)
return False
return True
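# Illustrative behavior (made-up ID): vpc_exists(ec2client, "vpc-0abc123")
# returns False when describe_vpcs raises a ClientError (e.g. InvalidVpcID.NotFound),
# and True when the VPC can be described.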
def delete_vpc(vpc_id, aws_region, release_eips=False):
ec2 = boto3.resource("ec2", region_name=aws_region)
ec2client = ec2.meta.client
if not vpc_exists(ec2client, vpc_id):
print(f"VPC {vpc_id} does not exist in {aws_region}")
return False
# Exit cleanly if the user did not specify at the command line to delete
# EC2 instances for a VPC with running instances
filters = [
{"Name": "instance-state-name", "Values": ["running"]},
{"Name": "vpc-id", "Values": [vpc_id]},
]
if ec2client.describe_instances(Filters=filters)["Reservations"]:
print(
f"Running EC2 instances exist in {vpc_id}. Please use --services ec2 to invoke the program."
)
return False
vpc = ec2.Vpc(vpc_id)
# delete transit gateway attachment for this vpc
# note - this only handles vpc attachments, not vpn
for attachment in ec2client.describe_transit_gateway_attachments()[
"TransitGatewayAttachments"
]:
if attachment["ResourceId"] == vpc_id:
ec2client.delete_transit_gateway_vpc_attachment(
TransitGatewayAttachmentId=attachment["TransitGatewayAttachmentId"]
)
# delete NAT Gateways
# attached ENIs are automatically deleted
# EIPs are disassociated but not released
filters = [{"Name": "vpc-id", "Values": [vpc_id]}]
for nat_gateway in ec2client.describe_nat_gateways(Filters=filters)["NatGateways"]:
ec2client.delete_nat_gateway(NatGatewayId=nat_gateway["NatGatewayId"])
# detach default dhcp_options if associated with the vpc
dhcp_options_default = ec2.DhcpOptions("default")
if dhcp_options_default:
dhcp_options_default.associate_with_vpc(VpcId=vpc.id)
# delete any vpc peering connections
for vpc_peer in ec2client.describe_vpc_peering_connections()[
"VpcPeeringConnections"
]:
if vpc_peer["AccepterVpcInfo"]["VpcId"] == vpc_id:
ec2.VpcPeeringConnection(vpc_peer["VpcPeeringConnectionId"]).delete()
if vpc_peer["RequesterVpcInfo"]["VpcId"] == vpc_id:
ec2.VpcPeeringConnection(vpc_peer["VpcPeeringConnectionId"]).delete()
# delete our endpoints
for ep in ec2client.describe_vpc_endpoints(
Filters=[{"Name": "vpc-id", "Values": [vpc_id]}]
)["VpcEndpoints"]:
ec2client.delete_vpc_endpoints(VpcEndpointIds=[ep["VpcEndpointId"]])
# delete custom NACLs
for netacl in vpc.network_acls.all():
if not netacl.is_default:
netacl.delete()
# ensure ENIs are deleted before proceeding
timeout = time.time() + 300
filters = [{"Name": "vpc-id", "Values": [vpc_id]}]
logger.info("proceeding with ENI deletion")
reached_timeout = True
while time.time() < timeout:
if not ec2client.describe_network_interfaces(Filters=filters)[
"NetworkInterfaces"
]:
logger.info(f"no ENIs remaining")
reached_timeout = False
break
else:
logger.info(f"waiting on ENIs to delete")
ec2resource = boto3.resource("ec2", region_name=aws_region)
for ni in ec2client.describe_network_interfaces(Filters=filters)["NetworkInterfaces"]:
print(ni["NetworkInterfaceId"])
network_interface = ec2resource.NetworkInterface(ni["NetworkInterfaceId"])
if "AttachmentId" in ni:
network_interface.detach(Force=True)
time.sleep(10)
network_interface.delete()
time.sleep(30)
if reached_timeout:
logger.info(f"ENI deletion timed out")
# delete subnets
for subnet in vpc.subnets.all():
for interface in subnet.network_interfaces.all():
interface.delete()
subnet.delete()
# Delete routes, associations, and routing tables
filters = [{"Name": "vpc-id", "Values": [vpc_id]}]
route_tables = ec2client.describe_route_tables(Filters=filters)["RouteTables"]
for route_table in route_tables:
for route in route_table["Routes"]:
if route["Origin"] == "CreateRoute":
ec2client.delete_route(
RouteTableId=route_table["RouteTableId"],
DestinationCidrBlock=route["DestinationCidrBlock"],
)
for association in route_table["Associations"]:
if not association["Main"]:
ec2client.disassociate_route_table(
AssociationId=association["RouteTableAssociationId"]
)
ec2client.delete_route_table(
RouteTableId=route_table["RouteTableId"]
)
# delete routing tables without associations
for route_table in route_tables:
if route_table["Associations"] == []:
ec2client.delete_route_table(RouteTableId=route_table["RouteTableId"])
# destroy NAT gateways
filters = [{"Name": "vpc-id", "Values": [vpc_id]}]
nat_gateway_ids = [
nat_gateway["NatGatewayId"]
for nat_gateway in ec2client.describe_nat_gateways(Filters=filters)[
"NatGateways"
]
]
for nat_gateway_id in nat_gateway_ids:
ec2client.delete_nat_gateway(NatGatewayId=nat_gateway_id)
# detach and delete all IGWs associated with the vpc
for gw in vpc.internet_gateways.all():
vpc.detach_internet_gateway(InternetGatewayId=gw.id)
gw.delete()
# delete custom security groups
for sg in vpc.security_groups.all():
if sg.group_name != "default":
sg.delete()
ec2client.delete_vpc(VpcId=vpc_id)
return True
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser._action_groups.pop()
required = parser.add_argument_group("required arguments")
optional = parser.add_argument_group("optional arguments")
required.add_argument("--vpc_id", required=True, help="Please include your vpc_id")
optional.add_argument(
"--services", help="comma-separated list of AWS services to tear down"
)
optional.add_argument("--region", help="AWS region")
    args = parser.parse_args()
    aws_region = args.region if args.region else os.environ["AWS_DEFAULT_REGION"]
    vpc_id = args.vpc_id
if args.services:
logger.info(f"calling destroy_services with {args.services}")
destroy_services(args.vpc_id, aws_region, args.services)
logger.info(f"calling delete_vpc with {vpc_id}")
if delete_vpc(vpc_id=vpc_id, aws_region=aws_region, release_eips=False):
print(f"destroyed {vpc_id} in {aws_region}")
else:
print(f"unable to destroy {vpc_id} in {aws_region}")

.github/workflows/backport.yml vendored Normal file

@ -0,0 +1,165 @@
name: Backport Pull Request
on:
pull_request_target:
types:
- closed
- opened
- reopened
branches:
- main
env:
GOLANG_VERSION: "1.24.x"
jobs:
  # Label the source pull request with 'backport-requested' and all the supported release labels.
  # The goal is to backport everything by default, except PRs explicitly labeled 'do not backport'.
label-source-pr:
name: Add labels to PR
if: |
github.event.pull_request.merged == false &&
!contains(github.event.pull_request.labels.*.name, 'backport-requested') &&
!contains(github.event.pull_request.labels.*.name, 'do not backport')
runs-on: ubuntu-24.04
steps:
-
name: Label the pull request
uses: actions-ecosystem/action-add-labels@v1
if: ${{ !contains(github.event.pull_request.labels.*.name, 'do not backport') }}
with:
github_token: ${{ secrets.REPO_GHA_PAT }}
number: ${{ github.event.pull_request.number }}
labels: |
backport-requested :arrow_backward:
release-1.22
release-1.24
release-1.25
-
name: Create comment
uses: peter-evans/create-or-update-comment@v4
with:
token: ${{ secrets.GITHUB_TOKEN }}
issue-number: ${{ github.event.pull_request.number }}
body: |
:exclamation: By default, the pull request is configured to backport to all release branches.
            - To stop backporting this PR, remove the label: backport-requested :arrow_backward: or add the label 'do not backport'
            - To stop backporting this PR to a certain release branch, remove the specific branch label: release-x.y
reactions: heart
-
name: Remove redundant labels
uses: actions-ecosystem/action-remove-labels@v1
if: ${{ contains(github.event.pull_request.labels.*.name, 'do not backport') }}
with:
github_token: ${{ secrets.REPO_GHA_PAT }}
labels: |
backport-requested :arrow_backward:
release-1.22
release-1.24
release-1.25
## Backport the merged pull request when it carries the 'backport-requested' label and the target release branch labels
back-porting-pr:
name: Backport to release branches
if: |
github.event.pull_request.merged == true &&
(
contains(github.event.pull_request.labels.*.name, 'backport-requested') ||
contains(github.event.pull_request.labels.*.name, 'backport-requested :arrow_backward:')
) &&
!contains(github.event.pull_request.labels.*.name, 'do not backport')
runs-on: ubuntu-24.04
strategy:
fail-fast: false
matrix:
branch: [release-1.22, release-1.24, release-1.25]
env:
PR: ${{ github.event.pull_request.number }}
outputs:
commit: ${{ steps.check_commits.outputs.commit }}
steps:
-
name: Checkout code
if: contains( github.event.pull_request.labels.*.name, matrix.branch )
uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ matrix.branch }}
token: ${{ secrets.REPO_GHA_PAT }}
-
name: Install Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GOLANG_VERSION }}
check-latest: true
-
name: Check commits
if: contains( github.event.pull_request.labels.*.name, matrix.branch )
id: check_commits
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
commit=$(gh pr view ${PR} --json mergeCommit -q ".mergeCommit.oid" 2>/dev/null || :)
if [ -z "${commit}" ]
then
echo "No commit found!"
exit 0
fi
echo "commit=${commit}" >> $GITHUB_OUTPUT
echo "cherry-pick commit ${commit} to branch ${{ matrix.branch }}"
author_name=$(git show -s --format='%an' "${commit}")
echo "AUTHOR_NAME=${author_name}" >> $GITHUB_ENV
author_email=$(git show -s --format='%ae' "${commit}")
echo "AUTHOR_EMAIL=${author_email}" >> $GITHUB_ENV
-
name: cherry pick
env:
COMMIT: ${{ steps.check_commits.outputs.commit }}
if: |
contains( github.event.pull_request.labels.*.name, matrix.branch ) && env.COMMIT != ''
run: |
git config user.email "${{ env.AUTHOR_EMAIL }}"
git config user.name "${{ env.AUTHOR_NAME }}"
git fetch
git cherry-pick -x --mainline 1 ${{ env.COMMIT }}
make fmt vet generate apidoc wordlist-ordered
if ! git diff --exit-code --quiet
then
echo "!!! Generated files need manually handling"
exit 1
fi
git push
create-tickets:
name: Create tickets for failures
needs:
- back-porting-pr
if: |
failure() && !cancelled() &&
needs.back-porting-pr.outputs.commit != ''
env:
PR: ${{ github.event.pull_request.number }}
COMMIT: ${{ needs.back-porting-pr.outputs.commit }}
runs-on: ubuntu-24.04
steps:
- name: create ticket
uses: dacbd/create-issue-action@v2
with:
token: ${{ secrets.GITHUB_TOKEN }}
title: Backport failure for pull request ${{ env.PR }}
labels: backport failure
body: |
### Context
Automatically backport failure for pull request ${{ env.PR }}
Pull request: ${{ github.server_url }}/${{ github.repository }}/pull/${{ env.PR }}
Commit: ${{ github.server_url }}/${{ github.repository }}/commit/${{ env.COMMIT }}
Workflow: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
To solve the ticket, open the workflow link above, and for each failed release branch check the following:
1. Whether the commit should be `cherry-pick`(ed) to this release branch, otherwise skip this release branch
            2. If yes, `cherry-pick` the commit manually and push it to the release branch. You may need to resolve
               the conflicts and run `git cherry-pick --continue`. A missing dependent commit may also be causing
               the failure; in that case, `cherry-pick` the dependent commit first.
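
The label gating above boils down to a small decision function; a sketch of the
same logic in Python (the function name is ours and the branch list is taken
from the matrix above, not from any workflow API):

RELEASE_BRANCHES = ["release-1.22", "release-1.24", "release-1.25"]

def backport_targets(labels: set[str]) -> list[str]:
    # mirrors the job-level `if:` conditions: an explicit veto wins,
    # a backport-requested label is mandatory, and each release branch
    # is targeted only when its own label is present
    if "do not backport" in labels:
        return []
    if not any(label.startswith("backport-requested") for label in labels):
        return []
    return [branch for branch in RELEASE_BRANCHES if branch in labels]

print(backport_targets({"backport-requested :arrow_backward:", "release-1.25"}))
# ['release-1.25']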

.github/workflows/chatops.yml vendored Normal file

@ -0,0 +1,29 @@
# This workflow adds support for custom "slash commands" in the CI/CD pipeline.
# It is triggered by comments made on pull requests.
#
# 1. If the comment starts with "/ok-to-merge", it will label the PR with
# "ok to merge :ok_hand:"
#
name: slash-command
on:
issue_comment:
types: [created]
jobs:
ok-to-merge:
if: |
github.event.issue.pull_request &&
startsWith(github.event.comment.body, '/ok-to-merge')
runs-on: ubuntu-24.04
steps:
- name: Check User Permission
id: checkUser
uses: actions-cool/check-user-permission@v2
with:
require: 'write'
- name: Add "ok to merge :ok_hand:" label to PR
uses: actions-ecosystem/action-add-labels@v1.1.3
if: steps.checkUser.outputs.require-result == 'true'
with:
github_token: ${{ secrets.REPO_GHA_PAT }}
labels: "ok to merge :ok_hand:"


@ -0,0 +1,24 @@
# See https://github.com/marketplace/actions/close-stale-issues
name: Close inactive issues
on:
workflow_dispatch:
schedule:
- cron: "30 1 * * *"
jobs:
close-issues:
runs-on: ubuntu-latest
permissions:
issues: write
#pull-requests: write
steps:
- uses: actions/stale@v9
with:
days-before-issue-stale: 60
days-before-issue-close: 14
stale-issue-message: "This issue is stale because it has been open for 60 days with no activity."
close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale."
days-before-pr-stale: -1
days-before-pr-close: -1
ascending: true
exempt-issue-labels: "no-stale"

.github/workflows/codeql-analysis.yml vendored Normal file

@ -0,0 +1,96 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: [ main ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ main ]
paths-ignore:
- 'docs/**'
- '**/*.md'
- '**/*.txt'
- '**/*.yml'
- '**/*.yaml'
- '**/*.yaml.template'
- '**/*.py'
- '**/*.sh'
- 'hack/**'
schedule:
- cron: '24 0 * * 5'
# set up environment variables to be used across all the jobs
env:
GOLANG_VERSION: "1.24.x"
jobs:
duplicate_runs:
runs-on: ubuntu-24.04
name: Skip duplicate runs
continue-on-error: true
outputs:
should_skip: ${{ steps.skip_check.outputs.should_skip == 'true' && github.ref != 'refs/heads/main' }}
steps:
- id: skip_check
uses: fkirc/skip-duplicate-actions@v5.3.1
with:
concurrent_skipping: 'same_content'
skip_after_successful_duplicate: 'true'
do_not_skip: '["pull_request", "workflow_dispatch", "schedule"]'
analyze:
name: Analyze
runs-on: ubuntu-latest
needs:
- duplicate_runs
    # Run the CodeQL analysis unless this workflow run is a duplicate
if: |
needs.duplicate_runs.outputs.should_skip != 'true'
permissions:
actions: read
contents: read
security-events: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Install Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GOLANG_VERSION }}
check-latest: true
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: "go"
build-mode: manual
config: |
paths-ignore:
- licenses/
- tests/
      # Even though we build manually, the tests will not be included in the CodeQL scan.
      # We will always have fewer files, which improves the speed of the scan since it
      # does not try to build by running multiple commands.
- name: Build
run: |
make
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
with:
category: "/language:go"

.github/workflows/continuous-delivery.yml vendored Normal file

File diff suppressed because it is too large


@ -0,0 +1,852 @@
# This workflow executes the following actions:
# - Runs Golang and ShellCheck linters
# - Runs Unit tests
# - Verifies API doc and CRDs are up to date
# - Builds the operator image (no push)
name: continuous-integration
on:
push:
branches:
- main
- release-*
pull_request:
workflow_dispatch:
schedule:
- cron: '0 1 * * *'
# set up environment variables to be used across all the jobs
env:
GOLANG_VERSION: "1.24.x"
GOLANGCI_LINT_VERSION: "v1.64.8"
KUBEBUILDER_VERSION: "2.3.1"
KIND_VERSION: "v0.27.0"
OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing"
API_DOC_NAME: "cloudnative-pg.v1.md"
SLACK_USERNAME: "cnpg-bot"
# Keep in mind that adding more platforms (architectures) will increase the building
# time even if we use the ghcache for the building process.
PLATFORMS: "linux/amd64,linux/arm64"
BUILD_PUSH_PROVENANCE: ""
BUILD_PUSH_CACHE_FROM: ""
BUILD_PUSH_CACHE_TO: ""
BUILD_PLUGIN_RELEASE_ARGS: "build --skip=validate --clean --id kubectl-cnpg --timeout 60m"
BUILD_MANAGER_RELEASE_ARGS: "build --skip=validate --clean --id manager-race"
REPOSITORY_OWNER: "cloudnative-pg"
REGISTRY: "ghcr.io"
REGISTRY_USER: ${{ github.actor }}
REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }}
SIGN_IMAGES: "true"
OPP_SCRIPT_URL: "https://raw.githubusercontent.com/redhat-openshift-ecosystem/community-operators-pipeline/ci/latest/ci/scripts/opp.sh"
defaults:
run:
# default failure handling for shell scripts in 'run' steps
shell: 'bash -Eeuo pipefail -x {0}'
jobs:
# Trigger the workflow on release-* branches for smoke testing whenever it's a scheduled run.
  # Note: this is a workaround since we can't directly schedule-run a workflow from a non-default branch
smoke_test_release_branches:
runs-on: ubuntu-24.04
name: smoke test release-* branches when it's a scheduled run
if: github.event_name == 'schedule'
strategy:
fail-fast: false
matrix:
branch: [release-1.22, release-1.24, release-1.25]
steps:
- name: Invoke workflow with inputs
uses: benc-uk/workflow-dispatch@v1
with:
workflow: continuous-integration
ref: ${{ matrix.branch }}
# Detects if we should skip the workflow due to being duplicated. Exceptions:
# 1. it's on 'main' branch
# 2. it's triggered by events in the 'do_not_skip' list
duplicate_runs:
runs-on: ubuntu-24.04
name: Skip duplicate runs
continue-on-error: true
outputs:
should_skip: ${{ steps.skip_check.outputs.should_skip == 'true' && github.ref != 'refs/heads/main' }}
steps:
- id: skip_check
uses: fkirc/skip-duplicate-actions@v5.3.1
with:
concurrent_skipping: 'same_content'
skip_after_successful_duplicate: 'true'
paths_ignore: '["README.md", "docs/**"]'
do_not_skip: '["pull_request", "workflow_dispatch", "schedule"]'
  # Classify codebase changes along six different dimensions based on the files
  # changed in the commit/PR, and create six different filters which are used in
  # the following jobs to decide whether the step should be skipped.
change-triage:
name: Check changed files
needs: duplicate_runs
if: ${{ needs.duplicate_runs.outputs.should_skip != 'true' }}
runs-on: ubuntu-24.04
outputs:
docs-changed: ${{ steps.filter.outputs.docs-changed }}
operator-changed: ${{ steps.filter.outputs.operator-changed }}
test-changed: ${{ steps.filter.outputs.test-changed }}
shell-script-changed: ${{ steps.filter.outputs.shell-script-changed }}
go-code-changed: ${{ steps.filter.outputs.go-code-changed }}
renovate-changed: ${{ steps.filter.outputs.renovate-changed }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Check for changes
uses: dorny/paths-filter@v3.0.2
id: filter
# Remember to add new folders in the operator-changed filter if needed
with:
base: ${{ (github.event_name == 'schedule') && 'main' || '' }}
filters: |
docs-changed:
- '**/*.md'
- 'docs/**'
- '.wordlist-en-custom.txt'
operator-changed:
- 'api/**'
- 'cmd/**'
- 'config/**'
- 'internal/**'
- 'licenses/**'
- 'pkg/**'
- '.github/workflows/continuous-delivery.yml'
- '.github/workflows/continuous-integration.yml'
- '.goreleaser*.yml'
- 'Dockerfile'
- 'docker-bake.hcl'
- 'Makefile'
- 'go.mod'
- 'go.sum'
test-changed:
- '.github/e2e-matrix-generator.py'
- '.github/generate-test-artifacts.py'
- 'tests/**'
- 'hack/**'
shell-script-changed:
- '**/*.sh'
go-code-changed:
- '**/*.go'
- '.golangci.yml'
renovate-changed:
- '.github/renovate.json5'
go-linters:
name: Run linters
needs:
- duplicate_runs
- change-triage
    # Always run the linters, since the Go linter is a required check
if: needs.duplicate_runs.outputs.should_skip != 'true'
runs-on: ubuntu-24.04
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install Go
uses: actions/setup-go@v5
with:
# Disable setup-go caching. Cache is better handled by the golangci-lint action
cache: false
go-version: ${{ env.GOLANG_VERSION }}
check-latest: true
- name: Run golangci-lint
uses: golangci/golangci-lint-action@v6
with:
version: ${{ env.GOLANGCI_LINT_VERSION }}
- name: Check go mod tidy has no pending changes
run: |
make go-mod-check
renovate-linter:
name: Renovate Linter
needs:
- duplicate_runs
- change-triage
if: |
needs.duplicate_runs.outputs.should_skip != 'true' &&
needs.change-triage.outputs.renovate-changed == 'true'
runs-on: ubuntu-24.04
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Validate Renovate JSON
run: npx --yes --package renovate -- renovate-config-validator
go-vulncheck:
name: Run govulncheck
needs:
- duplicate_runs
- change-triage
if: |
needs.duplicate_runs.outputs.should_skip != 'true' &&
(
needs.change-triage.outputs.operator-changed == 'true' ||
needs.change-triage.outputs.go-code-changed == 'true'
)
runs-on: ubuntu-24.04
steps:
- name: Run govulncheck
uses: golang/govulncheck-action@v1
with:
go-version-input: ${{ env.GOLANG_VERSION }}
check-latest: true
shellcheck:
name: Run shellcheck linter
needs:
- duplicate_runs
- change-triage
# Run shellcheck linter only if shell code has changed
if: |
needs.duplicate_runs.outputs.should_skip != 'true' &&
needs.change-triage.outputs.shell-script-changed == 'true'
runs-on: ubuntu-24.04
env:
SHELLCHECK_OPTS: -a -S style
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Run ShellCheck
uses: ludeeus/action-shellcheck@2.0.0
generate-unit-tests-jobs:
name: Generate jobs for unit tests
needs:
- duplicate_runs
- change-triage
    # Generate unit-test jobs only if the operator or the Go codebase has changed
if: |
needs.duplicate_runs.outputs.should_skip != 'true' &&
(
needs.change-triage.outputs.operator-changed == 'true' ||
needs.change-triage.outputs.go-code-changed == 'true'
)
runs-on: ubuntu-24.04
outputs:
k8sMatrix: ${{ steps.get-k8s-versions.outputs.k8s_versions }}
latest_k8s_version: ${{ steps.get-k8s-versions.outputs.latest_k8s_version }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Get k8s versions for unit test
id: get-k8s-versions
shell: bash
run: |
k8s_versions=$(jq -c '
.unit_test.max as $max |
.unit_test.min as $min |
$min | [ while(. <= $max;
. | split(".") | .[1] |= (.|tonumber|.+1|tostring) | join(".")
)
] |
.[] |= .+".x"
' < .github/k8s_versions_scope.json)
echo "k8s_versions=${k8s_versions}" >> $GITHUB_OUTPUT
latest_k8s_version=$(jq -r '.|last' <<< $k8s_versions)
echo "latest_k8s_version=${latest_k8s_version}" >> $GITHUB_OUTPUT
tests:
name: Run unit tests
needs:
- duplicate_runs
- change-triage
- generate-unit-tests-jobs
    # Run unit tests only if the operator or the Go codebase has changed
if: |
needs.duplicate_runs.outputs.should_skip != 'true' &&
(
needs.change-triage.outputs.operator-changed == 'true' ||
needs.change-triage.outputs.go-code-changed == 'true'
)
runs-on: ubuntu-24.04
strategy:
matrix:
        # Unit tests are run against multiple supported k8s versions (one job per version):
k8s-version: ${{ fromJSON(needs.generate-unit-tests-jobs.outputs.k8sMatrix) }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GOLANG_VERSION }}
check-latest: true
- name: Run unit tests
env:
ENVTEST_K8S_VERSION: ${{ matrix.k8s-version }}
run: |
make test
- name: Coverage Summary
if: matrix.k8s-version == needs.generate-unit-tests-jobs.outputs.latest_k8s_version
run: |
go tool cover -func=cover.out -o coverage.out
- name: Publish unit test summary on the latest k8s version
if: matrix.k8s-version == needs.generate-unit-tests-jobs.outputs.latest_k8s_version
run: |
echo "Unit test coverage: $(tail -n 1 coverage.out | awk '{print $3}')" >> $GITHUB_STEP_SUMMARY
apidoc:
name: Verify API doc is up to date
needs:
- duplicate_runs
- change-triage
# Run make apidoc if Go code or docs have changed
if: |
needs.duplicate_runs.outputs.should_skip != 'true' &&
(
needs.change-triage.outputs.go-code-changed == 'true' ||
needs.change-triage.outputs.docs-changed == 'true'
)
runs-on: ubuntu-24.04
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GOLANG_VERSION }}
check-latest: true
- name: Run make apidoc
run: |
make apidoc
- name: Verify apidoc changes
run: |
apidoc_file_path='docs/src/${{ env.API_DOC_NAME }}'
if git status --porcelain $apidoc_file_path | grep '^ M'; then
echo "The API documentation doesn't reflect the current API. Please run make apidoc."
exit 1
fi
crd:
name: Verify CRD is up to date
needs:
- duplicate_runs
- change-triage
    # Run make manifests if Go code has changed
if: |
needs.duplicate_runs.outputs.should_skip != 'true' &&
(
needs.change-triage.outputs.go-code-changed == 'true' ||
needs.change-triage.outputs.operator-changed == 'true'
)
runs-on: ubuntu-24.04
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GOLANG_VERSION }}
check-latest: true
- name: Run make manifests
run: |
make manifests
- name: Check CRD manifests are up to date
run: |
crd_path='config/crd'
if git status --porcelain $crd_path | grep '^ M'; then
echo "The CRD manifests do not reflect the current API. Please run make manifests."
exit 1
fi
buildx:
name: Build containers
needs:
- go-linters
- shellcheck
- tests
- apidoc
- crd
- duplicate_runs
- change-triage
# Build containers:
# if there have been any code changes OR it is a scheduled execution
# AND
# none of the preceding jobs failed
if: |
(always() && !cancelled()) &&
(
needs.duplicate_runs.outputs.should_skip != 'true' &&
(
needs.change-triage.outputs.operator-changed == 'true' ||
needs.change-triage.outputs.test-changed == 'true' ||
needs.change-triage.outputs.shell-script-changed == 'true' ||
needs.change-triage.outputs.go-code-changed == 'true'
)
) &&
(needs.go-linters.result == 'success' || needs.go-linters.result == 'skipped') &&
(needs.shellcheck.result == 'success' || needs.shellcheck.result == 'skipped') &&
(needs.tests.result == 'success' || needs.tests.result == 'skipped') &&
(needs.apidoc.result == 'success' || needs.apidoc.result == 'skipped') &&
(needs.crd.result == 'success' || needs.crd.result == 'skipped')
runs-on: ubuntu-24.04
permissions:
actions: read
contents: read
packages: write
security-events: write
id-token: write
outputs:
commit_version: ${{ env.VERSION }}
commit: ${{ env.COMMIT_SHA }}
controller_img: ${{ env.CONTROLLER_IMG }}
controller_img_ubi: ${{ env.CONTROLLER_IMG_UBI }}
bundle_img: ${{ env.BUNDLE_IMG }}
catalog_img: ${{ env.CATALOG_IMG }}
push: ${{ env.PUSH }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
# To identify the commit we need the history and all the tags.
fetch-depth: 0
- name: Install Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GOLANG_VERSION }}
check-latest: true
- name: Build meta
id: build-meta
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
commit_sha=${{ github.event.pull_request.head.sha || github.sha }}
commit_date=$(git log -1 --pretty=format:'%ad' --date short "${commit_sha}" || : )
# use git describe to get the nearest tag and use that to build the version (e.g. 1.4.0-dev24 or 1.4.0)
commit_version=$(git describe --tags --match 'v*' "${commit_sha}"| sed -e 's/^v//; s/-g[0-9a-f]\+$//; s/-\([0-9]\+\)$/-dev\1/')
# shortened commit sha
commit_short=$(git rev-parse --short "${commit_sha}")
# extract branch name
branch_name=${GITHUB_REF#refs/heads/}
if [[ ${{ github.event_name }} == 'pull_request' ]]
then
branch_name=$(gh pr view "${{ github.event.pull_request.number }}" --json headRefName -q '.headRefName' 2>/dev/null)
fi
# extract tag from branch name
tag_name=$(echo "$branch_name" | tr / -)
echo "DATE=${commit_date}" >> $GITHUB_ENV
echo "VERSION=${commit_version}" >> $GITHUB_ENV
echo "COMMIT=${commit_short}" >> $GITHUB_ENV
echo "COMMIT_SHA=${commit_sha}" >> $GITHUB_ENV
echo "IMAGE_TAG=${tag_name,,}" >> $GITHUB_ENV
echo "REPO_OWNER=${GITHUB_REPOSITORY_OWNER,,}" >> $GITHUB_ENV
# By default the container image is being pushed to the registry
echo "PUSH=true" >> $GITHUB_ENV
# GITHUB_TOKEN has restricted permissions if the pull_request has been opened
# from a forked repository, so we avoid pushing to the container registry if
# that's the case.
- name: Evaluate container image push
if: github.event_name == 'pull_request'
env:
BASE_REPO: ${{ github.event.pull_request.base.repo.full_name }}
HEAD_REPO: ${{ github.event.pull_request.head.repo.full_name }}
run: |
if [[ "${{ env.HEAD_REPO }}" != "${{ env.BASE_REPO }}" ]]
then
echo "PUSH=false" >> $GITHUB_ENV
fi
- name: Set GoReleaser environment
run: |
echo GOPATH=$(go env GOPATH) >> $GITHUB_ENV
echo PWD=$(pwd) >> $GITHUB_ENV
- name: Run GoReleaser to build kubectl plugin
uses: goreleaser/goreleaser-action@v6
if: |
github.event_name == 'schedule' ||
(
github.event_name == 'workflow_dispatch' &&
startsWith(github.head_ref, 'release-') ||
startsWith(github.ref_name, 'release-')
)
with:
distribution: goreleaser
version: v2
args: ${{ env.BUILD_PLUGIN_RELEASE_ARGS }}
env:
DATE: ${{ env.DATE }}
COMMIT: ${{ env.COMMIT }}
VERSION: ${{ env.VERSION }}
# Send Slack notification if the kubectl plugin build fails.
# To avoid message overflow, we only report runs scheduled on main or release branches
- name: Slack Notification
uses: rtCamp/action-slack-notify@v2
if: |
failure() &&
github.repository_owner == env.REPOSITORY_OWNER &&
(
github.event_name == 'schedule' ||
(
github.event_name == 'workflow_dispatch' &&
startsWith(github.head_ref, 'release-') ||
startsWith(github.ref_name, 'release-')
)
)
env:
SLACK_COLOR: ${{ job.status }}
SLACK_ICON: https://avatars.githubusercontent.com/u/85171364?size=48
SLACK_USERNAME: ${{ env.SLACK_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_MESSAGE: Building kubernetes plugin failed!
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v6
with:
distribution: goreleaser
version: v2
args: ${{ env.BUILD_MANAGER_RELEASE_ARGS }}
env:
DATE: ${{ env.DATE }}
COMMIT: ${{ env.COMMIT }}
VERSION: ${{ env.VERSION }}
RACE: "true"
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
with:
platforms: ${{ env.PLATFORMS }}
cache-image: false
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login into docker registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ env.REGISTRY_USER }}
password: ${{ env.REGISTRY_PASSWORD }}
- name: Build and push
uses: docker/bake-action@v6
id: bake-push
env:
environment: "testing"
buildVersion: ${{ env.VERSION }}
tag: ${{ env.IMAGE_TAG }}
registry: ${{ env.REGISTRY }}/${{ env.REPO_OWNER }}
revision: ${{ env.COMMIT }}
with:
source: .
push: ${{ env.PUSH }}
no-cache: true
targets: "default"
- name: Output images
if: env.PUSH == 'true'
env:
DISTROLESS: ${{ fromJSON(steps.bake-push.outputs.metadata)['distroless']['image.name'] }}
UBI: ${{ fromJSON(steps.bake-push.outputs.metadata)['ubi']['image.name'] }}
run: |
echo "CONTROLLER_IMG=${DISTROLESS}" >> $GITHUB_ENV
echo "CONTROLLER_IMG_UBI=${UBI}" >> $GITHUB_ENV
echo "BUNDLE_IMG=${UBI}-bundle" >> $GITHUB_ENV
echo "CATALOG_IMG=${UBI}-catalog" >> $GITHUB_ENV
- name: Dockle scan distroless image
uses: erzz/dockle-action@v1
if: env.PUSH == 'true'
with:
image: ${{ env.CONTROLLER_IMG }}
exit-code: '1'
failure-threshold: WARN
accept-keywords: key
- name: Dockle scan UBI image
uses: erzz/dockle-action@v1
if: env.PUSH == 'true'
env:
DOCKLE_IGNORES: CIS-DI-0009
with:
image: ${{ env.CONTROLLER_IMG_UBI }}
exit-code: '1'
failure-threshold: WARN
accept-keywords: key
- name: Run Snyk to check Docker image for vulnerabilities
uses: snyk/actions/docker@master
if: |
!github.event.repository.fork &&
!github.event.pull_request.head.repo.fork
continue-on-error: true
env:
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
with:
image: ${{ env.CONTROLLER_IMG }}
args: --severity-threshold=high --file=Dockerfile
- name: Upload result to GitHub Code Scanning
uses: github/codeql-action/upload-sarif@v3
if: |
!github.event.repository.fork &&
!github.event.pull_request.head.repo.fork
continue-on-error: true
with:
sarif_file: snyk.sarif
- name: Install cosign
if: |
env.SIGN_IMAGES == 'true' &&
env.PUSH == 'true'
uses: sigstore/cosign-installer@v3
# See https://github.blog/security/supply-chain-security/safeguard-container-signing-capability-actions/
# and https://github.com/actions/starter-workflows/blob/main/ci/docker-publish.yml for more details on
# how to use cosign.
- name: Sign images
if: |
env.SIGN_IMAGES == 'true' &&
env.PUSH == 'true'
run: |
images=$(echo '${{ steps.bake-push.outputs.metadata }}' |
jq -r '.[] | (."image.name" | sub(",.*";"" )) + "@" + ."containerimage.digest"'
)
cosign sign --yes ${images}
olm-bundle:
name: Create OLM bundle and catalog
runs-on: ubuntu-24.04
permissions:
contents: read
packages: write
needs:
- buildx
if: |
(always() && !cancelled()) &&
needs.buildx.result == 'success' &&
needs.buildx.outputs.push == 'true'
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ needs.buildx.outputs.commit }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
with:
platforms: ${{ env.PLATFORMS }}
cache-image: false
- name: Install Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GOLANG_VERSION }}
check-latest: true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login into docker registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ env.REGISTRY_USER }}
password: ${{ env.REGISTRY_PASSWORD }}
- name: Create bundle
env:
CONTROLLER_IMG: ${{ needs.buildx.outputs.controller_img_ubi }}
BUNDLE_IMG: ${{ needs.buildx.outputs.bundle_img }}
CATALOG_IMG: ${{ needs.buildx.outputs.catalog_img }}
run: |
make olm-catalog
- name: Archive the bundle manifests
uses: actions/upload-artifact@v4
with:
name: bundle
path: |
bundle.Dockerfile
bundle/
cloudnative-pg-catalog.yaml
retention-days: 7
preflight:
name: Run openshift-preflight test
runs-on: ubuntu-24.04
needs:
- buildx
- olm-bundle
if: |
(always() && !cancelled()) &&
needs.olm-bundle.result == 'success'
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GOLANG_VERSION }}
check-latest: true
- name: Setup tools
run: |
make operator-sdk preflight
      - name: Login to container registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ env.REGISTRY_USER }}
password: ${{ env.REGISTRY_PASSWORD }}
- name: Run preflight container test
env:
CONTROLLER_IMG: ${{ needs.buildx.outputs.controller_img_ubi }}
PFLT_ARTIFACTS: "preflight_results"
run: |
bin/preflight check container ${CONTROLLER_IMG} \
--docker-config $HOME/.docker/config.json
- name: Archive the preflight results
uses: actions/upload-artifact@v4
with:
name: preflight_results
path: |
preflight_results
retention-days: 7
- name: Check preflight container results
run: |
for dir in `ls preflight_results`; do
PASS=`jq -r .passed preflight_results/$dir/results.json`
if [[ "$PASS" == "false" ]]
then
exit 1
fi
done
olm-scorecard:
name: Run OLM scorecard test
runs-on: ubuntu-24.04
needs:
- buildx
- olm-bundle
if: |
(always() && !cancelled()) &&
needs.olm-bundle.result == 'success' &&
github.repository_owner == 'cloudnative-pg'
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setting up KinD cluster
uses: helm/kind-action@v1.12.0
with:
wait: "600s"
version: ${{ env.KIND_VERSION }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
with:
platforms: ${{ env.PLATFORMS }}
cache-image: false
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login into docker registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ env.REGISTRY_USER }}
password: ${{ env.REGISTRY_PASSWORD }}
- name: Install Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GOLANG_VERSION }}
check-latest: true
- name: Running Scorecard tests
env:
BUNDLE_IMG: ${{ needs.buildx.outputs.bundle_img }}
run: |
make olm-scorecard
olm-tests:
strategy:
fail-fast: false
matrix:
test: [ kiwi, lemon, orange ]
name: Run OLM ${{ matrix.test }} test
runs-on: ubuntu-24.04
needs:
- buildx
- olm-bundle
if: |
(always() && !cancelled()) &&
needs.olm-bundle.result == 'success' &&
github.repository_owner == 'cloudnative-pg'
env:
VERSION: ${{ needs.buildx.outputs.commit_version }}
OPP_DEBUG: 1
OPP_PRODUCTION_TYPE: "k8s"
OPP_CONTAINER_OPT: "-t"
OPP_RELEASE_INDEX_NAME: "catalog_tmp"
steps:
- name: Checkout community-operators
uses: actions/checkout@v4
with:
repository: k8s-operatorhub/community-operators
persist-credentials: false
- name: Login into docker registry
uses: redhat-actions/podman-login@v1
with:
registry: ${{ env.REGISTRY }}
username: ${{ env.REGISTRY_USER }}
password: ${{ env.REGISTRY_PASSWORD }}
- name: Download the bundle
uses: actions/download-artifact@v4
with:
name: bundle
- name: Copy bundle in the community-operators
run: |
mkdir -p "operators/cloudnative-pg/${{ env.VERSION }}"
cp -R bundle/* "operators/cloudnative-pg/${{ env.VERSION }}"
rm -fr bundle.Dockerfile *.zip bundle/
- name: Test bundle
run: |
bash <(curl -sL ${{ env.OPP_SCRIPT_URL }}) ${{ matrix.test }} operators/cloudnative-pg/${{ env.VERSION }}/
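
The jq program in generate-unit-tests-jobs above expands a [min, max] minor
version range into the unit-test matrix; here is the same computation in
Python, with assumed example bounds (the real ones live in
.github/k8s_versions_scope.json):

scope = {"unit_test": {"min": "1.27", "max": "1.32"}}  # example bounds

def expand(min_version: str, max_version: str) -> list[str]:
    # walk the minor versions from min to max and suffix each with ".x"
    major, minor = (int(part) for part in min_version.split("."))
    _, max_minor = (int(part) for part in max_version.split("."))
    return [f"{major}.{m}.x" for m in range(minor, max_minor + 1)]

print(expand(scope["unit_test"]["min"], scope["unit_test"]["max"]))
# ['1.27.x', '1.28.x', '1.29.x', '1.30.x', '1.31.x', '1.32.x']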

.github/workflows/k8s-versions-check.yml vendored Normal file

@ -0,0 +1,137 @@
# Retrieves a list of supported image versions for the cloud providers and
# creates a PR to update each relative JSON file:
# - Kind (Kubernetes in Docker): kind_versions.json
# - GKE (Google Kubernetes Engine): gke_versions.json
# - AKS (Azure Kubernetes Service): aks_versions.json
# - EKS (Amazon Elastic Kubernetes Service): eks_versions.json
# - OCP (OpenShift): openshift_versions.json
name: k8s-versions-check
on:
schedule:
- cron: "30 0 * * *"
workflow_dispatch:
inputs:
limit:
description: 'Limit to the specified engines list (eks, aks, gke, kind, ocp)'
required: false
permissions:
contents: write
pull-requests: write
issues: read
defaults:
run:
shell: 'bash -Eeuo pipefail -x {0}'
env:
  # The minimal supported k8s version; versions lower than this are removed from the vendored lists
MINIMAL_K8S: "1.27"
MINIMAL_OCP: "4.12"
jobs:
check-public-clouds-k8s-versions:
runs-on: ubuntu-24.04
steps:
-
name: Checkout code
uses: actions/checkout@v4
-
        # There is no command to get the EKS k8s versions, so we have to parse the documentation
name: Get updated EKS versions
run: |
DOC_URL="https://raw.githubusercontent.com/awsdocs/amazon-eks-user-guide/mainline/latest/ug/clusters/kubernetes-versions-standard.adoc"
curl --silent "${DOC_URL}" | sed -e 's/.*Kubernetes \([0-9].[0-9][0-9]\).*/\1/;/^[0-9]\./!d' | uniq | \
awk -vv=$MINIMAL_K8S '$0>=v {print $0}' | \
jq -Rn '[inputs]' | tee .github/eks_versions.json
if: github.event.inputs.limit == null || github.event.inputs.limit == 'eks'
-
name: Azure Login
uses: azure/login@v2.2.0
with:
creds: ${{ secrets.AZURE_CREDENTIALS }}
if: github.event.inputs.limit == null || github.event.inputs.limit == 'aks'
-
name: Get updated AKS versions
run: |
az aks get-versions --location westeurope \
--query "reverse(sort(values[? isPreview != 'true' && contains(capabilities.supportPlan, 'KubernetesOfficial')].patchVersions.keys(@)[]))" -o tsv | \
sort -urk 1,1.5 | \
awk -vv=$MINIMAL_K8S '$0>=v {print $0}' | \
jq -Rn '[inputs]' | tee .github/aks_versions.json
if: github.event.inputs.limit == null || github.event.inputs.limit == 'aks'
-
name: 'Auth GKE'
uses: 'google-github-actions/auth@v2'
with:
credentials_json: '${{ secrets.GCP_SERVICE_ACCOUNT }}'
if: github.event.inputs.limit == null || github.event.inputs.limit == 'gke'
-
name: Set up Cloud SDK for GKE
uses: google-github-actions/setup-gcloud@v2
with:
project_id: ${{ secrets.GCP_PROJECT_ID }}
if: github.event.inputs.limit == null || github.event.inputs.limit == 'gke'
-
name: Install YQ
uses: frenck/action-setup-yq@v1
if: github.event.inputs.limit == null || github.event.inputs.limit == 'gke'
-
name: Get updated GKE versions
run: |
# Get the valid major versions from all the channels, convert them
# to json and write them to file.
YQEXPR=".validMasterVersions" #wokeignore:rule=master
gcloud container get-server-config --zone europe-west3-a --quiet | \
yq e ${YQEXPR} - | \
cut -d'.' -f '1-2' | \
uniq | \
sed 's/\([[:digit:]]\+\.[[:digit:]]\+\)/"\1"/' | \
yq '.[] | select( . >= strenv(MINIMAL_K8S) )' | \
jq -Rn '[inputs]' | tee .github/gke_versions.json
if: github.event.inputs.limit == null || github.event.inputs.limit == 'gke'
-
name: Get updated kind node version
        run: |
# Get the latest valid kind node version, convert them to json
# and write them to a file, starting from the MINIMAL_K8S
for baseversion in $(seq $MINIMAL_K8S 0.01 99); do
URL="https://registry.hub.docker.com/v2/repositories/kindest/node/tags?name=${baseversion}&ordering=last_updated"
v=$(curl -SsL "${URL}" | jq -rc '.results[].name' | grep -v "alpha" | sort -Vr | head -n1) || RC=$?
if [[ -z "${v}" ]]; then
break
fi
echo "${v}"
done | jq -Rs 'split("\n") | map(select(length>0)) | sort | reverse' | tee .github/kind_versions.json
if: github.event.inputs.limit == null || github.event.inputs.limit == 'kind'
-
name: Get updated OpenShift versions
run: |
          # The [24-9] character range skips the EOL 4.13 version;
          # this needs updating when 4.15 also goes EOL
curl -s https://mirror.openshift.com/pub/openshift-v4/clients/ocp/ | \
grep -e 'href.*"4\.1[24-9]\.[0-9].*"' | \
sed -e 's/\(.*\)href="\(4\.1[2-9]\)\(.*\)/\2/' | \
sort -Vru | \
awk -vv="$MINIMAL_OCP" '$0>=v {print $0}' | \
jq -Rn '[inputs]' | tee .github/openshift_versions.json
OCP_VERSIONS=`cat .github/openshift_versions.json | jq -r '"v"+.[-1]+"-v"+.[0]'`
sed -i -e 's/\(OPENSHIFT_VERSIONS ?= \)\(.*\)/\1'${OCP_VERSIONS}'/g' Makefile
if: github.event.inputs.limit == null || github.event.inputs.limit == 'ocp'
-
name: Create Pull Request if versions have been updated
uses: peter-evans/create-pull-request@v7
with:
token: ${{ secrets.REPO_GHA_PAT }}
title: "feat: Public Cloud K8S versions update"
body: "Update the versions used to test the operator on public cloud providers"
branch: "k8s-cloud-versions-update"
author: "public-cloud-k8s-versions-check <public-cloud-k8s-versions-check@users.noreply.github.com>"
add-paths: |
.github/**
Makefile
commit-message: "feat: Updated public cloud k8s tested versions"
signoff: true
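
One caveat with the `awk -vv=$MINIMAL_K8S '$0>=v'` filters above is that they
compare version strings lexicographically, which works for the current 1.2x
range but not in general. A sketch of the same cut-off with proper version
semantics, using the `packaging` library (which the PostgreSQL workflow below
already installs):

from packaging.version import Version

MINIMAL_K8S = "1.27"

def supported(versions: list[str]) -> list[str]:
    # keep only versions at or above the minimal one, newest first
    return sorted(
        (v for v in versions if Version(v) >= Version(MINIMAL_K8S)),
        key=Version,
        reverse=True,
    )

print(supported(["1.25", "1.27", "1.28", "1.9"]))  # ['1.28', '1.27']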


@ -0,0 +1,93 @@
# Checks the latest postgres image from `ghcr.io/cloudnative-pg/postgresql`,
# and if there is a new one, updates the codebase with it
name: latest-postgres-version-check
on:
schedule:
- cron: "30 0 * * *"
workflow_dispatch:
defaults:
run:
shell: "bash -Eeuo pipefail -x {0}"
jobs:
check-latest-postgres-version:
runs-on: ubuntu-24.04
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python 3.9
uses: actions/setup-python@v5
with:
python-version: 3.9
- name: Install Python dependencies
run: |
pip install packaging
- name: Generate PostgreSQL JSON files
run: |
python .github/postgres-versions-update.py
- name: Get the latest version of PostgreSQL Docker image
id: latest
env:
IMAGE_REPO: ghcr.io/cloudnative-pg/postgresql
run: |
LATEST_POSTGRES_VERSION=$(jq -r 'del(.[] | select(.[] | match("alpha|beta|rc"))) | .[keys | max][0]' < .github/pg_versions.json)
LATEST_POSTGRES_VERSION_IMAGE="${IMAGE_REPO}:${LATEST_POSTGRES_VERSION}"
echo "LATEST_POSTGRES_VERSION=$LATEST_POSTGRES_VERSION" >> $GITHUB_ENV
echo "LATEST_POSTGRES_VERSION_IMAGE=$LATEST_POSTGRES_VERSION_IMAGE" >> $GITHUB_ENV
- name: Get the current version of PostgreSQL
id: current
run: |
CURRENT_POSTGRES_VERSION_IMAGE=$(awk -F '"' '/DefaultImageName *=/{print $2}' pkg/versions/versions.go)
CURRENT_POSTGRES_VERSION=${CURRENT_POSTGRES_VERSION_IMAGE##*:}
echo "CURRENT_POSTGRES_VERSION=$CURRENT_POSTGRES_VERSION" >> $GITHUB_ENV
echo "CURRENT_POSTGRES_VERSION_IMAGE=$CURRENT_POSTGRES_VERSION_IMAGE" >> $GITHUB_ENV
- name: Update files to match the latest version of PostgreSQL
if: env.LATEST_POSTGRES_VERSION_IMAGE != env.CURRENT_POSTGRES_VERSION_IMAGE
env:
CURRENT_POSTGRES_VERSION: ${{ env.CURRENT_POSTGRES_VERSION }}
LATEST_POSTGRES_VERSION: ${{ env.LATEST_POSTGRES_VERSION }}
LATEST_POSTGRES_VERSION_IMAGE: ${{ env.LATEST_POSTGRES_VERSION_IMAGE }}
run: |
echo "New PostgreSQL version detected ; updating!"
# Update pkg/versions/versions.go
sed -i '/DefaultImageName *=/s@".*"@"'"${LATEST_POSTGRES_VERSION_IMAGE}"'"@' pkg/versions/versions.go
# Update docs directory (only .md and .yaml filename extensions)
find docs -type f \( -name '*.md' -o -name '*.yaml' \) \! -path '*release_notes*' -exec sed -i "/[ :]${CURRENT_POSTGRES_VERSION//./\\.}/s/${CURRENT_POSTGRES_VERSION//./\\.}/${LATEST_POSTGRES_VERSION}/g" {} +
- name: Create PR to update PostgreSQL version
if: env.LATEST_POSTGRES_VERSION_IMAGE != env.CURRENT_POSTGRES_VERSION_IMAGE
uses: peter-evans/create-pull-request@v7
env:
GITHUB_TOKEN: ${{ secrets.REPO_GHA_PAT }}
with:
title: "feat: update default PostgreSQL version to ${{ env.LATEST_POSTGRES_VERSION }}"
body: "Update default PostgreSQL version from ${{ env.CURRENT_POSTGRES_VERSION }} to ${{ env.LATEST_POSTGRES_VERSION }}"
branch: "postgres-versions-update"
author: "postgres-versions-updater <postgres-versions-updater@users.noreply.github.com>"
commit-message: "feat: update default PostgreSQL version to ${{ env.LATEST_POSTGRES_VERSION }}"
signoff: true
- name: Create Pull Request if postgresql versions have been updated
if: env.LATEST_POSTGRES_VERSION_IMAGE == env.CURRENT_POSTGRES_VERSION_IMAGE
uses: peter-evans/create-pull-request@v7
env:
GITHUB_TOKEN: ${{ secrets.REPO_GHA_PAT }}
with:
title: "test: Updated Postgres versions used in E2E tests"
body: "Update the Postgres versions used in E2E tests"
branch: "postgres-versions-update"
author: "postgres-versions-updater <postgres-versions-updater@users.noreply.github.com>"
add-paths: ".github/"
commit-message: "test: Updated Postgres versions used in E2E tests"
signoff: true
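
A Python reading of the jq one-liner in the `latest` step above, assuming
.github/pg_versions.json maps each major version to its tags with the newest
patch first (the sample data here is made up):

import re

pg_versions = {
    "16": ["16.6", "16.5"],
    "17": ["17.2", "17.1"],
    "18": ["18beta1"],
}

# drop majors that contain a pre-release tag, then take the newest tag
# of the highest remaining major version
stable = {
    major: tags
    for major, tags in pg_versions.items()
    if not any(re.search(r"alpha|beta|rc", tag) for tag in tags)
}
latest = stable[max(stable, key=int)][0]
print(latest)  # 17.2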


@ -0,0 +1,29 @@
# This workflow will inspect a pull request to ensure there is a linked issue or a
# valid issue is mentioned in the body. If neither is present it fails the check and adds
# a comment alerting users of this missing requirement.
name: VerifyIssue
on:
pull_request:
branches-ignore:
- 'renovate/**'
types:
- edited
- synchronize
- opened
- reopened
- labeled
- unlabeled
jobs:
verify_linked_issue:
runs-on: ubuntu-latest
name: Ensure Pull Request has a linked issue.
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-issue') }}
steps:
- name: Verify Linked Issue
uses: hattan/verify-linked-issue-action@v1.1.5
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/refresh-licenses.yml vendored Normal file

@ -0,0 +1,41 @@
# Refresh the "licenses" directory and create a PR if there are any changes
name: Refresh licenses directory
on:
workflow_dispatch:
schedule:
- cron: "30 0 * * 1"
env:
GOLANG_VERSION: "1.24.x"
jobs:
licenses:
name: Refresh licenses
runs-on: ubuntu-24.04
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Install Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GOLANG_VERSION }}
check-latest: true
- name: Generate licenses
run: |
make licenses
- name: Create Pull Request if licenses have been updated
uses: peter-evans/create-pull-request@v7
with:
token: ${{ secrets.REPO_GHA_PAT }}
title: "chore: refresh licenses directory"
body: "Refresh the licenses directory"
branch: "license-updater"
author: "license-updater <license-updater@users.noreply.github.com>"
add-paths: |
licenses/**
commit-message: "chore: refresh licenses directory"
signoff: true

.github/workflows/registry-clean.yml vendored Normal file

@ -0,0 +1,36 @@
# This workflow runs daily to clean up the `*-testing` images older than the
# cut-off period specified in `snok/container-retention-policy`
name: clean-testing-package
on:
workflow_dispatch:
schedule:
- cron: '0 0 * * *'
env:
IMAGE_NAME: "cloudnative-pg-testing"
CONTAINER_IMAGE_NAMES: "pgbouncer-testing, postgresql-testing, postgis-testing"
jobs:
clean-ghcr:
name: delete old testing container images
runs-on: ubuntu-latest
steps:
- name: Delete '-testing' operator images in ${{ env.IMAGE_NAME }}
uses: snok/container-retention-policy@v3.0.0
with:
image-names: ${{ env.IMAGE_NAME }}
cut-off: 5d
keep-n-most-recent: 1
account: ${{ github.repository_owner }}
token: ${{ secrets.GITHUB_TOKEN }}
- name: Delete '-testing' operand images
uses: snok/container-retention-policy@v3.0.0
if: ${{ github.repository_owner == 'cloudnative-pg' }}
with:
image-names: ${{ env.CONTAINER_IMAGE_NAMES }}
cut-off: 1w
keep-n-most-recent: 1
account: "cloudnative-pg"
token: ${{ secrets.REPO_GHA_PAT }}

.github/workflows/release-pr.yml vendored Normal file

@ -0,0 +1,37 @@
# Create a PR for a release when a commit is pushed on a release/v* branch
name: release-pr
on:
push:
branches:
- release/v*
jobs:
pull-request:
runs-on: ubuntu-24.04
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Get tag
run: |
TAG=${GITHUB_REF##*/v}
if [[ "${TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
DEST=$(echo ${TAG} | awk -F '[.]' '{print "release-"$1"."$2}')
else
DEST="main"
fi
echo "TAG=${TAG}" >> $GITHUB_ENV
echo "DEST=${DEST}" >> $GITHUB_ENV
-
name: Pull Request
id: open-pr
uses: repo-sync/pull-request@v2.12
with:
destination_branch: ${{ env.DEST }}
github_token: ${{ secrets.GITHUB_TOKEN }}
pr_body: "Automated PR. Will trigger the ${{ env.TAG }} release when approved."
pr_label: release
pr_title: "Version tag to ${{ env.TAG }}"

.github/workflows/release-publish.yml vendored Normal file

@ -0,0 +1,416 @@
# When a release tag is pushed, create and publish operator images on GitHub
# Registry. Then generate a release on GitHub.
name: release-publish
on:
push:
tags:
- v*
env:
GOLANG_VERSION: "1.24.x"
REGISTRY: "ghcr.io"
permissions:
contents: write
packages: write
id-token: write
jobs:
check-version:
name: Evaluate release tag
runs-on: ubuntu-24.04
outputs:
is_latest: ${{ env.IS_LATEST }}
is_stable: ${{ env.IS_STABLE }}
steps:
-
name: Checkout
uses: actions/checkout@v4
with:
# To identify the commit we need the history and all the tags.
fetch-depth: 0
-
name: Check release version
run: |
tag="${GITHUB_REF#refs/tags/v}"
latest_release_branch=$(git branch -rl 'origin/release-*' | sort -r | head -n1 | sed -e 's/^.*\(release-.*\)/\1/')
current_release_branch=$(echo "${tag}" | sed -e 's/\([0-9]\+.[0-9]\+\).*/release-\1/')
is_latest="false"
if [[ "$latest_release_branch" == "$current_release_branch" ]]; then
is_latest="true"
fi
is_stable="false"
if [[ "$tag" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
is_stable="true"
fi
echo "IS_LATEST=${is_latest}" >> $GITHUB_ENV
echo "IS_STABLE=${is_stable}" >> $GITHUB_ENV
release:
name: Create Github release
runs-on: ubuntu-24.04
needs:
- check-version
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Get tag
run: |
tag="${GITHUB_REF#refs/tags/v}"
version="${tag#v}"
file=$(echo ${version} | awk -F '[.]' '{print "release_notes/v"$1"."$2".md"}')
echo "TAG=${tag}" >> $GITHUB_ENV
echo "VERSION=${version}" >> $GITHUB_ENV
echo "FILE=${file}" >> $GITHUB_ENV
-
name: Generate release notes
run: |
docker run --rm -v $(pwd):/src mist/submark \
submark -O --h2 "Version ${{ env.TAG }}" \
--out-file /src/release_notes.md \
/src/docs/src/${{ env.FILE }}
-
name: Release
uses: softprops/action-gh-release@v2
with:
body_path: release_notes.md
draft: false
name: v${{ env.TAG }}
files: releases/cnpg-${{ env.VERSION }}.yaml
make_latest: ${{ needs.check-version.outputs.is_latest == 'true' && needs.check-version.outputs.is_stable == 'true' }}
prerelease: ${{ needs.check-version.outputs.is_stable == 'false' }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
release-binaries:
name: Build containers
runs-on: ubuntu-24.04
needs:
- check-version
outputs:
version: ${{ env.IMAGE_TAG }}
author_name: ${{ steps.build-meta.outputs.author_name }}
author_email: ${{ steps.build-meta.outputs.author_email }}
platforms: ${{ env.PLATFORMS }}
ubi_img: ${{ fromJSON(steps.bake-push.outputs.metadata)['ubi']['image.name'] }}
steps:
-
name: Checkout
uses: actions/checkout@v4
with:
# To identify the commit we need the history and all the tags.
fetch-depth: 0
-
name: Install Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GOLANG_VERSION }}
check-latest: true
-
name: Build meta
id: build-meta
run: |
commit_sha=${{ github.sha }}
commit_date=$(git log -1 --pretty=format:'%ad' --date short "${commit_sha}")
tag="${GITHUB_REF#refs/tags/v}"
# get git user and email
author_name=$(git show -s --format='%an' "${commit_sha}")
author_email=$(git show -s --format='%ae' "${commit_sha}")
# use git describe to get the nearest tag and use that to build the version (e.g. 1.4.0-dev24 or 1.4.0)
commit_version=$(git describe --tags --match 'v*' "${commit_sha}"| sed -e 's/^v//; s/-g[0-9a-f]\+$//; s/-\([0-9]\+\)$/-dev\1/')
commit_short=$(git rev-parse --short "${commit_sha}")
echo "DATE=${commit_date}" >> $GITHUB_ENV
echo "VERSION=${commit_version}" >> $GITHUB_ENV
echo "IMAGE_TAG=${tag}" >> $GITHUB_ENV
echo "REPO_OWNER=${GITHUB_REPOSITORY_OWNER,,}" >> $GITHUB_ENV
echo "COMMIT=${commit_short}" >> $GITHUB_ENV
echo "author_name=${author_name}" >> $GITHUB_OUTPUT
echo "author_email=${author_email}" >> $GITHUB_OUTPUT
-
name: Import GPG key
id: import_gpg
uses: crazy-max/ghaction-import-gpg@v6
with:
gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }}
passphrase: ${{ secrets.GPG_PASSPHRASE }}
-
name: Set GoReleaser environment
env:
GPG_PRIVATE_KEY: ${{ secrets.GPG_PRIVATE_KEY }}
run: |
echo GOPATH=$(go env GOPATH) >> $GITHUB_ENV
echo PWD=$(pwd) >> $GITHUB_ENV
echo "$GPG_PRIVATE_KEY" > gpg_signing_key.asc
-
name: Run GoReleaser
uses: goreleaser/goreleaser-action@v6
with:
distribution: goreleaser
version: v2
args: release --clean --timeout 60m
env:
DATE: ${{ env.DATE }}
COMMIT: ${{ env.COMMIT }}
VERSION: ${{ env.VERSION }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GPG_FINGERPRINT: ${{ steps.import_gpg.outputs.fingerprint }}
NFPM_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}
-
name: Publish Krew
if: |
needs.check-version.outputs.is_latest == 'true' &&
needs.check-version.outputs.is_stable == 'true'
uses: rajatjindal/krew-release-bot@v0.0.47
with:
krew_template_file: dist/krew/cnpg.yaml
-
name: Detect platforms
run: |
# Keep in mind that adding more platforms (architectures) will increase the building
# time even if we use the ghcache for the building process.
platforms="linux/amd64,linux/arm64"
echo "PLATFORMS=${platforms}" >> $GITHUB_ENV
-
name: Set up QEMU
uses: docker/setup-qemu-action@v3
with:
platforms: ${{ env.PLATFORMS }}
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
-
name: Login to ghcr.io
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
-
name: Build and push
uses: docker/bake-action@v6
id: bake-push
env:
environment: "production"
buildVersion: ${{ env.VERSION }}
tag: ${{ env.IMAGE_TAG }}
registry: ${{ env.REGISTRY }}/${{ env.REPO_OWNER }}
revision: ${{ env.COMMIT }}
latest: ${{ needs.check-version.outputs.is_latest == 'true' && needs.check-version.outputs.is_stable == 'true' }}
with:
source: .
push: true
no-cache: true
targets: "default"
-
name: Install cosign
uses: sigstore/cosign-installer@v3
# See https://github.blog/security/supply-chain-security/safeguard-container-signing-capability-actions/
# and https://github.com/actions/starter-workflows/blob/main/ci/docker-publish.yml for more details on
# how to use cosign.
- name: Sign images
run: |
images=$(echo '${{ steps.bake-push.outputs.metadata }}' |
jq -r '.[] | (."image.name" | sub(",.*";"" )) + "@" + ."containerimage.digest"'
)
cosign sign --yes ${images}
olm-bundle:
name: Create OLM bundle and catalog
runs-on: ubuntu-24.04
needs:
- check-version
- release-binaries
if: |
(always() && !cancelled()) &&
needs.release-binaries.result == 'success' &&
needs.check-version.outputs.is_latest == 'true' &&
needs.check-version.outputs.is_stable == 'true'
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
with:
platforms: ${{ needs.release-binaries.outputs.platforms }}
- name: Install Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GOLANG_VERSION }}
check-latest: true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to ghcr.io
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Set bundle variables
env:
UBI_IMG: ${{ needs.release-binaries.outputs.ubi_img }}
run: |
echo "CONTROLLER_IMG=${UBI_IMG}" >> $GITHUB_ENV
echo "BUNDLE_IMG=${UBI_IMG}-bundle" >> $GITHUB_ENV
echo "CATALOG_IMG=${UBI_IMG}-catalog" >> $GITHUB_ENV
- name: Create bundle
env:
CONTROLLER_IMG: ${{ env.CONTROLLER_IMG }}
BUNDLE_IMG: ${{ env.BUNDLE_IMG }}
CATALOG_IMG: ${{ env.CATALOG_IMG }}
run: |
make olm-catalog
- name: Archive the bundle manifests
uses: actions/upload-artifact@v4
with:
name: bundle
path: |
bundle.Dockerfile
bundle/
cloudnative-pg-catalog.yaml
operatorhub_pr:
name: Create remote PR for OperatorHub
runs-on: ubuntu-24.04
needs:
- release-binaries
- olm-bundle
if: |
(always() && !cancelled()) &&
needs.olm-bundle.result == 'success'
env:
VERSION: ${{ needs.release-binaries.outputs.version }}
steps:
- name: Checkout community-operators
uses: actions/checkout@v4
with:
repository: k8s-operatorhub/community-operators
fetch-depth: 0
persist-credentials: false
- name: Download the bundle
uses: actions/download-artifact@v4
with:
name: bundle
- name: Copy bundle in the community-operators
run: |
mkdir -p "operators/cloudnative-pg/${{ env.VERSION }}"
cp -R bundle/* "operators/cloudnative-pg/${{ env.VERSION }}"
rm -fr cloudnative-pg-catalog.yaml bundle.Dockerfile *.zip bundle/
- name: Create Remote Pull Request
uses: peter-evans/create-pull-request@v7
with:
token: ${{ secrets.REPO_GHA_PAT }}
commit-message: "operator cloudnative-pg (${{ env.VERSION }})"
title: "operator cloudnative-pg (${{ env.VERSION }})"
signoff: true
branch: release-cloudnativepg-${{ env.VERSION }}
delete-branch: true
push-to-fork: cloudnative-pg/community-operators
body: |
            Thanks for submitting your Operator. Please check the list below before you create your Pull Request.
### Updates to existing Operators
* [x] Did you create a `ci.yaml` file according to the [update instructions](https://github.com/operator-framework/community-operators/blob/master/docs/operator-ci-yaml.md)?
* [x] Is your new CSV pointing to the previous version with the `replaces` property if you chose `replaces-mode` via the `updateGraph` property in `ci.yaml`?
* [x] Is your new CSV referenced in the [appropriate channel](https://github.com/operator-framework/community-operators/blob/master/docs/packaging-operator.md#channels) defined in the `package.yaml` or `annotations.yaml` ?
* [x] Have you tested an update to your Operator when deployed via OLM?
* [x] Is your submission [signed](https://github.com/operator-framework/community-operators/blob/master/docs/contributing-prerequisites.md#sign-your-work)?
### Your submission should not
* [x] Modify more than one operator
* [x] Modify an Operator you don't own
* [x] Rename an operator - please remove and add with a different name instead
* [x] Modify any files outside the above mentioned folders
* [x] Contain more than one commit. **Please squash your commits.**
### Operator Description must contain (in order)
1. [x] Description about the managed Application and where to find more information
2. [x] Features and capabilities of your Operator and how to use it
3. [x] Any manual steps about potential pre-requisites for using your Operator
### Operator Metadata should contain
* [x] Human readable name and 1-liner description about your Operator
* [x] Valid [category name](https://github.com/operator-framework/community-operators/blob/master/docs/packaging-operator.md#categories)<sup>1</sup>
* [x] One of the pre-defined [capability levels](https://github.com/operator-framework/operator-courier/blob/4d1a25d2c8d52f7de6297ec18d8afd6521236aa2/operatorcourier/validate.py#L556)<sup>2</sup>
* [x] Links to the maintainer, source code and documentation
* [x] Example templates for all Custom Resource Definitions intended to be used
* [x] A quadratic logo
Remember that you can preview your CSV [here](https://operatorhub.io/preview).
--
<sup>1</sup> If you feel your Operator does not fit any of the pre-defined categories, file an issue against this repo and explain your need
<sup>2</sup> For more information see [here](https://sdk.operatorframework.io/docs/overview/#operator-capability-level)
publish_bundle:
name: Publish OLM Bundle
needs:
- olm-bundle
- release-binaries
if: |
(always() && !cancelled()) &&
needs.olm-bundle.result == 'success' &&
github.repository_owner == 'cloudnative-pg'
env:
VERSION: ${{ needs.release-binaries.outputs.version }}
runs-on: ubuntu-24.04
steps:
-
name: Checkout artifact
uses: actions/checkout@v4
with:
repository: cloudnative-pg/artifacts
token: ${{ secrets.REPO_GHA_PAT }}
ref: main
fetch-depth: 0
-
name: Configure git user
run: |
git config user.email "${{ needs.release-binaries.outputs.author_email }}"
git config user.name "${{ needs.release-binaries.outputs.author_name }}"
-
name: Download the bundle
uses: actions/download-artifact@v4
with:
name: bundle
-
name: Copy the bundle
run: |
mkdir -p "bundles/${{ env.VERSION }}"
cp -R bundle/* "bundles/${{ env.VERSION }}"
rm -fr cloudnative-pg-catalog.yaml bundle.Dockerfile *.zip bundle/
-
name: Prepare commit message
env:
COMMIT_MESSAGE: |
operator cloudnative-pg (${{ env.VERSION }})
run: |
# Skip creating the commit if there are no changes
[ -n "$(git status -s)" ] || exit 0
git add bundles/${{ env.VERSION }}
git commit -sm "${COMMIT_MESSAGE}"
-
name: Push commit
uses: ad-m/github-push-action@v0.8.0
with:
github_token: ${{ secrets.REPO_GHA_PAT }}
repository: cloudnative-pg/artifacts
branch: "main"

.github/workflows/release-tag.yml vendored Normal file

@ -0,0 +1,29 @@
# Create a tag when a PR on a release/v* branch is merged
name: release-tag
on:
pull_request:
types:
- closed
branches:
- release-*
- main
paths:
- 'pkg/versions/versions.go'
jobs:
tag:
runs-on: ubuntu-24.04
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Create tag
if: github.event.pull_request.merged == true && startsWith(github.head_ref, 'release/v')
uses: christophebedard/tag-version-commit@v1.7.0
with:
token: ${{ secrets.REPO_GHA_PAT }}
version_regex: '^Version tag to ([0-9]+\.[0-9]+\.[0-9]+(?:-[a-z][0-9a-z]*)?)'
version_tag_prefix: v
dry_run: false

.github/workflows/require-labels.yml vendored Normal file

@ -0,0 +1,26 @@
# Verify if a pull request contains the labels required to enable the merge button.
name: require-labels
on:
pull_request:
types:
- opened
- synchronize
- reopened
- labeled
- unlabeled
env:
REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }}
jobs:
require-labels:
name: Require labels
runs-on: ubuntu-24.04
steps:
- name: Require labels
uses: docker://agilepathway/pull-request-label-checker:v1.6.65
with:
any_of: "ok to merge :ok_hand:"
none_of: "do not merge"
repo_token: ${{ env.REPO_TOKEN }}

.github/workflows/snyk.yml vendored Normal file

@ -0,0 +1,44 @@
# Run Snyk scanning on the project only when something
# is pushed to main
name: Snyk scanning
on:
push:
branches:
- main
workflow_dispatch:
jobs:
security:
name: Security scan
runs-on: ubuntu-24.04
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Static Code Analysis
uses: snyk/actions/golang@0.4.0
continue-on-error: true
env:
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
with:
command: 'code test'
args: --sarif-file-output=snyk-static.sarif
- name: Upload result to GitHub Code Scanning
uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: snyk-static.sarif
- name: Vulnerability scan
uses: snyk/actions/golang@0.4.0
continue-on-error: true
env:
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
with:
args: --sarif-file-output=snyk-test.sarif
- name: Upload result to GitHub Code Scanning
uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: snyk-test.sarif

.github/workflows/spellcheck.yml vendored Normal file

@ -0,0 +1,31 @@
name: spellcheck-woke
on:
push:
workflow_dispatch:
jobs:
# Check code for non-inclusive language
woke:
name: Run woke
runs-on: ubuntu-24.04
steps:
- name: Checkout
uses: actions/checkout@v4
- name: woke
uses: get-woke/woke-action@v0
with:
# Cause the check to fail on any broken rules
fail-on-error: true
# Enforce en-us spell check
spellcheck:
name: Run spellcheck
runs-on: ubuntu-24.04
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Spellcheck
uses: rojopolis/spellcheck-github-actions@0.47.0

.github/workflows/sync-api.yml vendored Normal file

@ -0,0 +1,17 @@
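# Notify the cloudnative-pg/api repository via a repository dispatch event
# whenever changes land on main, so the published API definitions stay in sync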
name: Sync API
on:
push:
branches:
- main
jobs:
trigger-sync:
runs-on: ubuntu-latest
steps:
- name: Invoke repository dispatch
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.REPO_GHA_PAT }}
repository: cloudnative-pg/api
event-type: sync-api

.gitignore vendored Normal file

@ -0,0 +1,39 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
/bin/
/testbin/
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# editor and IDE paraphernalia
.idea
*.swp
*.swo
*~
# Testing artifacts and logs
_*/
tests/e2e/out/
tests/e2e/*_logs/
# goreleaser
/dist/
*.asc
# Spellcheck dictionary
dictionary.dic
# OLM ignores
bundle/
bundle.Dockerfile
catalog
catalog.Dockerfile
cloudnative-pg-catalog.yaml

.golangci.yml Normal file

@ -0,0 +1,130 @@
linters-settings:
lll:
line-length: 120
gci:
sections:
- standard
- default
- prefix(github.com/cloudnative-pg/cloudnative-pg)
- blank
- dot
gosec:
excludes:
- G101 # remove this exclude when https://github.com/securego/gosec/issues/1001 is fixed
linters:
# please, do not use `enable-all`: it's deprecated and will be removed soon.
# inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint
disable-all: true
enable:
- asciicheck
- bodyclose
- dogsled
- dupl
- durationcheck
- errcheck
- copyloopvar
- gci
- gocognit
- goconst
- gocritic
- gocyclo
- gofmt
- gofumpt
- goheader
- goimports
- gomoddirectives
- gomodguard
- goprintffuncname
- gosec
- gosimple
- govet
- ginkgolinter
- importas
- ineffassign
- lll
- makezero
- misspell
- nakedret
- nestif
- prealloc
- predeclared
- revive
- rowserrcheck
- sqlclosecheck
- staticcheck
- stylecheck
- thelper
- tparallel
- typecheck
- unconvert
- unparam
- unused
- wastedassign
- whitespace
# to be checked:
# - errorlint
# - forbidigo
# - forcetypeassert
# - goerr113
# - ifshort
# - nilerr
# - nlreturn
# - noctx
# - nolintlint
# - paralleltest
# - promlinter
# - tagliatelle
# - wrapcheck
# don't enable:
# - cyclop
# - depguard
# - exhaustive
# - exhaustivestruct
# - funlen
# - gochecknoglobals
# - gochecknoinits
# - godot
# - godox
# - gomnd
# - testpackage
# - wsl
# deprecated:
# - deadcode
# - golint
# - interfacer
# - maligned
# - scopelint
# - structcheck
# - varcheck
run:
timeout: 5m
issues:
exclude-rules:
# Allow dot imports for ginkgo and gomega
- source: ginkgo|gomega
linters:
- revive
text: "should not use dot imports"
# Exclude some linters from running on tests files.
- path: _test\.go
linters:
- goconst
# Exclude lll issues for lines with long annotations
- linters:
- lll
source: "//\\s*\\+"
# We have no control of this in zz_generated files and it looks like that excluding those files is not enough
# so we disable "ST1016: methods on the same type should have the same receiver name" in api directory
- linters:
- stylecheck
text: "ST1016:"
path: api/
exclude-use-default: false
exclude-files:
- zz_generated.*

.goreleaser.yml Normal file

@ -0,0 +1,142 @@
version: 2
project_name: cnpg
release:
github:
owner: cloudnative-pg
name: cloudnative-pg
env:
- GO111MODULE=on
- CGO_ENABLED=0
before:
hooks:
- go mod download
builds:
- id: manager
binary: manager/manager_{{ .Arch }}
main: cmd/manager/main.go
no_unique_dist_dir: true
gcflags:
- all=-trimpath={{.Env.GOPATH}};{{.Env.PWD}}
ldflags:
- -s
- -w
- -X github.com/cloudnative-pg/cloudnative-pg/pkg/versions.buildVersion={{.Env.VERSION}}
- -X github.com/cloudnative-pg/cloudnative-pg/pkg/versions.buildCommit={{.Env.COMMIT}}
- -X github.com/cloudnative-pg/cloudnative-pg/pkg/versions.buildDate={{.Env.DATE}}
goos:
- linux
goarch:
- amd64
- arm64
- id: manager-race
binary: manager/manager_{{ .Arch }}
main: cmd/manager/main.go
no_unique_dist_dir: true
skip: >-
{{ if and (isEnvSet "RACE") (eq .Env.RACE "true") }}false{{ else }}true{{ end }}
gcflags:
- all=-trimpath={{.Env.GOPATH}};{{.Env.PWD}}
ldflags:
- -race
- -s
- -w
- -X github.com/cloudnative-pg/cloudnative-pg/pkg/versions.buildVersion={{.Env.VERSION}}
- -X github.com/cloudnative-pg/cloudnative-pg/pkg/versions.buildCommit={{.Env.COMMIT}}
- -X github.com/cloudnative-pg/cloudnative-pg/pkg/versions.buildDate={{.Env.DATE}}
goos:
- linux
goarch:
- amd64
- arm64
- id: kubectl-cnpg
binary: kubectl-cnpg
main: cmd/kubectl-cnpg/main.go
gcflags:
- all=-trimpath={{.Env.GOPATH}};{{.Env.PWD}}
ldflags:
- -s
- -w
- -X github.com/cloudnative-pg/cloudnative-pg/pkg/versions.buildVersion={{.Env.VERSION}}
- -X github.com/cloudnative-pg/cloudnative-pg/pkg/versions.buildCommit={{.Env.COMMIT}}
- -X github.com/cloudnative-pg/cloudnative-pg/pkg/versions.buildDate={{.Env.DATE}}
goos:
- darwin
- linux
- windows
goarch:
- amd64
- 386
- arm64
- arm
- ppc64le
- s390x
goarm:
- 5
- 6
- 7
ignore:
- goos: darwin
goarch: 386
- goos: windows
goarch: ppc64le
- goos: windows
goarch: s390x
archives:
- name_template: >-
kubectl-cnpg_{{ .Version }}_
{{- .Os }}_
{{- if eq .Arch "amd64" }}x86_64
{{- else if eq .Arch "386" }}i386
{{- else }}{{ .Arch }}{{ with .Arm }}v{{ . }}{{ end }}{{ end }}
builds:
- kubectl-cnpg
nfpms:
- id: kubectl-cnpg
file_name_template: >-
kubectl-cnpg_{{ .Version }}_
{{- .Os }}_
{{- if eq .Arch "amd64" }}x86_64
{{- else if eq .Arch "386" }}i386
{{- else }}{{ .Arch }}{{ with .Arm }}v{{ . }}{{ end }}{{ end }}
homepage: https://github.com/cloudnative-pg/cloudnative-pg
bindir: /usr/local/bin
maintainer: 'Marco Nenciarini <marco.nenciarini@enterprisedb.com>'
builds:
- kubectl-cnpg
formats:
- rpm
- deb
rpm:
signature:
key_file: gpg_signing_key.asc
deb:
signature:
key_file: gpg_signing_key.asc
checksum:
name_template: '{{ .ProjectName }}-{{ .Version }}-checksums.txt'
snapshot:
version_template: "{{ .Tag }}-next"
changelog:
disable: true
signs:
- artifacts: checksum
args: ["--batch", "-u", "{{ .Env.GPG_FINGERPRINT }}", "--output", "${signature}", "--detach-sign", "${artifact}"]
krews:
- name: cnpg
homepage: https://github.com/cloudnative-pg/cloudnative-pg
short_description: 'Manage your CloudNativePG clusters'
skip_upload: true
description: 'This plugin provides multiple commands to help you manage your CloudNativePG clusters.'

.snyk Normal file

@ -0,0 +1,5 @@
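# Snyk policy file: paths excluded from Snyk scans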
exclude:
global:
- releases/**
- tests/**
- docs/**

.spellcheck.yaml Normal file

@ -0,0 +1,31 @@
matrix:
- name: markdown
sources:
- 'docs/src/*.md'
- 'docs/src/*/*.md'
- 'config/olm-manifests/bases/*.yaml'
default_encoding: utf-8
aspell:
lang: en
d: en_US
dictionary:
wordlists:
- .wordlist-en-custom.txt
pipeline:
- pyspelling.filters.context:
context_visible_first: true
delimiters:
# Ignore multiline content between fences (fences can have 3 or more back ticks)
# ```
# content
# ```
- open: '(?s)^(?P<open> *`{3,})'
close: '^(?P=open)$'
# Ignore text between inline back ticks
- open: '(?P<open>`+)'
close: '(?P=open)'
- open: '(?P<open><!--)'
close: '(?P<close>-->)'
- open: '.*base64data.*'
close: "$"
- pyspelling.filters.url:

.woke.yaml Normal file

@ -0,0 +1,16 @@
ignore_files:
- config/crd/bases/postgresql.cnpg.io_poolers.yaml
- tests/e2e/fixtures/upgrade/current-manifest.yaml
- tests/e2e/fixtures/upgrade/current-manifest-prime.yaml
- hack/install-config.yaml.template
# For the default internal rules, see https://github.com/get-woke/woke/blob/main/internal/rule/default.yaml
rules:
- name: master
terms:
- master
alternatives:
- leader
- primary
options:
word_boundary: true

.wokeignore Normal file

@ -0,0 +1,5 @@
licenses
testdata
releases/
.github/workflows/release-publish.yml
.github/workflows/continuous-integration.yml

.wordlist-en-custom.txt Normal file

File diff suppressed because it is too large

64
ADOPTERS.md Normal file
View File

@ -0,0 +1,64 @@
# Adopters
Below is a list of organizations and users who have publicly shared that
they're using PostgreSQL in Kubernetes with the CloudNativePG operator in a
production environment.
The purpose of this list is to inspire others to join the movement and help
grow our open-source community and project.
Adding your organization takes just 5 minutes of your time, but it means a lot
to us!
## How to Add Your Organization
You can add your organization to this list in two ways:
- [Open a pull request](https://github.com/cloudnative-pg/cloudnative-pg/pulls)
to directly update this file.
- [Edit the file](https://github.com/cloudnative-pg/cloudnative-pg/blob/main/ADOPTERS.md)
directly on GitHub.
Use the commit title: **"docs: add <ORGANIZATION_NAME> to `ADOPTERS.md`"** and
be sure to [sign off your work](contribute/README.md#sign-your-work).
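For example, the commit could be created as follows (the organization name is
illustrative):

```sh
# -s adds the required Signed-off-by trailer
git commit -s -m "docs: add Acme Corp to ADOPTERS.md"
```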
If you need any assistance, feel free to ask in our Slack chat; we're here to
help!
## CloudNativePG Adopters
This list is sorted in chronological order, based on the submission date.
| Organization | Contact | Date | Description of Use |
| ------------ | ------- | ---- | ------------------ |
| [EDB](https://enterprisedb.com) | @gbartolini | 2023-02-21 | EDB's DataBase as a Service solution, [BigAnimal](https://www.enterprisedb.com/products/biganimal-cloud-postgresql), relies on CloudNativePG to run PostgreSQL and Postgres Distributed workloads. EDB is one of the primary contributors to the open source PostgreSQL project and the founder of CloudNativePG. |
| [Clustermarket](https://clustermarket.com/) | @itay-grudev | 2023-02-25 | Primary production database cluster. Clustermarket provides the easiest way to manage shared lab instrument scheduling and get all your team members' schedules aligned. |
| [Opencell](https://opencellsoft.com/) | @AntoineMicheaOpencell | 2023-02-27 | Opencell is an open source agile monetization platform that uses CloudNativePG to run PostgreSQL clusters for its SaaS. |
| [Clastix](https://clastix.io/) | @prometherion | 2023-03-14 | Used as an available [`DataStore` driver](https://kamaji.clastix.io/guides/postgresql-datastore/) for [Kamaji](https://github.com/clastix/kamaji) `TenantControlPlane` resources, also known as Kubernetes Control Planes running as regular pods in a management cluster to offer Kubernetes as a Service as a Cloud hyper-scaler. |
| [Tembo](https://tembo.io/) | @tembo-io | 2023-07-17 | Tembo is the developer platform for PostgreSQL extensions. Build and share extensions with [Trunk](https://pgt.dev), and use any extension on Tembo Cloud. |
| [CNDI](https://cndi.dev) | @johnstonmatt | 2023-08-21 | Provides simple workflow to deploy self-hosted CloudNativePG clusters with GitOps and Infrastructure as Code. |
| [PITS Global Data Recovery Services](https://www.pitsdatarecovery.net/) | @benjx1990 | 2023-09-07 | CloudNativePG is used to easily manage highly-loaded database clusters |
| [OptimaData](https://www.optimadata.nl) | @edco-wallet | 2023-09-25 | OptimaData as the Dutch database expert company has done several projects running CloudNativePG for managing Postgres clusters. Read our [how to run Postgres on Kubernetes blogpost](https://www.optimadata.nl/blogs/3/k9pv6z-how-to-postgres-on-kubernetes%2C-part-2) to learn more and how easy you can deploy with CloudNativePG. |
| [Enix](https://enix.io) | @rdegez | 2023-10-06 | Enix is a French Managed Services Provider specializing in the operation of Kubernetes clusters across all types of infrastructure (VMs and bare-metal on both public and private clouds). Our customer platforms often require PostgreSQL databases, and we are pleased to use CloudNativePG to install & manage them. |
| [WienIT](https://wienit.at) | @smiyc | 2023-10-11 | Hello 👋 We are WienIT, the central IT & business partner of [Wiener Stadtwerke Group](https://wienerstadtwerke.at). As an IT service provider, we're using CloudNativePG to provide highly available PostgreSQL clusters. |
| [Shinkansen](https://shinkansen.finance) | @utaladriz, @afiebig | 2023-11-16 | Primary production highly available PostgreSQL cluster, ISO27001 backup and recovery compliance |
| [Ænix](https://aenix.io) | @kvaps | 2024-02-11 | Ænix provides consulting services for cloud providers and uses CloudNativePG in free PaaS platform [Cozystack](https://cozystack.io) for running PostgreSQL-as-a-Service. |
| [IBM](https://www.ibm.com) | @pgodowski | 2024-02-20 | IBM uses CloudNativePG as the embedded SQL database within the family of [IBM Cloud Pak](https://www.ibm.com/cloud-paks) products, running as customer-managed software on top of [OpenShift Container Platform](https://www.redhat.com/en/technologies/cloud-computing/openshift/container-platform). |
| [Google Cloud](https://cloud.google.com/) | @mastersingh24 | 2024-03-12 | Leverage the full potential of cutting-edge PostgreSQL and CloudNativePG on [Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) with EDB Community 360 PostgreSQL available in the [Google Cloud Marketplace](https://console.cloud.google.com/marketplace/product/public-edb-ppas/edb-postgresql). |
| [Syself](https://syself.com) | @batistein | 2024-05-06 | Syself offers a simplified, multi-cloud Managed Kubernetes platform based on Cluster API and uses CloudNativePG for managing Postgres clusters in our internal infrastructure. |
| [ParadeDB](https://paradedb.com) | @philippemnoel | 2024-07-10 | ParadeDB is an Elasticsearch alternative on Postgres. It leverages CloudNativePG to manage ParadeDB Postgres clusters which connect to a customer's existing Postgres infrastructure via logical (streaming) replication. |
| [REWE International AG](https://rewe-group.at/en) | @rewemkris | 2024-08-21 |Hello! 👋 We are the DBMS Team of RIAG IT, responsible for managing databases worldwide for our stores, warehouses, and online shops. We leverage CloudNativePG to provide PostgreSQL as a Service, creating highly available databases running on Kubernetes in both Google Cloud and on-premises environments.|
| [Microsoft Azure](https://azure.microsoft.com/en-us/) | @KenKilty | 2024-08-22 | Learn how to [deploy](https://learn.microsoft.com/azure/aks/postgresql-ha-overview) PostgreSQL on [Azure Kubernetes Services (AKS)](https://learn.microsoft.com/azure/aks/what-is-aks) with [EDB commercial support](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/enterprisedb-corp.edb-enterprise) and [EDB Postgres-as-a-Service](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/enterprisedb-corp.biganimal-prod-v1) offerings available in the [Azure Marketplace](https://azuremarketplace.microsoft.com/).|
| [PZU Group](https://www.pzu.pl) | @MichaluxPL | 2024-08-26 | PZU is one of the largest financial institutions in Poland and also the largest insurance company in Central and Eastern Europe. CloudNativePG is used as on-premise cloud solution/DBaaS to provide highly available PostgreSQL clusters.|
| [Telnyx](https://www.telnyx.com) | @aryklein | 2024-09-24 | Telnyx leverages PostgreSQL as its relational database for internal services, managing databases with high availability using CloudNativePG across multiple Kubernetes clusters in different sites, with distributed replica clusters to ensure data redundancy and resilience. |
| [Alpcot](https://alpcot.se) | @svenakela | 2024-09-24 | Alpcot uses CloudNativePG for both public-facing and internal applications deployed in the cloud and in-house Kubernetes. |
| [GEICO Tech](https://www.geico.com/tech/) | @ardentperf | 2024-09-24 | GEICO Tech is building the most consumer-centric insurance offerings in America. CloudNativePG is used to provide a highly available Kubernetes-based Postgres service, both in the cloud and on-premises. |
| [Cambium](https://www.cambium.earth) | @Mmoncadaisla | 2024-09-25 | Cambium leverages CloudNativePG at its core to analyze and visualize geospatial data for carbon market applications, ranging from site selection to monitoring, reporting, and verification. |
| [MIND Informatica srl](https://mind-informatica.com) | @simonerocchi | 2024-09-25 | We use CloudNativePG to run PostgreSQL clusters for our web applications. |
| [Walkbase](https://walkbase.com/) | @LinAnt | 2024-10-24 | CloudNativePG currently manages all our Postgres instances on Kubernetes via GitOps. |
| [Akamai Technologies](https://www.akamai.com/) | @srodenhuis | 2024-11-20 | CloudNativePG is used in the [Akamai App PLatform](https://github.com/linode/apl-core) for all platform managed PostgreSQL databases. |
| [Novo Nordisk](https://www.novonordisk.com/) | [scni@novonordisk.com](mailto:scni@novonordisk.com) ([@CasperGN](https://github.com/CasperGN)) | 2024-11-20 | Backing of Grafana UI states for central Observability platform and datastore for our Developer Portal based off Backstage. |
| [Docaposte](https://docaposte.fr) | @albundy83 | 2024-11-20 | Docaposte is the digital trust leader in France. We use CloudNativePG because it is the most elegant and efficient solution for running PostgreSQL in production. |
| [Obmondo](https://obmondo.com) | @Obmondo | 2024-11-25 | At Obmondo we use CloudNativePG in our open-source Kubernetes meta-management platform called [KubeAid](https://kubeaid.io/) to easily manage all PostgreSQL databases across clusters from a centralized interface. |
| [Mirakl](https://www.mirakl.com/) | @ThomasBoussekey | 2025-02-03 | CloudNativePG is our default hosting solution for marketplace instances. With over 300 CloudNativePG clusters managing 8 TB of data, we have developed highly customizable Helm charts that support connection pooling, logical replication, and many other advanced features. |
| [Bitnami](https://bitnami.com) | [@carrodher](https://github.com/carrodher) | 2025-03-04 | Bitnami provides CloudNativePG as part of its open-source [Helm charts catalog](https://github.com/bitnami/charts), enabling users to easily deploy PostgreSQL clusters on Kubernetes. Additionally, CloudNativePG is available through [Tanzu Application Catalog](https://www.vmware.com/products/app-platform/tanzu-application-catalog) and [Bitnami Premium](https://www.arrow.com/globalecs/na/vendors/bitnami-premium/), where customers can benefit from advanced security and compliance features such as VEX, SBOM, SLSA3, and CVE scanning. |

CODEOWNERS Normal file

@ -0,0 +1,20 @@
# The CODEOWNERS file is used to define individuals or teams that are
# responsible for code in a repository. For details, please refer to
# https://docs.github.com/en/free-pro-team@latest/github/creating-cloning-and-archiving-repositories/about-code-owners
* @cloudnative-pg/maintainers
# Component owners
# See https://github.com/cloudnative-pg/governance/blob/main/COMPONENT-OWNERS.md#cloudnative-pg
# OLM
/config/olm-*/ @cloudnative-pg/maintainers @NiccoloFei
# Documentation
/docs/ @cloudnative-pg/maintainers @jsilvela
# Tests
/.github/ @cloudnative-pg/maintainers @jsilvela @NiccoloFei @litaocdl
/hack/ @cloudnative-pg/maintainers @jsilvela @NiccoloFei @litaocdl
/tests/ @cloudnative-pg/maintainers @jsilvela @NiccoloFei @litaocdl

CODE_OF_CONDUCT.md Normal file

@ -0,0 +1,4 @@
# Code of Conduct
The Code of Conduct for the CloudNativePG Community can be found in the
[governance repository](https://github.com/cloudnative-pg/governance/blob/main/CODE_OF_CONDUCT.md).

CONTRIBUTING.md Normal file

@ -0,0 +1,62 @@
# Contributing to CloudNativePG
Welcome! We are glad that you want to contribute to our CloudNativePG project! 💖
As you get started, you are in the best position to give us feedback on areas of
our project that we need help with, including:
* Problems found while setting up the development environment
* Gaps in our documentation
* Bugs in our GitHub Actions
* Promotion of PostgreSQL on Kubernetes with our operator
First, though, it is important that you read the [code of
conduct](CODE_OF_CONDUCT.md).
The guidelines below are a starting point. We don't want to limit your
creativity, passion, and initiative. If you think there's a better way, please
feel free to bring it up in a GitHub discussion, or open a pull request. We're
certain there are always better ways to do things; we just need to start some
constructive dialogue!
## Ways to contribute
We welcome many types of contributions including:
* New features
* Builds, CI/CD
* Bug fixes
* [Documentation](docs/README.md)
* Issue Triage
* Answering questions on Slack or GitHub Discussions
* Web design
* Communications / Social Media / Blog Posts
* Events participation
* Release management
For development contributions, please refer to the separate section called
["Contributing to the source code"](contribute/README.md).
## Ask for Help
The best way to reach us with a question when contributing is to drop a line in
our [Slack channel](README.md#communications), or start a new GitHub discussion.
## Raising Issues
When raising issues, please specify the following:
- Setup details as specified in the issue template
- A scenario where the issue occurred (with details on how to reproduce it)
- Errors and log messages that are displayed by the involved software
- Any other detail that might be useful
If you are trying to report a vulnerability, please refer to the
[security policy](SECURITY.md).
## Meetings
We extend a warm welcome to everyone to join any of our meetings.
For further details, please visit the
[CloudNativePG Community Meetings](https://github.com/cloudnative-pg#cloudnativepg-community-meetings) page.

Dockerfile Normal file

@ -0,0 +1,17 @@
ARG BASE=gcr.io/distroless/static-debian12:nonroot
# This builder stage exists only because we need a command
# to create a symlink, and a distroless image does not provide one
FROM gcr.io/distroless/static-debian12:debug-nonroot AS builder
ARG TARGETARCH
SHELL ["/busybox/sh", "-c"]
RUN ln -sf operator/manager_${TARGETARCH} manager
FROM ${BASE}
WORKDIR /
COPY --chown=nonroot:nonroot --chmod=0755 dist/manager/* operator/
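# Bring in the "manager" symlink created in the builder stage's home directory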
COPY --from=builder /home/nonroot/ .
COPY licenses /licenses
COPY LICENSE /licenses
USER 65532:65532
ENTRYPOINT ["/manager"]

GOVERNANCE.md Normal file

@ -0,0 +1,5 @@
# CloudNativePG Governance
Explore the governance policies for the CloudNativePG Community in the
[dedicated repository](https://github.com/cloudnative-pg/governance/blob/main/GOVERNANCE.md).

LICENSE Normal file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

MAINTAINERS.md Normal file

@ -0,0 +1,4 @@
# CloudNativePG Maintainers
You can find the current list of maintainers for the CloudNativePG project in the
[governance repository](https://github.com/cloudnative-pg/governance/blob/main/MAINTAINERS.md).

Makefile Normal file

@ -0,0 +1,393 @@
#
# Copyright © contributors to CloudNativePG, established as
# CloudNativePG a Series of LF Projects, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
#
# Image URL to use all building/pushing image targets
IMAGE_NAME ?= ghcr.io/cloudnative-pg/cloudnative-pg-testing
# Prevent e2e tests from proceeding with an empty tag, which
# would be treated as "latest".
ifeq (,$(CONTROLLER_IMG))
IMAGE_TAG = $(shell (git symbolic-ref -q --short HEAD || git describe --tags --exact-match) | tr / -)
ifneq (,${IMAGE_TAG})
CONTROLLER_IMG = ${IMAGE_NAME}:${IMAGE_TAG}
endif
endif
CATALOG_IMG ?= ${CONTROLLER_IMG}-catalog
BUNDLE_IMG ?= ${CONTROLLER_IMG}-bundle
INDEX_IMG ?= ${CONTROLLER_IMG}-index
COMMIT := $(shell git rev-parse --short HEAD || echo unknown)
DATE := $(shell git log -1 --pretty=format:'%ad' --date short)
VERSION := $(shell git describe --tags --match 'v*' | sed -e 's/^v//; s/-g[0-9a-f]\+$$//; s/-\([0-9]\+\)$$/-dev\1/')
LDFLAGS= "-X github.com/cloudnative-pg/cloudnative-pg/pkg/versions.buildVersion=${VERSION} $\
-X github.com/cloudnative-pg/cloudnative-pg/pkg/versions.buildCommit=${COMMIT} $\
-X github.com/cloudnative-pg/cloudnative-pg/pkg/versions.buildDate=${DATE}"
DIST_PATH := $(shell pwd)/dist
OPERATOR_MANIFEST_PATH := ${DIST_PATH}/operator-manifest.yaml
LOCALBIN ?= $(shell pwd)/bin
BUILD_IMAGE ?= true
POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \")
KUSTOMIZE_VERSION ?= v5.6.0
CONTROLLER_TOOLS_VERSION ?= v0.17.2
GENREF_VERSION ?= 015aaac611407c4fe591bc8700d2c67b7521efca
GORELEASER_VERSION ?= v2.8.1
SPELLCHECK_VERSION ?= 0.47.0
WOKE_VERSION ?= 0.19.0
OPERATOR_SDK_VERSION ?= v1.39.2
OPM_VERSION ?= v1.51.0
PREFLIGHT_VERSION ?= 1.12.1
OPENSHIFT_VERSIONS ?= v4.12-v4.18
ARCH ?= amd64
export CONTROLLER_IMG
export BUILD_IMAGE
export POSTGRES_IMAGE_NAME
export OPERATOR_MANIFEST_PATH
# We no longer need `trivialVersions=true`; with `crd`, multiple versions work fine
CRD_OPTIONS ?= "crd"
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
GOBIN=$(shell go env GOPATH)/bin
else
GOBIN=$(shell go env GOBIN)
endif
# Setting SHELL to bash allows bash commands to be executed by recipes.
# This is a requirement for 'setup-envtest.sh' in the test target.
# Options are set to exit when a recipe line exits non-zero or a piped command fails.
SHELL = /usr/bin/env bash -o pipefail
.SHELLFLAGS = -ec
all: build
##@ General
# The help target prints out all targets with their descriptions organized
# beneath their categories. The categories are represented by '##@' and the
# target descriptions by '##'. The awk command is responsible for reading the
# entire set of makefiles included in this invocation, looking for lines of the
# file as xyz: ## something, and then pretty-format the target and help. Then,
# if there's a line with ##@ something, that gets pretty-printed as a category.
# More info on the usage of ANSI control characters for terminal formatting:
# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
# More info on the awk command:
# http://linuxcommand.org/lc3_adv_awk.php
help: ## Display this help.
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
##@ Development
print-version:
echo ${VERSION}
ENVTEST_ASSETS_DIR=$$(pwd)/testbin
test: generate fmt vet manifests envtest ## Run tests.
mkdir -p ${ENVTEST_ASSETS_DIR} ;\
source <(${ENVTEST} use -p env --bin-dir ${ENVTEST_ASSETS_DIR} ${ENVTEST_K8S_VERSION}) ;\
export KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT=60s ;\
export KUBEBUILDER_CONTROLPLANE_START_TIMEOUT=60s ;\
go test -coverpkg=./... -coverprofile=cover.out ./api/... ./cmd/... ./internal/... ./pkg/... ./tests/utils/...
test-race: generate fmt vet manifests envtest ## Run tests enabling race detection.
mkdir -p ${ENVTEST_ASSETS_DIR} ;\
source <(${ENVTEST} use -p env --bin-dir ${ENVTEST_ASSETS_DIR} ${ENVTEST_K8S_VERSION}) ;\
go run github.com/onsi/ginkgo/v2/ginkgo -r -p --skip-package=e2e \
--race --keep-going --fail-on-empty --randomize-all --randomize-suites
e2e-test-kind: ## Run e2e tests locally using kind.
hack/e2e/run-e2e-kind.sh
e2e-test-local: ## Run e2e tests locally using the default kubernetes context.
hack/e2e/run-e2e-local.sh
##@ Build
build: generate fmt vet build-manager build-plugin ## Build binaries.
build-manager: generate fmt vet ## Build manager binary.
go build -o bin/manager -ldflags ${LDFLAGS} ./cmd/manager
build-plugin: generate fmt vet ## Build plugin binary.
go build -o bin/kubectl-cnpg -ldflags ${LDFLAGS} ./cmd/kubectl-cnpg
build-race: generate fmt vet build-manager-race build-plugin-race ## Build the binaries adding the -race option.
build-manager-race: generate fmt vet ## Build manager binary with -race option.
go build -race -o bin/manager -ldflags ${LDFLAGS} ./cmd/manager
build-plugin-race: generate fmt vet ## Build plugin binary.
go build -race -o bin/kubectl-cnpg -ldflags ${LDFLAGS} ./cmd/kubectl-cnpg
run: generate fmt vet manifests ## Run against the configured Kubernetes cluster in ~/.kube/config.
go run ./cmd/manager
docker-build: go-releaser ## Build the docker image.
GOOS=linux GOARCH=${ARCH} GOPATH=$(go env GOPATH) DATE=${DATE} COMMIT=${COMMIT} VERSION=${VERSION} \
$(GO_RELEASER) build --skip=validate --clean --single-target $(if $(VERSION),,--snapshot); \
builder_name_option=""; \
if [ -n "${BUILDER_NAME}" ]; then \
builder_name_option="--builder ${BUILDER_NAME}"; \
fi; \
DOCKER_BUILDKIT=1 buildVersion=${VERSION} revision=${COMMIT} \
docker buildx bake $${builder_name_option} --set=*.platform="linux/${ARCH}" \
--set distroless.tags="$${CONTROLLER_IMG}" \
--push distroless
olm-bundle: manifests kustomize operator-sdk ## Build the bundle for OLM installation
set -xeEuo pipefail ;\
CONFIG_TMP_DIR=$$(mktemp -d) ;\
cp -r config "$${CONFIG_TMP_DIR}" ;\
( \
cd "$${CONFIG_TMP_DIR}/config/default" ;\
$(KUSTOMIZE) edit set image controller="$${CONTROLLER_IMG}" ;\
cd "$${CONFIG_TMP_DIR}" ;\
) ;\
rm -fr bundle bundle.Dockerfile ;\
sed -i -e "s/ClusterRole/Role/" "$${CONFIG_TMP_DIR}/config/rbac/role.yaml" "$${CONFIG_TMP_DIR}/config/rbac/role_binding.yaml" ;\
($(KUSTOMIZE) build "$${CONFIG_TMP_DIR}/config/olm-manifests") | \
$(OPERATOR_SDK) generate bundle --verbose --overwrite --manifests --metadata --package cloudnative-pg --channels stable-v1 --use-image-digests --default-channel stable-v1 --version "${VERSION}" ; \
echo -e "\n # OpenShift annotations." >> bundle/metadata/annotations.yaml ;\
echo -e " com.redhat.openshift.versions: $(OPENSHIFT_VERSIONS)" >> bundle/metadata/annotations.yaml ;\
DOCKER_BUILDKIT=1 docker build --push --no-cache -f bundle.Dockerfile -t ${BUNDLE_IMG} . ;\
export BUNDLE_IMG="${BUNDLE_IMG}"
olm-catalog: olm-bundle opm ## Build and push the index image for OLM Catalog
set -xeEuo pipefail ;\
rm -fr catalog* cloudnative-pg-operator-template.yaml ;\
mkdir -p catalog/cloudnative-pg ;\
$(OPM) generate dockerfile catalog
echo -e "Schema: olm.semver\n\
GenerateMajorChannels: true\n\
GenerateMinorChannels: false\n\
Stable:\n\
Bundles:\n\
- Image: ${BUNDLE_IMG}" | envsubst > cloudnative-pg-operator-template.yaml
$(OPM) alpha render-template semver -o yaml < cloudnative-pg-operator-template.yaml > catalog/catalog.yaml ;\
$(OPM) validate catalog/ ;\
$(OPM) index add --mode semver --container-tool docker --bundles "${BUNDLE_IMG}" --tag "${INDEX_IMG}" ;\
docker push ${INDEX_IMG} ;\
DOCKER_BUILDKIT=1 docker build --push -f catalog.Dockerfile -t ${CATALOG_IMG} . ;\
echo -e "apiVersion: operators.coreos.com/v1alpha1\n\
kind: CatalogSource\n\
metadata:\n\
name: cloudnative-pg-catalog\n\
namespace: operators\n\
spec:\n\
sourceType: grpc\n\
image: ${CATALOG_IMG}\n\
secrets:\n\
- cnpg-pull-secret" | envsubst > cloudnative-pg-catalog.yaml ;\
##@ Deployment
install: manifests kustomize ## Install CRDs into a cluster.
$(KUSTOMIZE) build config/crd | kubectl apply --server-side -f -
uninstall: manifests kustomize ## Uninstall CRDs from a cluster.
$(KUSTOMIZE) build config/crd | kubectl delete -f -
deploy: generate-manifest ## Deploy controller in the configured Kubernetes cluster in ~/.kube/config.
kubectl apply --server-side --force-conflicts -f ${OPERATOR_MANIFEST_PATH}
generate-manifest: manifests kustomize ## Generate manifest used for deployment.
set -e ;\
CONFIG_TMP_DIR=$$(mktemp -d) ;\
cp -r config/* $$CONFIG_TMP_DIR ;\
{ \
cd $$CONFIG_TMP_DIR/default ;\
$(KUSTOMIZE) edit add patch --path manager_image_pull_secret.yaml ;\
cd $$CONFIG_TMP_DIR/manager ;\
$(KUSTOMIZE) edit set image controller="${CONTROLLER_IMG}" ;\
$(KUSTOMIZE) edit add patch --path env_override.yaml ;\
$(KUSTOMIZE) edit add configmap controller-manager-env \
--from-literal="POSTGRES_IMAGE_NAME=${POSTGRES_IMAGE_NAME}" \
--from-literal="STANDBY_TCP_USER_TIMEOUT=5000" ;\
} ;\
mkdir -p ${DIST_PATH} ;\
$(KUSTOMIZE) build $$CONFIG_TMP_DIR/default > ${OPERATOR_MANIFEST_PATH} ;\
rm -fr $$CONFIG_TMP_DIR
manifests: controller-gen ## Generate manifests e.g. CRD, RBAC etc.
$(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager webhook paths="./..." output:crd:artifacts:config=config/crd/bases
generate: controller-gen ## Generate code.
$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
olm-scorecard: operator-sdk ## Run the Scorecard test from operator-sdk
$(OPERATOR_SDK) scorecard ${BUNDLE_IMG} --wait-time 60s --verbose
##@ Formatters and Linters
fmt: ## Run go fmt against code.
go fmt ./...
vet: ## Run go vet against code.
go vet ./...
lint: ## Run the linter.
golangci-lint run
lint-fix: ## Run the linter with --fix.
golangci-lint run --fix
shellcheck: ## Shellcheck for the hack directory.
@{ \
set -e ;\
find -name '*.sh' -exec shellcheck -a -S style {} + ;\
}
spellcheck: ## Runs the spellcheck on the project.
docker run --rm -v $(PWD):/tmp:Z jonasbn/github-action-spellcheck:$(SPELLCHECK_VERSION)
woke: ## Runs the woke checks on project.
docker run --rm -v $(PWD):/src:Z -w /src getwoke/woke:$(WOKE_VERSION) woke -c .woke.yaml
wordlist-ordered: ## Order the wordlist using sort
LANG=C LC_ALL=C sort .wordlist-en-custom.txt > .wordlist-en-custom.txt.new && \
mv -f .wordlist-en-custom.txt.new .wordlist-en-custom.txt
go-mod-check: ## Check if there's any dirty change after `go mod tidy`
go mod tidy ;\
git diff --exit-code go.mod go.sum
run-govulncheck: govulncheck ## Check if there's any known vulnerabilities with the currently installed Go modules
$(GOVULNCHECK) ./...
checks: go-mod-check generate manifests apidoc fmt spellcheck wordlist-ordered woke vet lint run-govulncheck ## Runs all the checks on the project.
##@ Documentation
licenses: go-licenses ## Generate the licenses folder.
# The following statement is expected to fail because our license is unrecognised
$(GO_LICENSES) \
save ./... \
--save_path licenses/go-licenses --force || true
chmod a+rw -R licenses/go-licenses
find licenses/go-licenses \( -name '*.mod' -or -name '*.go' \) -delete
apidoc: genref ## Update the API Reference section of the documentation.
cd ./docs && \
$(GENREF) -c config.yaml \
-include cloudnative-pg \
-o src
##@ Cleanup
clean: ## Clean-up the work tree from build/test artifacts
rm -rf $(LOCALBIN)/kubectl-cnpg $(LOCALBIN)/manager $(DIST_PATH) _*/ tests/e2e/out/ tests/e2e/*_logs/ cover.out
distclean: clean ## Clean-up the work tree removing also cached tools binaries
! [ -d "$(ENVTEST_ASSETS_DIR)" ] || chmod -R u+w $(ENVTEST_ASSETS_DIR)
rm -rf $(LOCALBIN) $(ENVTEST_ASSETS_DIR)
##@ Tools
## Location to install dependencies to
$(LOCALBIN):
mkdir -p $(LOCALBIN)
## Tool Binaries
CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen
ENVTEST ?= $(LOCALBIN)/setup-envtest
.PHONY: controller-gen
controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary.
$(CONTROLLER_GEN): $(LOCALBIN)
GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION)
KUSTOMIZE = $(LOCALBIN)/kustomize
kustomize: ## Download kustomize locally if necessary.
$(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5@$(KUSTOMIZE_VERSION))
.PHONY: envtest
envtest: $(ENVTEST) ## Download envtest-setup locally if necessary.
$(ENVTEST): $(LOCALBIN)
GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
GENREF = $(LOCALBIN)/genref
genref: ## Download kubernetes-sigs/reference-docs/genref locally if necessary.
$(call go-install-tool,$(GENREF),github.com/kubernetes-sigs/reference-docs/genref@$(GENREF_VERSION))
GO_LICENSES = $(LOCALBIN)/go-licenses
go-licenses: ## Download go-licenses locally if necessary.
$(call go-install-tool,$(GO_LICENSES),github.com/google/go-licenses@latest)
GO_RELEASER = $(LOCALBIN)/goreleaser
go-releaser: ## Download go-releaser locally if necessary.
$(call go-install-tool,$(GO_RELEASER),github.com/goreleaser/goreleaser/v2@$(GORELEASER_VERSION))
.PHONY: govulncheck
GOVULNCHECK = $(LOCALBIN)/govulncheck
govulncheck: ## Download govulncheck locally if necessary.
$(call go-install-tool,$(GOVULNCHECK),golang.org/x/vuln/cmd/govulncheck@latest)
PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST))))
# go-install-tool will 'go install' any package $2 and install it to $1.
define go-install-tool
@[ -f $(1) ] || { \
set -e ;\
echo "Downloading $(2)" ;\
GOBIN=$(PROJECT_DIR)/bin go install $(2) ;\
}
endef
.PHONY: operator-sdk
OPERATOR_SDK = $(LOCALBIN)/operator-sdk
operator-sdk: ## Install the operator-sdk app
ifneq ($(shell $(OPERATOR_SDK) version 2>/dev/null | awk -F '"' '{print $$2}'), $(OPERATOR_SDK_VERSION))
@{ \
set -e ;\
mkdir -p $(LOCALBIN) ;\
OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \
curl -sSL "https://github.com/operator-framework/operator-sdk/releases/download/${OPERATOR_SDK_VERSION}/operator-sdk_$${OS}_$${ARCH}" -o "$(OPERATOR_SDK)" ;\
chmod +x "$(LOCALBIN)/operator-sdk" ;\
}
endif
.PHONY: opm
OPM = $(LOCALBIN)/opm
opm: ## Download opm locally if necessary.
ifneq ($(shell $(OPM) version 2>/dev/null | awk -F '"' '{print $$2}'), $(OPM_VERSION))
@{ \
set -e ;\
mkdir -p $(LOCALBIN) ;\
OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \
curl -sSL https://github.com/operator-framework/operator-registry/releases/download/${OPM_VERSION}/$${OS}-$${ARCH}-opm -o "$(OPM)";\
chmod +x $(LOCALBIN)/opm ;\
}
endif
.PHONY: preflight
PREFLIGHT = $(LOCALBIN)/preflight
preflight: ## Download preflight locally if necessary.
ifneq ($(shell $(PREFLIGHT) --version 2>/dev/null | awk '{print $$3}'), $(PREFLIGHT_VERSION))
@{ \
set -e ;\
mkdir -p $(LOCALBIN) ;\
OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \
if [ "$${OS}" != "linux" ] ; then \
echo "Unsupported OS: $${OS}" ;\
else \
curl -sSL "https://github.com/redhat-openshift-ecosystem/openshift-preflight/releases/download/${PREFLIGHT_VERSION}/preflight-$${OS}-$${ARCH}" -o "$(PREFLIGHT)" ;\
chmod +x $(LOCALBIN)/preflight ;\
fi \
}
endif

PROJECT Normal file

@ -0,0 +1,86 @@
domain: cnpg.io
version: "3"
layout:
- go.kubebuilder.io/v4
projectName: cloudnative-pg-kubebuilderv4
repo: github.com/cloudnative-pg/cloudnative-pg
resources:
- api:
crdVersion: v1
namespaced: true
controller: true
domain: cnpg.io
group: postgresql
kind: Cluster
path: github.com/cloudnative-pg/cloudnative-pg/api/v1
version: v1
webhooks:
defaulting: true
validation: true
webhookVersion: v1
- api:
crdVersion: v1
namespaced: true
controller: true
domain: cnpg.io
group: postgresql
kind: Backup
path: github.com/cloudnative-pg/cloudnative-pg/api/v1
version: v1
webhooks:
defaulting: true
validation: true
webhookVersion: v1
- api:
crdVersion: v1
namespaced: true
controller: true
domain: cnpg.io
group: postgresql
kind: ScheduledBackup
path: github.com/cloudnative-pg/cloudnative-pg/api/v1
version: v1
webhooks:
defaulting: true
validation: true
webhookVersion: v1
- api:
crdVersion: v1
namespaced: true
controller: true
domain: cnpg.io
group: postgresql
kind: Pooler
path: github.com/cloudnative-pg/cloudnative-pg/api/v1
version: v1
webhooks:
defaulting: true
validation: true
webhookVersion: v1
- api:
crdVersion: v1
namespaced: true
controller: true
domain: cnpg.io
group: postgresql
kind: Database
path: github.com/cloudnative-pg/cloudnative-pg/api/v1
version: v1
- api:
crdVersion: v1
namespaced: true
controller: true
domain: cnpg.io
group: postgresql
kind: Publication
path: github.com/cloudnative-pg/cloudnative-pg/api/v1
version: v1
- api:
crdVersion: v1
namespaced: true
controller: true
domain: cnpg.io
group: postgresql
kind: Subscription
path: github.com/cloudnative-pg/cloudnative-pg/api/v1
version: v1

README.md Normal file

@ -0,0 +1,179 @@
[![CNCF Landscape](https://img.shields.io/badge/CNCF%20Landscape-5699C6)][cncf-landscape]
[![Latest Release](https://img.shields.io/github/v/release/cloudnative-pg/cloudnative-pg.svg)][latest-release]
[![GitHub License](https://img.shields.io/github/license/cloudnative-pg/cloudnative-pg)][license]
[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/9933/badge)][openssf]
[![Documentation][documentation-badge]][documentation]
[![Stack Overflow](https://img.shields.io/badge/stackoverflow-cloudnative--pg-blue?logo=stackoverflow&logoColor=%23F48024&link=https%3A%2F%2Fstackoverflow.com%2Fquestions%2Ftagged%2Fcloudnative-pg)][stackoverflow]
[![FOSSA Status][fossa-badge]][fossa]
# Welcome to the CloudNativePG Project!
**CloudNativePG (CNPG)** is an open-source platform designed to seamlessly
manage [PostgreSQL](https://www.postgresql.org/) databases in Kubernetes
environments. It covers the entire operational lifecycle—from deployment to
ongoing maintenance—through its core component, the CloudNativePG operator.
## Table of Contents
- [Code of Conduct](CODE_OF_CONDUCT.md)
- [Governance Policies](https://github.com/cloudnative-pg/governance/blob/main/GOVERNANCE.md)
- [Contributing](CONTRIBUTING.md)
- [Adopters](ADOPTERS.md)
- [Commercial Support](https://cloudnative-pg.io/support/)
- [License](LICENSE)
## Getting Started
The best way to get started is the [Quickstart Guide](https://cloudnative-pg.io/documentation/current/quickstart/).
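As a first taste of the declarative approach, here is a minimal sketch of a
`Cluster` manifest (the name and storage size are illustrative):

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  # One primary and two standby instances
  instances: 3
  storage:
    size: 1Gi
```

Once applied with `kubectl apply`, the operator takes care of bootstrapping
the primary and joining the standby instances.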
## Scope
### Mission
CloudNativePG aims to increase PostgreSQL adoption within Kubernetes by making
it an integral part of the development process and GitOps-driven CI/CD
automation.
### Core Principles & Features
Designed by PostgreSQL experts for Kubernetes administrators, CloudNativePG
follows a Kubernetes-native approach to PostgreSQL primary/standby cluster
management. Instead of relying on external high-availability tools (like
Patroni, repmgr, or Stolon), it integrates directly with the Kubernetes API to
automate database operations that a skilled DBA would perform manually.
Key design decisions include:
- Direct integration with Kubernetes API: The PostgreSQL cluster's status is
  available directly in the `Cluster` resource, allowing users to inspect it
  via the Kubernetes API (see the example after this list).
- Operator pattern: The operator ensures that the desired PostgreSQL state is
reconciled automatically, following Kubernetes best practices.
- Immutable application containers: Updates follow an immutable infrastructure
model, as explained in
["Why EDB Chose Immutable Application Containers"](https://www.enterprisedb.com/blog/why-edb-chose-immutable-application-containers).
### How CloudNativePG Works
The operator continuously monitors and updates the PostgreSQL cluster state.
Examples of automated actions include:
- Failover management: If the primary instance fails, the operator elects a new
primary, updates the cluster status, and orchestrates the transition.
- Scaling read replicas: When the number of desired replicas changes, the
  operator provisions or removes resources such as persistent volumes, secrets,
  and config maps while managing streaming replication (see the sketch after
  this list).
- Service updates: Kubernetes remains the single source of truth, ensuring
that PostgreSQL service endpoints are always up to date.
- Rolling updates: When an image is updated, the operator follows a rolling
strategy—first updating replica pods before performing a controlled
switchover for the primary.
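As a sketch of the scaling behavior, changing the number of instances is just
a declarative update of the `Cluster` resource (reusing the illustrative
`cluster-example` name):

```sh
# Scale from 3 to 5 instances; the operator provisions the new replicas,
# their volumes, and the streaming replication configuration
kubectl patch cluster cluster-example --type merge -p '{"spec":{"instances":5}}'
```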
CloudNativePG manages additional Kubernetes resources to enhance PostgreSQL
management, including: `Backup`, `ClusterImageCatalog`, `Database`,
`ImageCatalog`, `Pooler`, `Publication`, `ScheduledBackup`, and `Subscription`.
## Out of Scope
- **Kubernetes only:** CloudNativePG is dedicated to vanilla Kubernetes
maintained by the [Cloud Native Computing Foundation
(CNCF)](https://kubernetes.io/).
- **PostgreSQL only:** CloudNativePG is dedicated to vanilla PostgreSQL
maintained by the [PostgreSQL Global Development Group
(PGDG)](https://www.postgresql.org/about/).
- **No support for forks:** Features from PostgreSQL forks will only be
considered if they can be integrated as extensions or pluggable frameworks.
- **Not a general-purpose database operator:** CloudNativePG does not support
other databases (e.g., MariaDB).
CloudNativePG can be extended via the [CNPG-I plugin interface](https://github.com/cloudnative-pg/cnpg-i).
## Communications
- [GitHub Discussions](https://github.com/cloudnative-pg/cloudnative-pg/discussions)
- [Slack Channel](https://join.slack.com/t/cloudnativepg/shared_invite/zt-30a6l6bp3-u1lNAmh~N02Cfiv2utKTFg)
- [Twitter](https://twitter.com/CloudNativePg)
- [Mastodon](https://mastodon.social/@CloudNativePG)
- [Bluesky](https://bsky.app/profile/cloudnativepg.bsky.social)
## Resources
- [Roadmap](https://github.com/orgs/cloudnative-pg/projects/1)
- [Website](https://cloudnative-pg.io)
- [FAQ](docs/src/faq.md)
- [Blog](https://cloudnative-pg.io/blog/)
- [CloudNativePG Plugin Interface (CNPG-I)](https://github.com/cloudnative-pg/cnpg-i)
## Adopters
A list of publicly known users of the CloudNativePG operator is in [ADOPTERS.md](ADOPTERS.md).
Help us grow our community and CloudNativePG by adding yourself and your
organization to this list!
### CloudNativePG at KubeCon
- March 21, 2024, KubeCon Europe 2024 in Paris: ["Scaling Heights: Mastering Postgres Database Vertical Scalability with Kubernetes Storage Magic"](https://kccnceu2024.sched.com/event/1YeM4/scaling-heights-mastering-postgres-database-vertical-scalability-with-kubernetes-storage-magic-gabriele-bartolini-edb-gari-singh-google) (Gari Singh, Google & Gabriele Bartolini, EDB)
- March 19, 2024, Data on Kubernetes Day at KubeCon Europe 2024 in Paris: ["From Zero to Hero: Scaling Postgres in Kubernetes Using the Power of CloudNativePG"](https://colocatedeventseu2024.sched.com/event/1YFha/from-zero-to-hero-scaling-postgres-in-kubernetes-using-the-power-of-cloudnativepg-gabriele-bartolini-edb) (Gabriele Bartolini, EDB)
- November 7, 2023, KubeCon North America 2023 in Chicago: ["Disaster Recovery with Very Large Postgres Databases (in Kubernetes)"](https://kccncna2023.sched.com/event/1R2ml/disaster-recovery-with-very-large-postgres-databases-gabriele-bartolini-edb-michelle-au-google) (Michelle Au, Google & Gabriele Bartolini, EDB)
- October 27, 2022, KubeCon North America 2022 in Detroit: ["Data On Kubernetes, Deploying And Running PostgreSQL And Patterns For Databases In a Kubernetes Cluster"](https://kccncna2022.sched.com/event/182GB/data-on-kubernetes-deploying-and-running-postgresql-and-patterns-for-databases-in-a-kubernetes-cluster-chris-milsted-ondat-gabriele-bartolini-edb) (Chris Milsted, Ondat & Gabriele Bartolini, EDB)
### Useful links
- [Data on Kubernetes (DoK) Community](https://dok.community/)
- ["Cloud Neutral Postgres Databases with Kubernetes and CloudNativePG" by Gabriele Bartolini](https://www.cncf.io/blog/2024/11/20/cloud-neutral-postgres-databases-with-kubernetes-and-cloudnativepg/) (November 2024)
- ["How to migrate your PostgreSQL database in Kubernetes with ~0 downtime from anywhere" by Gabriele Bartolini](https://gabrielebartolini.it/articles/2024/03/cloudnativepg-recipe-5-how-to-migrate-your-postgresql-database-in-kubernetes-with-~0-downtime-from-anywhere/) (March 2024)
- ["Maximizing Microservice Databases with Kubernetes, Postgres, and CloudNativePG" by Gabriele Bartolini](https://gabrielebartolini.it/articles/2024/02/maximizing-microservice-databases-with-kubernetes-postgres-and-cloudnativepg/) (February 2024)
- ["Recommended Architectures for PostgreSQL in Kubernetes" by Gabriele Bartolini](https://www.cncf.io/blog/2023/09/29/recommended-architectures-for-postgresql-in-kubernetes/) (September 2023)
- ["The Current State of Major PostgreSQL Upgrades with CloudNativePG" by Gabriele Bartolini](https://www.enterprisedb.com/blog/current-state-major-postgresql-upgrades-cloudnativepg-kubernetes) (August 2023)
- ["The Rise of the Kubernetes Native Database" by Jeff Carpenter](https://thenewstack.io/the-rise-of-the-kubernetes-native-database/) (December 2022)
- ["Why Run Postgres in Kubernetes?" by Gabriele Bartolini](https://cloudnativenow.com/kubecon-cnc-eu-2022/why-run-postgres-in-kubernetes/) (May 2022)
- ["Shift-Left Security: The Path To PostgreSQL On Kubernetes" by Gabriele Bartolini](https://www.tfir.io/shift-left-security-the-path-to-postgresql-on-kubernetes/) (April 2021)
- ["Local Persistent Volumes and PostgreSQL usage in Kubernetes" by Gabriele Bartolini](https://www.2ndquadrant.com/en/blog/local-persistent-volumes-and-postgresql-usage-in-kubernetes/) (June 2020)
---
<p align="center">
We are a <a href="https://www.cncf.io/sandbox-projects/">Cloud Native Computing Foundation Sandbox project</a>.
</p>
<p style="text-align:center;" align="center">
<picture align="center">
<source media="(prefers-color-scheme: dark)" srcset="https://github.com/cncf/artwork/blob/main/other/cncf/horizontal/white/cncf-white.svg?raw=true">
<source media="(prefers-color-scheme: light)" srcset="https://github.com/cncf/artwork/blob/main/other/cncf/horizontal/color/cncf-color.svg?raw=true">
<img align="center" src="https://github.com/cncf/artwork/blob/main/other/cncf/horizontal/color/cncf-color.svg?raw=true" alt="CNCF logo" width="50%"/>
</picture>
</p>
---
<p align="center">
CloudNativePG was originally built and sponsored by <a href="https://www.enterprisedb.com">EDB</a>.
</p>
<p style="text-align:center;" align="center">
<picture align="center">
<source media="(prefers-color-scheme: dark)" srcset="https://raw.githubusercontent.com/cloudnative-pg/.github/main/logo/edb_landscape_color_white.svg">
<source media="(prefers-color-scheme: light)" srcset="https://raw.githubusercontent.com/cloudnative-pg/.github/main/logo/edb_landscape_color_grey.svg">
<img align="center" src="https://raw.githubusercontent.com/cloudnative-pg/.github/main/logo/edb_landscape_color_grey.svg" alt="EDB logo" width="25%"/>
</picture>
</p>
---
<p align="center">
<a href="https://www.postgresql.org/about/policies/trademarks/">Postgres, PostgreSQL, and the Slonik Logo</a>
are trademarks or registered trademarks of the PostgreSQL Community Association
of Canada, and used with their permission.
</p>
---
[cncf-landscape]: https://landscape.cncf.io/?item=app-definition-and-development--database--cloudnativepg
[stackoverflow]: https://stackoverflow.com/questions/tagged/cloudnative-pg
[latest-release]: https://github.com/cloudnative-pg/cloudnative-pg/releases/latest
[documentation]: https://cloudnative-pg.io/documentation/current/
[license]: https://github.com/cloudnative-pg/cloudnative-pg?tab=Apache-2.0-1-ov-file#readme
[openssf]: https://www.bestpractices.dev/projects/9933
[documentation-badge]: https://img.shields.io/badge/Documentation-white?logo=data%3Aimage%2Fpng%3Bbase64%2CiVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAGN0lEQVR4nJRXXWwcVxU%2B8%2F%2BzP%2BPZtR2v7dqy07jUJUALNaiK6lZyUVVKWgGKaIv8QCMekBAVQlQICcEzVZFQVYFKQhASEBHlISJPCRJEshTFChgrIYHEiYMh69jetffHM7Mzc%2B9Bs7vjnTs7yZpZWbt37s%2F5zne%2Bc861CD0eXRkbHc3NfjeffvxNAGEAgULD2756v35%2B3qe1Nc4fnQVEXlA2LnOcXlCF8S%2B6vvVgq%2FL3M65X3e51PvfQCU4WJgZe%2B8GQ8fS7AKgjBB8KEHwjDXZSjkf0CREAaXM2eI9c65siqWxWl360Xl74ANHz%2Fy8AitxnTBfmz%2BhyYS4wGhwObQCIHSA0AigOMBzvOsXzd4pnjyL6NMmWEH8hi2b28Og3%2FqRJA0ewfQy0v1vGO2NovwPo%2FEU%2FwVgSU1PI%2BSu79v3lJAB8HM%2BTI%2FO%2FUUXzM4xHIe0xI4DdRqOAwnF%2F38ePPyzaDIDh%2FMxcWh462m08aojuGY97C0nrAEHg9BlF0fmeAPr0J15vbaKsp0BZQzEDEAlP9B209UIIVXUta%2FQEQHwxgxFjTc%2BRskAwrgVWmHtg22vMPJwLDqGUNJIAMHVAkGu3WdpZz6NAkgSXpINSycluV28er1a3rJ4M3F2%2F9AtCvXKycRrTQttrjINjxxxIL9jevxdaDHU%2FTBr6pL5ruzuLZubgUQBOY2hPij3GBUe7tBCMBRE2KrXVSz0BBI%2FtPVgtV%2F%2FxkZ5WSjI%2F%2BFIXC3sHJwgT4yFqrZFFTSlVrp3sGYLwcfxSmXCbS00j2Ms4K7qkOsFx6qdTuiHtG4AimfmM8NyvOvR2G48qXtZ2fsfrN7%2BqpcRyUp0glKiimDm4TwAcHBp%2B9WeA4ki0GMWNR9OVF8BZvn7xtI%2FF09H8jzLEgz6yLwCDuelnFXHkTZZOytCOEdqDOtGwsm%2BNj00fXt%2B6%2Bj4vcA7bwNrZwENmXwAKuZnvsNRThs5ozMPfPiHyoDF7xiduHcXb70A8dRFheHjiySQATBZk0nl9MHPkBEWUoEtYjyrPFNwGzfdlD37Zdu98KCv%2BMmD2BYpUCvcST39e0%2BS1Wr249FAAg7mPzWrS5NstEbE0xrsiA6QN1PfRFLnhr%2BspxVJTlY8Mw1DqNXeyCQFREEXz9cHB0QOev73QaNhOF4B%2B45PHFHFgDhJTqjuubJFqX1KQco7NTTuW8kq95k2G4eLEGzM7lfItnjNeTKcOfV%2FT8hOuV77A9IK0XjgMpCO0ZiuV3L%2F6njCFAOmucGB3OII5XgCXEJTDdZLElVbu3Vz0fWexvL30k0B6ggBACOmIUBAEUKX0dDTvW7RCYcdZPq6n%2FSsQnUO2RuyBRgQ9Rc5mMvJ6CNIj1nXfd9qWAsCkaZzJAk1L8UjVqY737dSjfCGrPHWqXL32Q0mB%2F2BXnke00WaEYv2aTzAbnuV5pcWkDGAAGJmhSafh6hjr%2BW2SVYHrP7bb%2BOdPW%2FUgflGlTM2gaK%2Ft7tp6%2BN6yixdN89DcIwGktIFPABfNbwoQqQWEUnDJzg1g0jDeK5p7Kp7nensXFI7uyAr%2FLyM7fYLnpa6LYScE8vDnot5hrKlslm%2BfE3nVxJgO4o3KcYu%2FF8XM8yFQ27n%2F65Te%2FzKl3Jhpjj6TCIDneRD5%2FItxr1vdkALw7p1qfeWPpjHxMtsXaPxu6FLc%2BrnbSB1r7fcrlr36nqwMzQfnplJDryQCGOh%2FbLjhcM%2FEvQ4Pdund9xRV5m1LfTXaF%2BK9gsLGB9nsgddcz8thM%2FarPzYM8%2FFazf9sMFaU%2Fi%2FwvNANwEhPvUGR8ozn7d%2BiDKXixtKpbHp81nV9E7puRy31ixKUbOe%2Fv3Ud891ghhDrL5Z975eaOvV%2BCNRp0Gfz%2BcJjDABdTwlpdfKbId0t5XYAcHz5D5ZVtWUp9%2Flog2L7PgVJqZx0HOE5Cqghemv1%2Bt%2FeGBmZ%2BdB2yNN72UEpnzXG32YADA186i3bIpPxMhuKrFK%2Fd77JUnbkKbYvRJlC8DzKSZK76Lq1he2dKy%2BZuSfesSz5a2xHDbLJ%2BJaqdv5H4EUY%2BzbG2m9HgN7mg81bfw4W1uu7AjvHaqDhqF%2FZ3Fq5XFy%2FcESSDsx5fvZ7wLEsNfXk%2BjlVHfpSCOB%2FAQAA%2F%2F8zd8orZc2N9AAAAABJRU5ErkJggg%3D%3D
[fossa-badge]: https://app.fossa.com/api/projects/git%2Bgithub.com%2Fcloudnative-pg%2Fcloudnative-pg.svg?type=small
[fossa]: https://app.fossa.com/projects/git%2Bgithub.com%2Fcloudnative-pg%2Fcloudnative-pg?ref=badge_small

SECURITY.md Normal file

@ -0,0 +1,18 @@
# Security Policy
## Supported Versions
For details on all community-supported versions of CloudNativePG, please refer to the
["Supported releases" section in the official documentation](https://cloudnative-pg.io/documentation/current/supported_releases/).
## Reporting a Vulnerability
To make a report, send an email containing the details of the vulnerability to
security@cloudnative-pg.io (an alias to a private mailing list in Google Groups
containing just the maintainers of the project). Private disclosure of a potential
vulnerability is important. The maintainers will reply acknowledging the report,
and decide whether to keep it private or publicly disclose it.
CloudNativePG relies on the
[GitHub infrastructure to manage security advisories and vulnerabilities](https://github.com/cloudnative-pg/cloudnative-pg/security).

SUPPORT.md Normal file

@ -0,0 +1,30 @@
# Commercial Support for CloudNativePG
CloudNativePG is an independent open-source project and does not officially
endorse any specific company or service provider.
However, to assist users in finding professional support, the
"[Commercial Support](https://cloudnative-pg.io/support/)"
page offers an alphabetical list of companies and individuals providing
CloudNativePG-related products or services.
*Please note that the CloudNativePG authors are not responsible for the accuracy
or content provided by the listed companies or individuals.*
## How to Get Listed
To have your company or personal services featured on this list, please submit
a [pull request to the CloudNativePG website](https://github.com/cloudnative-pg/cloudnative-pg.github.io)
by adding a `.md` file in the [`content/support` folder](https://github.com/cloudnative-pg/cloudnative-pg.github.io/tree/main/content/support)
containing the following information:
1. **Organisation Name**: Clearly specify the name of your company or entity.
2. **Organisation Logo**: Provide your company logo in SVG format.
3. **Website Link**: Include a link to your homepage or a dedicated landing
page that explicitly mentions CloudNativePG support and includes at least one
link back to [cloudnative-pg.io](https://cloudnative-pg.io).
[CloudNativePG maintainers will vet each submission](https://github.com/cloudnative-pg/governance/blob/main/GOVERNANCE.md#voting)
and reserve the right to reject your application or request changes if your website
doesn't clearly mention CloudNativePG support or if it doesn't include at least
one link back to [cloudnative-pg.io](https://cloudnative-pg.io).

api/v1/backup_funcs.go Normal file

@ -0,0 +1,270 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
import (
"context"
"sort"
"strconv"
"strings"
"time"
volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
// SetAsFailed marks a certain backup as failed
func (backupStatus *BackupStatus) SetAsFailed(
err error,
) {
backupStatus.Phase = BackupPhaseFailed
if err != nil {
backupStatus.Error = err.Error()
} else {
backupStatus.Error = ""
}
}
// SetAsFinalizing marks a certain backup as finalizing
func (backupStatus *BackupStatus) SetAsFinalizing() {
backupStatus.Phase = BackupPhaseFinalizing
backupStatus.Error = ""
}
// SetAsCompleted marks a certain backup as completed
func (backupStatus *BackupStatus) SetAsCompleted() {
backupStatus.Phase = BackupPhaseCompleted
backupStatus.Error = ""
backupStatus.StoppedAt = ptr.To(metav1.Now())
}
// SetAsStarted marks a certain backup as started
func (backupStatus *BackupStatus) SetAsStarted(podName, containerID string, method BackupMethod) {
backupStatus.Phase = BackupPhaseStarted
backupStatus.InstanceID = &InstanceID{
PodName: podName,
ContainerID: containerID,
}
backupStatus.Method = method
}
// SetSnapshotElements sets the Elements field from a list of VolumeSnapshot
func (snapshotStatus *BackupSnapshotStatus) SetSnapshotElements(snapshots []volumesnapshot.VolumeSnapshot) {
snapshotNames := make([]BackupSnapshotElementStatus, len(snapshots))
for idx, volumeSnapshot := range snapshots {
snapshotNames[idx] = BackupSnapshotElementStatus{
Name: volumeSnapshot.Name,
Type: volumeSnapshot.Annotations[utils.PvcRoleLabelName],
TablespaceName: volumeSnapshot.Labels[utils.TablespaceNameLabelName],
}
}
snapshotStatus.Elements = snapshotNames
}
// IsDone checks whether a backup has finished, either successfully or with a failure
func (backupStatus *BackupStatus) IsDone() bool {
return backupStatus.Phase == BackupPhaseCompleted || backupStatus.Phase == BackupPhaseFailed
}
// GetOnline tells whether this backup was taken while the database
// was up
func (backupStatus *BackupStatus) GetOnline() bool {
if backupStatus.Online == nil {
return false
}
return *backupStatus.Online
}
// GetVolumeSnapshotDeadline returns the deadline for volume snapshot completion, defaulting to 10 minutes.
func (backup *Backup) GetVolumeSnapshotDeadline() time.Duration {
const defaultValue = 10
value := backup.Annotations[utils.BackupVolumeSnapshotDeadlineAnnotationName]
if value == "" {
return defaultValue * time.Minute
}
minutes, err := strconv.Atoi(value)
if err != nil {
return defaultValue * time.Minute
}
return time.Duration(minutes) * time.Minute
}
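// For illustration: setting the annotation identified by
// utils.BackupVolumeSnapshotDeadlineAnnotationName to "20" on a Backup makes
// GetVolumeSnapshotDeadline return 20 minutes, while a missing or
// non-numeric value falls back to the 10-minute default:
//
//	backup.Annotations[utils.BackupVolumeSnapshotDeadlineAnnotationName] = "20"
//	deadline := backup.GetVolumeSnapshotDeadline() // 20 * time.Minute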
// IsCompletedVolumeSnapshot checks if a backup is completed using the volume snapshot method.
// It returns true if the backup's method is BackupMethodVolumeSnapshot and its status phase is BackupPhaseCompleted.
// Otherwise, it returns false.
func (backup *Backup) IsCompletedVolumeSnapshot() bool {
return backup != nil &&
backup.Spec.Method == BackupMethodVolumeSnapshot &&
backup.Status.Phase == BackupPhaseCompleted
}
// IsInProgress checks whether a certain backup is in progress
func (backupStatus *BackupStatus) IsInProgress() bool {
return backupStatus.Phase == BackupPhasePending ||
backupStatus.Phase == BackupPhaseStarted ||
backupStatus.Phase == BackupPhaseRunning
}
// GetPendingBackupNames returns the pending backup list
func (list BackupList) GetPendingBackupNames() []string {
// Collect backups that are neither done nor currently in progress
pendingBackups := make([]string, 0, len(list.Items))
for _, concurrentBackup := range list.Items {
if concurrentBackup.Status.IsDone() {
continue
}
if !concurrentBackup.Status.IsInProgress() {
pendingBackups = append(pendingBackups, concurrentBackup.Name)
}
}
return pendingBackups
}
// CanExecuteBackup controls whether we can start a reconciliation loop for a certain backup.
//
// A reconciliation loop can start if:
// - no backup is running and the given backup is the first pending one in the sorted list
// - the given backup is running and is the first running backup of the list
//
// As a side effect, this function will sort the backup list
func (list *BackupList) CanExecuteBackup(backupName string) bool {
var foundRunningBackup bool
list.SortByName()
for _, concurrentBackup := range list.Items {
if concurrentBackup.Status.IsInProgress() {
if backupName == concurrentBackup.Name && !foundRunningBackup {
return true
}
foundRunningBackup = true
if backupName != concurrentBackup.Name {
return false
}
}
}
pendingBackups := list.GetPendingBackupNames()
if len(pendingBackups) > 0 && pendingBackups[0] != backupName {
return false
}
return true
}
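// For illustration: given three backups sorted by name, where "backup-2" is
// running and "backup-1" and "backup-3" are pending, only
// CanExecuteBackup("backup-2") returns true; the pending backups are retried
// once the running one is done.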
// SortByName sorts the backup items in alphabetical order
func (list *BackupList) SortByName() {
// Sort the list of backups in alphabetical order
sort.Slice(list.Items, func(i, j int) bool {
return strings.Compare(list.Items[i].Name, list.Items[j].Name) <= 0
})
}
// SortByReverseCreationTime sorts the backup items in reverse creation time (starting from the latest one)
func (list *BackupList) SortByReverseCreationTime() {
// Sort the list of backups in reverse creation time
sort.Slice(list.Items, func(i, j int) bool {
return list.Items[i].CreationTimestamp.Time.Compare(list.Items[j].CreationTimestamp.Time) > 0
})
}
// GetStatus gets the backup status
func (backup *Backup) GetStatus() *BackupStatus {
return &backup.Status
}
// GetMetadata gets the metadata
func (backup *Backup) GetMetadata() *metav1.ObjectMeta {
return &backup.ObjectMeta
}
// GetName gets the backup name
func (backup *Backup) GetName() string {
return backup.Name
}
// GetNamespace gets the backup namespace
func (backup *Backup) GetNamespace() string {
return backup.Namespace
}
// GetAssignedInstance fetches the instance that was assigned to the backup execution
func (backup *Backup) GetAssignedInstance(ctx context.Context, cli client.Client) (*corev1.Pod, error) {
if backup.Status.InstanceID == nil || len(backup.Status.InstanceID.PodName) == 0 {
return nil, nil
}
var previouslyElectedPod corev1.Pod
if err := cli.Get(
ctx,
client.ObjectKey{Namespace: backup.Namespace, Name: backup.Status.InstanceID.PodName},
&previouslyElectedPod,
); err != nil {
return nil, err
}
return &previouslyElectedPod, nil
}
// GetVolumeSnapshotConfiguration overrides the configuration values with the ones specified
// in the backup, if present.
func (backup *Backup) GetVolumeSnapshotConfiguration(
clusterConfig VolumeSnapshotConfiguration,
) VolumeSnapshotConfiguration {
config := clusterConfig
if backup.Spec.Online != nil {
config.Online = backup.Spec.Online
}
if backup.Spec.OnlineConfiguration != nil {
config.OnlineConfiguration = *backup.Spec.OnlineConfiguration
}
return config
}
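// For illustration: with a cluster-level configuration whose Online field is
// nil, a Backup with Spec.Online = ptr.To(true) yields a configuration whose
// Online field is true, while the OnlineConfiguration stanza is inherited
// unchanged from the cluster.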
// EnsureGVKIsPresent ensures that the GroupVersionKind (GVK) metadata is present in the Backup object.
// This is necessary because informers do not automatically include metadata inside the object.
// By setting the GVK, we ensure that components such as the plugins have enough metadata to typecheck the object.
func (backup *Backup) EnsureGVKIsPresent() {
backup.SetGroupVersionKind(schema.GroupVersionKind{
Group: SchemeGroupVersion.Group,
Version: SchemeGroupVersion.Version,
Kind: BackupKind,
})
}
// IsEmpty checks if the plugin configuration is empty or not
func (configuration *BackupPluginConfiguration) IsEmpty() bool {
return configuration == nil || len(configuration.Name) == 0
}

api/v1/backup_funcs_test.go Normal file

@ -0,0 +1,482 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
import (
"time"
volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("BackupStatus structure", func() {
It("can be set as started", func() {
status := BackupStatus{}
pod := corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster-example-1",
},
Status: corev1.PodStatus{
ContainerStatuses: []corev1.ContainerStatus{
{
ContainerID: "container-id",
},
},
},
}
status.SetAsStarted(pod.Name, pod.Status.ContainerStatuses[0].ContainerID, BackupMethodBarmanObjectStore)
Expect(status.Phase).To(BeEquivalentTo(BackupPhaseStarted))
Expect(status.InstanceID).ToNot(BeNil())
Expect(status.InstanceID.PodName).To(Equal("cluster-example-1"))
Expect(status.InstanceID.ContainerID).To(Equal("container-id"))
Expect(status.IsDone()).To(BeFalse())
})
It("can be set to contain a snapshot list", func() {
status := BackupStatus{}
status.BackupSnapshotStatus.SetSnapshotElements([]volumesnapshot.VolumeSnapshot{
{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster-example-snapshot-1",
Annotations: map[string]string{
utils.PvcRoleLabelName: string(utils.PVCRolePgData),
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster-example-snapshot-2",
Annotations: map[string]string{
utils.PvcRoleLabelName: string(utils.PVCRolePgWal),
},
},
},
})
Expect(status.BackupSnapshotStatus.Elements).To(HaveLen(2))
Expect(status.BackupSnapshotStatus.Elements).To(ContainElement(
BackupSnapshotElementStatus{Name: "cluster-example-snapshot-1", Type: string(utils.PVCRolePgData)}))
Expect(status.BackupSnapshotStatus.Elements).To(ContainElement(
BackupSnapshotElementStatus{Name: "cluster-example-snapshot-2", Type: string(utils.PVCRolePgWal)}))
})
Context("backup phases", func() {
When("the backup phase is `running`", func() {
It("can tell if a backup is in progress or done", func() {
b := BackupStatus{
Phase: BackupPhaseRunning,
}
Expect(b.IsInProgress()).To(BeTrue())
Expect(b.IsDone()).To(BeFalse())
})
})
When("the backup phase is `pending`", func() {
It("can tell if a backup is in progress or done", func() {
b := BackupStatus{
Phase: BackupPhasePending,
}
Expect(b.IsInProgress()).To(BeTrue())
Expect(b.IsDone()).To(BeFalse())
})
})
When("the backup phase is `completed`", func() {
It("can tell if a backup is in progress or done", func() {
b := BackupStatus{
Phase: BackupPhaseCompleted,
}
Expect(b.IsInProgress()).To(BeFalse())
Expect(b.IsDone()).To(BeTrue())
})
})
When("the backup phase is `failed`", func() {
It("can tell if a backup is in progress or done", func() {
b := BackupStatus{
Phase: BackupPhaseFailed,
}
Expect(b.IsInProgress()).To(BeFalse())
Expect(b.IsDone()).To(BeTrue())
})
})
})
})
var _ = Describe("BackupList structure", func() {
It("can be sorted by name", func() {
backupList := BackupList{
Items: []Backup{
{
ObjectMeta: metav1.ObjectMeta{
Name: "backup-3",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "backup-2",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "backup-1",
},
},
},
}
backupList.SortByName()
Expect(backupList.Items).To(HaveLen(3))
Expect(backupList.Items[0].Name).To(Equal("backup-1"))
Expect(backupList.Items[1].Name).To(Equal("backup-2"))
Expect(backupList.Items[2].Name).To(Equal("backup-3"))
})
It("can be sorted by reverse creation time", func() {
now := time.Now()
backupList := BackupList{
Items: []Backup{
{
ObjectMeta: metav1.ObjectMeta{
Name: "backup-ten-minutes",
CreationTimestamp: metav1.NewTime(now.Add(-10 * time.Minute)),
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "backup-five-minutes",
CreationTimestamp: metav1.NewTime(now.Add(-5 * time.Minute)),
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "backup-now",
CreationTimestamp: metav1.NewTime(now),
},
},
},
}
backupList.SortByReverseCreationTime()
Expect(backupList.Items).To(HaveLen(3))
Expect(backupList.Items[0].Name).To(Equal("backup-now"))
Expect(backupList.Items[1].Name).To(Equal("backup-five-minutes"))
Expect(backupList.Items[2].Name).To(Equal("backup-ten-minutes"))
})
It("can isolate pending backups", func() {
backupList := BackupList{
Items: []Backup{
{
ObjectMeta: metav1.ObjectMeta{
Name: "backup-3",
},
Status: BackupStatus{
Phase: BackupPhaseRunning,
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "backup-2",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "backup-1",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "backup-5",
},
Status: BackupStatus{
Phase: BackupPhaseCompleted,
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "backup-6",
},
Status: BackupStatus{
Phase: BackupPhaseFailed,
},
},
},
}
backupList.SortByName()
pendingBackups := backupList.GetPendingBackupNames()
Expect(pendingBackups).To(ConsistOf("backup-1", "backup-2"))
})
})
var _ = Describe("backup_controller volumeSnapshot unit tests", func() {
When("there's a running backup", func() {
It("prevents concurrent backups", func() {
backupList := BackupList{
Items: []Backup{
{
ObjectMeta: metav1.ObjectMeta{
Name: "backup-1",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "backup-2",
},
Status: BackupStatus{
Phase: BackupPhaseRunning,
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "backup-3",
},
},
},
}
// The currently running backup can be executed
Expect(backupList.CanExecuteBackup("backup-1")).To(BeFalse())
Expect(backupList.CanExecuteBackup("backup-2")).To(BeTrue())
Expect(backupList.CanExecuteBackup("backup-3")).To(BeFalse())
})
})
When("there are no running backups", func() {
It("prevents concurrent backups", func() {
backupList := BackupList{
Items: []Backup{
{
ObjectMeta: metav1.ObjectMeta{
Name: "backup-1",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "backup-2",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "backup-3",
},
},
},
}
// With no backup running, only the first backup in name order can start
Expect(backupList.CanExecuteBackup("backup-1")).To(BeTrue())
Expect(backupList.CanExecuteBackup("backup-2")).To(BeFalse())
Expect(backupList.CanExecuteBackup("backup-3")).To(BeFalse())
})
})
When("there are multiple running backups", func() {
It("prevents concurrent backups", func() {
// This could happen if there is a race condition, and in this case we use a
// tie-breaker algorithm
backupList := BackupList{
Items: []Backup{
{
ObjectMeta: metav1.ObjectMeta{
Name: "backup-1",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "backup-2",
},
Status: BackupStatus{
Phase: BackupPhaseRunning,
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "backup-3",
},
Status: BackupStatus{
Phase: BackupPhaseRunning,
},
},
},
}
// Only the first running backup in name order can proceed
Expect(backupList.CanExecuteBackup("backup-1")).To(BeFalse())
Expect(backupList.CanExecuteBackup("backup-2")).To(BeTrue())
Expect(backupList.CanExecuteBackup("backup-3")).To(BeFalse())
})
})
When("there is a complete backup", func() {
It("prevents concurrent backups", func() {
backupList := BackupList{
Items: []Backup{
{
ObjectMeta: metav1.ObjectMeta{
Name: "backup-1",
},
Status: BackupStatus{
Phase: BackupPhaseCompleted,
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "backup-2",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "backup-3",
},
},
},
}
// The completed backup is skipped; the first pending backup can start
Expect(backupList.CanExecuteBackup("backup-1")).To(BeFalse())
Expect(backupList.CanExecuteBackup("backup-2")).To(BeTrue())
Expect(backupList.CanExecuteBackup("backup-3")).To(BeFalse())
})
})
})
var _ = Describe("IsCompletedVolumeSnapshot", func() {
now := time.Now()
completedBackup := Backup{
ObjectMeta: metav1.ObjectMeta{
CreationTimestamp: metav1.NewTime(now),
Name: "completed-backup",
},
Spec: BackupSpec{
Method: BackupMethodVolumeSnapshot,
},
Status: BackupStatus{
Phase: BackupPhaseCompleted,
},
}
nonCompletedBackup := Backup{
ObjectMeta: metav1.ObjectMeta{
CreationTimestamp: metav1.NewTime(now),
Name: "non-completed-backup",
},
Spec: BackupSpec{
Method: BackupMethodVolumeSnapshot,
},
Status: BackupStatus{},
}
objectStoreBackup := Backup{
ObjectMeta: metav1.ObjectMeta{
CreationTimestamp: metav1.NewTime(now),
Name: "object-store-backup",
},
Spec: BackupSpec{
Method: BackupMethodBarmanObjectStore,
},
Status: BackupStatus{
Phase: BackupPhaseCompleted,
},
}
It("should return true for a completed volume snapshot", func() {
Expect(completedBackup.IsCompletedVolumeSnapshot()).To(BeTrue())
})
It("should return false for a completed objectStore", func() {
Expect(objectStoreBackup.IsCompletedVolumeSnapshot()).To(BeFalse())
})
It("should return false for an incomplete volume snapshot", func() {
Expect(nonCompletedBackup.IsCompletedVolumeSnapshot()).To(BeFalse())
})
})
var _ = Describe("GetVolumeSnapshotConfiguration", func() {
var (
backup *Backup
clusterConfig VolumeSnapshotConfiguration
resultConfig VolumeSnapshotConfiguration
onlineValue = true
onlineConfigVal = OnlineConfiguration{
WaitForArchive: ptr.To(true),
ImmediateCheckpoint: ptr.To(false),
}
)
BeforeEach(func() {
backup = &Backup{}
clusterConfig = VolumeSnapshotConfiguration{
Online: nil,
OnlineConfiguration: OnlineConfiguration{},
}
})
JustBeforeEach(func() {
resultConfig = backup.GetVolumeSnapshotConfiguration(clusterConfig)
})
Context("when backup spec has no overrides", func() {
It("should return clusterConfig as is", func() {
Expect(resultConfig).To(Equal(clusterConfig))
})
})
Context("when backup spec has Online override", func() {
BeforeEach(func() {
backup.Spec.Online = &onlineValue
})
It("should override the Online value in clusterConfig", func() {
Expect(*resultConfig.Online).To(Equal(onlineValue))
})
})
Context("when backup spec has OnlineConfiguration override", func() {
BeforeEach(func() {
backup.Spec.OnlineConfiguration = &onlineConfigVal
})
It("should override the OnlineConfiguration value in clusterConfig", func() {
Expect(resultConfig.OnlineConfiguration).To(Equal(onlineConfigVal))
})
})
Context("when backup spec has both Online and OnlineConfiguration override", func() {
BeforeEach(func() {
backup.Spec.Online = &onlineValue
backup.Spec.OnlineConfiguration = &onlineConfigVal
})
It("should override both Online and OnlineConfiguration values in clusterConfig", func() {
Expect(*resultConfig.Online).To(Equal(onlineValue))
Expect(resultConfig.OnlineConfiguration).To(Equal(onlineConfigVal))
})
})
})

api/v1/backup_types.go Normal file

@ -0,0 +1,350 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
import (
barmanApi "github.com/cloudnative-pg/barman-cloud/pkg/api"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// BackupPhase is the phase of the backup
type BackupPhase string
const (
// BackupPhasePending means that the backup is still waiting to be started
BackupPhasePending = "pending"
// BackupPhaseStarted means that the backup is now running
BackupPhaseStarted = "started"
// BackupPhaseRunning means that the backup is now running
BackupPhaseRunning = "running"
// BackupPhaseFinalizing means that a consistent backup has been
// taken and the operator is waiting for it to be ready to be
// used to restore a cluster.
// This phase is used for VolumeSnapshot backups, when a
// VolumeSnapshotContent has already been provisioned but is
// still waiting for the `readyToUse` flag to be true.
BackupPhaseFinalizing = "finalizing"
// BackupPhaseCompleted means that the backup is now completed
BackupPhaseCompleted = "completed"
// BackupPhaseFailed means that the backup has failed
BackupPhaseFailed = "failed"
// BackupPhaseWalArchivingFailing means WAL archiving isn't properly working
BackupPhaseWalArchivingFailing = "walArchivingFailing"
)
// BarmanCredentials is an object containing the potential credentials for each cloud provider
// +kubebuilder:object:generate:=false
type BarmanCredentials = barmanApi.BarmanCredentials
// AzureCredentials is the type for the credentials to be used to upload
// files to Azure Blob Storage. The connection string contains all the
// information needed. If the connection string is not specified, we'll need the
// storage account name and also one (and only one) of:
//
// - storageKey
// - storageSasToken
//
// - inheriting the credentials from the pod environment by setting inheritFromAzureAD to true
// +kubebuilder:object:generate:=false
type AzureCredentials = barmanApi.AzureCredentials
// BarmanObjectStoreConfiguration contains the backup configuration
// using Barman against an S3-compatible object storage
// +kubebuilder:object:generate:=false
type BarmanObjectStoreConfiguration = barmanApi.BarmanObjectStoreConfiguration
// DataBackupConfiguration is the configuration of the backup of
// the data directory
// +kubebuilder:object:generate:=false
type DataBackupConfiguration = barmanApi.DataBackupConfiguration
// GoogleCredentials is the type for the Google Cloud Storage credentials.
// This needs to be specified even if we run inside a GKE environment.
// +kubebuilder:object:generate:=false
type GoogleCredentials = barmanApi.GoogleCredentials
// S3Credentials is the type for the credentials to be used to upload
// files to S3. It can be provided in two alternative ways:
//
// - explicitly passing accessKeyId and secretAccessKey
//
// - inheriting the role from the pod environment by setting inheritFromIAMRole to true
// +kubebuilder:object:generate:=false
type S3Credentials = barmanApi.S3Credentials
// WalBackupConfiguration is the configuration of the backup of the
// WAL stream
// +kubebuilder:object:generate:=false
type WalBackupConfiguration = barmanApi.WalBackupConfiguration
// BackupMethod defines the way of executing the physical base backups of
// the selected PostgreSQL instance
type BackupMethod string
const (
// BackupMethodVolumeSnapshot means using the volume snapshot
// Kubernetes feature
BackupMethodVolumeSnapshot BackupMethod = "volumeSnapshot"
// BackupMethodBarmanObjectStore means using barman to backup the
// PostgreSQL cluster
BackupMethodBarmanObjectStore BackupMethod = "barmanObjectStore"
// BackupMethodPlugin means that this backup should be handled by
// a plugin
BackupMethodPlugin BackupMethod = "plugin"
)
// BackupSpec defines the desired state of Backup
type BackupSpec struct {
// The cluster to backup
Cluster LocalObjectReference `json:"cluster"`
// The policy to decide which instance should perform this backup. If empty,
// it defaults to `cluster.spec.backup.target`.
// Available options are empty string, `primary` and `prefer-standby`.
// `primary` to have backups run always on primary instances,
// `prefer-standby` to have backups run preferably on the most updated
// standby, if available.
// +optional
// +kubebuilder:validation:Enum=primary;prefer-standby
Target BackupTarget `json:"target,omitempty"`
// The backup method to be used, possible options are `barmanObjectStore`,
// `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`.
// +optional
// +kubebuilder:validation:Enum=barmanObjectStore;volumeSnapshot;plugin
// +kubebuilder:default:=barmanObjectStore
Method BackupMethod `json:"method,omitempty"`
// Configuration parameters passed to the plugin managing this backup
// +optional
PluginConfiguration *BackupPluginConfiguration `json:"pluginConfiguration,omitempty"`
// Whether the default type of backup with volume snapshots is
// online/hot (`true`, default) or offline/cold (`false`).
// Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'
// +optional
Online *bool `json:"online,omitempty"`
// Configuration parameters to control the online/hot backup with volume snapshots.
// Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza
// +optional
OnlineConfiguration *OnlineConfiguration `json:"onlineConfiguration,omitempty"`
}
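// For illustration, a minimal Backup request targeting a cluster named
// "cluster-example" can be expressed with the following manifest (names are
// placeholders):
//
//	apiVersion: postgresql.cnpg.io/v1
//	kind: Backup
//	metadata:
//	  name: backup-example
//	spec:
//	  method: barmanObjectStore
//	  cluster:
//	    name: cluster-example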
// BackupPluginConfiguration contains the backup configuration used by
// the backup plugin
type BackupPluginConfiguration struct {
// Name is the name of the plugin managing this backup
Name string `json:"name"`
// Parameters are the configuration parameters passed to the backup
// plugin for this backup
// +optional
Parameters map[string]string `json:"parameters,omitempty"`
}
// BackupSnapshotStatus holds the fields exclusive to the volumeSnapshot backup method
type BackupSnapshotStatus struct {
// The elements list, populated with the gathered volume snapshots
// +optional
Elements []BackupSnapshotElementStatus `json:"elements,omitempty"`
}
// BackupSnapshotElementStatus is a volume snapshot that is part of a volume snapshot method backup
type BackupSnapshotElementStatus struct {
// Name is the snapshot resource name
Name string `json:"name"`
// Type is the role of the snapshot in the cluster, such as PG_DATA, PG_WAL and PG_TABLESPACE
Type string `json:"type"`
// TablespaceName is the name of the snapshotted tablespace. Only set
// when type is PG_TABLESPACE
// +optional
TablespaceName string `json:"tablespaceName,omitempty"`
}
// BackupStatus defines the observed state of Backup
type BackupStatus struct {
// The potential credentials for each cloud provider
BarmanCredentials `json:",inline"`
// EndpointCA store the CA bundle of the barman endpoint.
// Useful when using self-signed certificates to avoid
// errors with certificate issuer and barman-cloud-wal-archive.
// +optional
EndpointCA *SecretKeySelector `json:"endpointCA,omitempty"`
// Endpoint to be used to upload data to the cloud,
// overriding the automatic endpoint discovery
// +optional
EndpointURL string `json:"endpointURL,omitempty"`
// The path where the backup is stored (e.g., s3://bucket/path/to/folder);
// this path, with different destination folders, will be used for WALs
// and for data. This may not be populated in case of errors.
// +optional
DestinationPath string `json:"destinationPath,omitempty"`
// The server name on S3; the cluster name is used if this
// parameter is omitted
// +optional
ServerName string `json:"serverName,omitempty"`
// Encryption method required by the S3 API
// +optional
Encryption string `json:"encryption,omitempty"`
// The ID of the Barman backup
// +optional
BackupID string `json:"backupId,omitempty"`
// The Name of the Barman backup
// +optional
BackupName string `json:"backupName,omitempty"`
// The last backup status
// +optional
Phase BackupPhase `json:"phase,omitempty"`
// When the backup was started
// +optional
StartedAt *metav1.Time `json:"startedAt,omitempty"`
// When the backup was terminated
// +optional
StoppedAt *metav1.Time `json:"stoppedAt,omitempty"`
// The starting WAL
// +optional
BeginWal string `json:"beginWal,omitempty"`
// The ending WAL
// +optional
EndWal string `json:"endWal,omitempty"`
// The starting xlog
// +optional
BeginLSN string `json:"beginLSN,omitempty"`
// The ending xlog
// +optional
EndLSN string `json:"endLSN,omitempty"`
// The detected error
// +optional
Error string `json:"error,omitempty"`
// Unused. Retained for compatibility with old versions.
// +optional
CommandOutput string `json:"commandOutput,omitempty"`
// The backup command output in case of error
// +optional
CommandError string `json:"commandError,omitempty"`
// Backup label file content as returned by Postgres in case of online (hot) backups
// +optional
BackupLabelFile []byte `json:"backupLabelFile,omitempty"`
// Tablespace map file content as returned by Postgres in case of online (hot) backups
// +optional
TablespaceMapFile []byte `json:"tablespaceMapFile,omitempty"`
// Information to identify the instance where the backup has been taken from
// +optional
InstanceID *InstanceID `json:"instanceID,omitempty"`
// Status of the volumeSnapshot backup
// +optional
BackupSnapshotStatus BackupSnapshotStatus `json:"snapshotBackupStatus,omitempty"`
// The backup method being used
// +optional
Method BackupMethod `json:"method,omitempty"`
// Whether the backup was online/hot (`true`) or offline/cold (`false`)
// +optional
Online *bool `json:"online,omitempty"`
// A map containing the plugin metadata
// +optional
PluginMetadata map[string]string `json:"pluginMetadata,omitempty"`
}
// InstanceID contains the information to identify an instance
type InstanceID struct {
// The pod name
// +optional
PodName string `json:"podName,omitempty"`
// The container ID
// +optional
ContainerID string `json:"ContainerID,omitempty"`
}
// +genclient
// +kubebuilder:object:root=true
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".spec.cluster.name"
// +kubebuilder:printcolumn:name="Method",type="string",JSONPath=".spec.method"
// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase"
// +kubebuilder:printcolumn:name="Error",type="string",JSONPath=".status.error"
// A Backup resource is a request for a PostgreSQL backup by the user.
type Backup struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
// Specification of the desired behavior of the backup.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
Spec BackupSpec `json:"spec"`
// Most recently observed status of the backup. This data may not be up to
// date. Populated by the system. Read-only.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Status BackupStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// BackupList contains a list of Backup
type BackupList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty"`
// List of backups
Items []Backup `json:"items"`
}
func init() {
SchemeBuilder.Register(&Backup{}, &BackupList{})
}

api/v1/base_funcs.go Normal file

@ -0,0 +1,77 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
import (
corev1 "k8s.io/api/core/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
// SecretKeySelectorToCore transforms a SecretKeySelector structure to the
// analogous one in the corev1 namespace
func SecretKeySelectorToCore(selector *SecretKeySelector) *corev1.SecretKeySelector {
if selector == nil {
return nil
}
return &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: selector.LocalObjectReference.Name,
},
Key: selector.Key,
}
}
// ConfigMapKeySelectorToCore transforms a ConfigMapKeySelector structure to the analogous
// one in the corev1 namespace
func ConfigMapKeySelectorToCore(selector *ConfigMapKeySelector) *corev1.ConfigMapKeySelector {
if selector == nil {
return nil
}
return &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: selector.Name,
},
Key: selector.Key,
}
}
// ListStatusPods returns the names of the active Pods grouped by status, skipping Pods being deleted
func ListStatusPods(podList []corev1.Pod) map[PodStatus][]string {
podsNames := make(map[PodStatus][]string)
for _, pod := range podList {
if !pod.DeletionTimestamp.IsZero() {
continue
}
switch {
case utils.IsPodReady(pod):
podsNames[PodHealthy] = append(podsNames[PodHealthy], pod.Name)
case utils.IsPodActive(pod):
podsNames[PodReplicating] = append(podsNames[PodReplicating], pod.Name)
default:
podsNames[PodFailed] = append(podsNames[PodFailed], pod.Name)
}
}
return podsNames
}
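// For illustration: given a ready pod, a running-but-not-ready pod, and a
// failed pod (hypothetical names), ListStatusPods reports
//
//	map[PodStatus][]string{
//		PodHealthy:     {"ready-pod"},
//		PodReplicating: {"running-pod"},
//		PodFailed:      {"failed-pod"},
//	}
//
// while pods carrying a deletion timestamp are skipped entirely.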

api/v1/base_funcs_test.go Normal file

@ -0,0 +1,166 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("Base type mappings for secrets", func() {
It("correctly map nil values", func() {
Expect(SecretKeySelectorToCore(nil)).To(BeNil())
})
It("correctly map non-nil values", func() {
selector := SecretKeySelector{
LocalObjectReference: LocalObjectReference{
Name: "thisName",
},
Key: "thisKey",
}
Expect(selector.Name).To(Equal("thisName"))
Expect(selector.Key).To(Equal("thisKey"))
})
})
var _ = Describe("Base type mappings for configmaps", func() {
It("correctly map nil values", func() {
Expect(ConfigMapKeySelectorToCore(nil)).To(BeNil())
})
It("correctly map non-nil values", func() {
selector := ConfigMapKeySelector{
LocalObjectReference: LocalObjectReference{
Name: "thisName",
},
Key: "thisKey",
}
Expect(selector.Name).To(Equal("thisName"))
Expect(selector.Key).To(Equal("thisKey"))
})
})
var _ = Describe("Properly builds ListStatusPods", func() {
healthyPod := corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "healthyPod",
},
Status: corev1.PodStatus{
Conditions: []corev1.PodCondition{
{
Type: corev1.ContainersReady,
Status: corev1.ConditionTrue,
},
},
},
}
activePod := corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "activePod",
},
Status: corev1.PodStatus{
Phase: corev1.PodRunning,
Conditions: []corev1.PodCondition{
{
Type: corev1.ContainersReady,
Status: corev1.ConditionFalse,
},
},
},
}
failedPod := corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "failedPod",
},
Status: corev1.PodStatus{
Phase: corev1.PodFailed,
Conditions: []corev1.PodCondition{
{
Type: corev1.ContainersReady,
Status: corev1.ConditionFalse,
},
},
},
}
now := metav1.Now()
terminatingPod := corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "terminatingPod",
DeletionTimestamp: &now,
},
Status: corev1.PodStatus{
Phase: corev1.PodRunning,
Conditions: []corev1.PodCondition{
{
Type: corev1.ContainersReady,
Status: corev1.ConditionTrue,
},
},
},
}
It("Detects healthy pods", func() {
podList := []corev1.Pod{healthyPod, healthyPod}
expectedStatus := map[PodStatus][]string{
PodHealthy: {"healthyPod", "healthyPod"},
}
podStatus := ListStatusPods(podList)
Expect(podStatus).To(BeEquivalentTo(expectedStatus))
})
It("Detects active pods", func() {
podList := []corev1.Pod{healthyPod, activePod}
expectedStatus := map[PodStatus][]string{
PodHealthy: {"healthyPod"},
PodReplicating: {"activePod"},
}
podStatus := ListStatusPods(podList)
Expect(podStatus).To(BeEquivalentTo(expectedStatus))
})
It("Detects failed pods", func() {
podList := []corev1.Pod{healthyPod, activePod, failedPod}
expectedStatus := map[PodStatus][]string{
PodHealthy: {"healthyPod"},
PodReplicating: {"activePod"},
PodFailed: {"failedPod"},
}
podStatus := ListStatusPods(podList)
Expect(podStatus).To(BeEquivalentTo(expectedStatus))
})
It("Excludes terminating pods", func() {
podList := []corev1.Pod{healthyPod, activePod, failedPod, terminatingPod}
expectedStatus := map[PodStatus][]string{
PodHealthy: {"healthyPod"},
PodReplicating: {"activePod"},
PodFailed: {"failedPod"},
}
podStatus := ListStatusPods(podList)
Expect(podStatus).To(BeEquivalentTo(expectedStatus))
})
})

api/v1/base_types.go Normal file

@ -0,0 +1,53 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
import (
machineryapi "github.com/cloudnative-pg/machinery/pkg/api"
)
// PodStatus represents the possible status of pods
type PodStatus string
const (
// PodHealthy means that a Pod is active and ready
PodHealthy = "healthy"
// PodReplicating means that a Pod is active but not yet ready
PodReplicating = "replicating"
// PodFailed means that a Pod will not be scheduled again (deleted or evicted)
PodFailed = "failed"
)
// LocalObjectReference contains enough information to let you locate a
// local object with a known type inside the same namespace
// +kubebuilder:object:generate:=false
type LocalObjectReference = machineryapi.LocalObjectReference
// SecretKeySelector contains enough information to let you locate
// the key of a Secret
// +kubebuilder:object:generate:=false
type SecretKeySelector = machineryapi.SecretKeySelector
// ConfigMapKeySelector contains enough information to let you locate
// the key of a ConfigMap
// +kubebuilder:object:generate:=false
type ConfigMapKeySelector = machineryapi.ConfigMapKeySelector


@ -0,0 +1,54 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// Conditions that can be used to communicate the progress of a Backup
var (
// BackupSucceededCondition is added to a backup
// when it was completed correctly
BackupSucceededCondition = metav1.Condition{
Type: string(ConditionBackup),
Status: metav1.ConditionTrue,
Reason: string(ConditionReasonLastBackupSucceeded),
Message: "Backup was successful",
}
// BackupStartingCondition is added to a backup
// when it started
BackupStartingCondition = metav1.Condition{
Type: string(ConditionBackup),
Status: metav1.ConditionFalse,
Reason: string(ConditionBackupStarted),
Message: "New Backup starting up",
}
// BuildClusterBackupFailedCondition builds
// ConditionReasonLastBackupFailed condition
BuildClusterBackupFailedCondition = func(err error) metav1.Condition {
return metav1.Condition{
Type: string(ConditionBackup),
Status: metav1.ConditionFalse,
Reason: string(ConditionReasonLastBackupFailed),
Message: err.Error(),
}
}
)
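// For illustration, these conditions are meant to be applied with the
// standard apimachinery helper, assuming the status carries a
// []metav1.Condition slice:
//
//	meta.SetStatusCondition(&cluster.Status.Conditions, BackupSucceededCondition)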

api/v1/cluster_defaults.go Normal file

@ -0,0 +1,266 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
import (
"github.com/cloudnative-pg/machinery/pkg/log"
"github.com/cloudnative-pg/machinery/pkg/stringset"
"k8s.io/utils/ptr"
"github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
const (
// DefaultMonitoringKey is the key that should be used in the default metrics configmap to store the queries
DefaultMonitoringKey = "queries"
// DefaultMonitoringConfigMapName is the name of the target configmap with the default monitoring queries,
// if configured
DefaultMonitoringConfigMapName = "cnpg-default-monitoring"
// DefaultMonitoringSecretName is the name of the target secret with the default monitoring queries,
// if configured
DefaultMonitoringSecretName = DefaultMonitoringConfigMapName
// DefaultApplicationDatabaseName is the name of application database if not specified
DefaultApplicationDatabaseName = "app"
// DefaultApplicationUserName is the name of application database owner if not specified
DefaultApplicationUserName = DefaultApplicationDatabaseName
)
// Default applies the defaults to undefined values in a Cluster, preserving the user settings
func (r *Cluster) Default() {
r.setDefaults(true)
}
// SetDefaults applies the defaults to undefined values in a Cluster
func (r *Cluster) SetDefaults() {
r.setDefaults(false)
}
func (r *Cluster) setDefaults(preserveUserSettings bool) {
// Defaulting the image name if not specified
if r.Spec.ImageName == "" && r.Spec.ImageCatalogRef == nil {
r.Spec.ImageName = configuration.Current.PostgresImageName
}
// Defaulting the bootstrap method if not specified
if r.Spec.Bootstrap == nil {
r.Spec.Bootstrap = &BootstrapConfiguration{}
}
// Defaulting initDB if no other bootstrap method was passed
switch {
case r.Spec.Bootstrap.Recovery != nil:
r.defaultRecovery()
case r.Spec.Bootstrap.PgBaseBackup != nil:
r.defaultPgBaseBackup()
default:
r.defaultInitDB()
}
// Defaulting the pod anti-affinity type if pod anti-affinity is enabled and no type was specified
if (r.Spec.Affinity.EnablePodAntiAffinity == nil || *r.Spec.Affinity.EnablePodAntiAffinity) &&
r.Spec.Affinity.PodAntiAffinityType == "" {
r.Spec.Affinity.PodAntiAffinityType = PodAntiAffinityTypePreferred
}
if r.Spec.Backup != nil && r.Spec.Backup.Target == "" {
r.Spec.Backup.Target = DefaultBackupTarget
}
psqlVersion, err := r.GetPostgresqlVersion()
if err == nil {
// If the version cannot be detected, the validation error will already
// have been raised by the validateImageName function
info := postgres.ConfigurationInfo{
Settings: postgres.CnpgConfigurationSettings,
Version: psqlVersion,
UserSettings: r.Spec.PostgresConfiguration.Parameters,
IsReplicaCluster: r.IsReplica(),
PreserveFixedSettingsFromUser: preserveUserSettings,
IsWalArchivingDisabled: utils.IsWalArchivingDisabled(&r.ObjectMeta),
IsAlterSystemEnabled: r.Spec.PostgresConfiguration.EnableAlterSystem,
}
sanitizedParameters := postgres.CreatePostgresqlConfiguration(info).GetConfigurationParameters()
r.Spec.PostgresConfiguration.Parameters = sanitizedParameters
}
if r.Spec.LogLevel == "" {
r.Spec.LogLevel = log.InfoLevelString
}
// We inject the default monitoring queries if the MonitoringQueriesConfigmap parameter is not empty
// and the default queries are not disabled in the cluster CRD
if !r.Spec.Monitoring.AreDefaultQueriesDisabled() {
r.defaultMonitoringQueries(configuration.Current)
}
// If the ReplicationSlots or HighAvailability stanzas are nil, we create them and enable slots
if r.Spec.ReplicationSlots == nil {
r.Spec.ReplicationSlots = &ReplicationSlotsConfiguration{}
}
if r.Spec.ReplicationSlots.HighAvailability == nil {
r.Spec.ReplicationSlots.HighAvailability = &ReplicationSlotsHAConfiguration{
Enabled: ptr.To(true),
SlotPrefix: "_cnpg_",
}
}
if r.Spec.ReplicationSlots.SynchronizeReplicas == nil {
r.Spec.ReplicationSlots.SynchronizeReplicas = &SynchronizeReplicasConfiguration{
Enabled: ptr.To(true),
}
}
if len(r.Spec.Tablespaces) > 0 {
r.defaultTablespaces()
}
r.setDefaultPlugins(configuration.Current)
}
func (r *Cluster) setDefaultPlugins(config *configuration.Data) {
// Add the list of pre-defined plugins
foundPlugins := stringset.New()
for _, plugin := range r.Spec.Plugins {
foundPlugins.Put(plugin.Name)
}
for _, pluginName := range config.GetIncludePlugins() {
if !foundPlugins.Has(pluginName) {
r.Spec.Plugins = append(r.Spec.Plugins, PluginConfiguration{
Name: pluginName,
Enabled: ptr.To(true),
})
}
}
}
// defaultTablespaces sets the tablespace owner where the
// user didn't specify one
func (r *Cluster) defaultTablespaces() {
defaultOwner := r.GetApplicationDatabaseOwner()
if len(defaultOwner) == 0 {
defaultOwner = "postgres"
}
for name, tablespaceConfiguration := range r.Spec.Tablespaces {
if len(tablespaceConfiguration.Owner.Name) == 0 {
tablespaceConfiguration.Owner.Name = defaultOwner
}
r.Spec.Tablespaces[name] = tablespaceConfiguration
}
}
// defaultMonitoringQueries adds the default monitoring queries configMap
// if not already present in CustomQueriesConfigMap
func (r *Cluster) defaultMonitoringQueries(config *configuration.Data) {
if r.Spec.Monitoring == nil {
r.Spec.Monitoring = &MonitoringConfiguration{}
}
if config.MonitoringQueriesConfigmap != "" {
var defaultConfigMapQueriesAlreadyPresent bool
// We check if the default queries are already inserted in the monitoring configuration
for _, monitoringConfigMap := range r.Spec.Monitoring.CustomQueriesConfigMap {
if monitoringConfigMap.Name == DefaultMonitoringConfigMapName {
defaultConfigMapQueriesAlreadyPresent = true
break
}
}
// If the default queries are already present there is no need to re-add them.
// Note that in this case the default configMap could overwrite existing user queries
// depending on the order. This is accepted behavior, because the user willingly defined the order of their array
if !defaultConfigMapQueriesAlreadyPresent {
r.Spec.Monitoring.CustomQueriesConfigMap = append([]ConfigMapKeySelector{
{
LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName},
Key: DefaultMonitoringKey,
},
}, r.Spec.Monitoring.CustomQueriesConfigMap...)
}
}
if config.MonitoringQueriesSecret != "" {
var defaultSecretQueriesAlreadyPresent bool
// We check if the default queries are already inserted in the monitoring configuration
for _, monitoringSecret := range r.Spec.Monitoring.CustomQueriesSecret {
if monitoringSecret.Name == DefaultMonitoringSecretName {
defaultSecretQueriesAlreadyPresent = true
break
}
}
if !defaultSecretQueriesAlreadyPresent {
r.Spec.Monitoring.CustomQueriesSecret = append([]SecretKeySelector{
{
LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringSecretName},
Key: DefaultMonitoringKey,
},
}, r.Spec.Monitoring.CustomQueriesSecret...)
}
}
}
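// Illustrative sketch, not part of the original file: the default queries are
// prepended to the user-provided ones, so on conflicting keys the entries the
// user placed later in the array win. Assuming a _test.go file of package v1
// with "fmt" and the internal configuration package imported:
func ExampleCluster_defaultMonitoringQueries() {
	cluster := &Cluster{
		Spec: ClusterSpec{
			Monitoring: &MonitoringConfiguration{
				CustomQueriesConfigMap: []ConfigMapKeySelector{
					{LocalObjectReference: LocalObjectReference{Name: "user-queries"}, Key: "queries"},
				},
			},
		},
	}
	cluster.defaultMonitoringQueries(&configuration.Data{
		MonitoringQueriesConfigmap: "some-default-queries",
	})
	for _, cm := range cluster.Spec.Monitoring.CustomQueriesConfigMap {
		fmt.Println(cm.Name)
	}
	// Output:
	// cnpg-default-monitoring
	// user-queries
}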
// defaultInitDB enriches the initDB with defaults if not all the required arguments were passed
func (r *Cluster) defaultInitDB() {
if r.Spec.Bootstrap.InitDB == nil {
r.Spec.Bootstrap.InitDB = &BootstrapInitDB{
Database: DefaultApplicationDatabaseName,
Owner: DefaultApplicationUserName,
}
}
if r.Spec.Bootstrap.InitDB.Database == "" {
r.Spec.Bootstrap.InitDB.Database = DefaultApplicationDatabaseName
}
if r.Spec.Bootstrap.InitDB.Owner == "" {
r.Spec.Bootstrap.InitDB.Owner = r.Spec.Bootstrap.InitDB.Database
}
if r.Spec.Bootstrap.InitDB.Encoding == "" {
r.Spec.Bootstrap.InitDB.Encoding = "UTF8"
}
if r.Spec.Bootstrap.InitDB.LocaleCollate == "" {
r.Spec.Bootstrap.InitDB.LocaleCollate = "C"
}
if r.Spec.Bootstrap.InitDB.LocaleCType == "" {
r.Spec.Bootstrap.InitDB.LocaleCType = "C"
}
}
// defaultRecovery enriches the recovery with defaults if not all the required arguments were passed
func (r *Cluster) defaultRecovery() {
if r.Spec.Bootstrap.Recovery.Database == "" {
r.Spec.Bootstrap.Recovery.Database = DefaultApplicationDatabaseName
}
if r.Spec.Bootstrap.Recovery.Owner == "" {
r.Spec.Bootstrap.Recovery.Owner = r.Spec.Bootstrap.Recovery.Database
}
}
// defaultPgBaseBackup enriches the pg_basebackup with defaults if not all the required arguments were passed
func (r *Cluster) defaultPgBaseBackup() {
if r.Spec.Bootstrap.PgBaseBackup.Database == "" {
r.Spec.Bootstrap.PgBaseBackup.Database = DefaultApplicationDatabaseName
}
if r.Spec.Bootstrap.PgBaseBackup.Owner == "" {
r.Spec.Bootstrap.PgBaseBackup.Owner = r.Spec.Bootstrap.PgBaseBackup.Database
}
}

View File

@ -0,0 +1,319 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
import (
"k8s.io/utils/ptr"
"github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("cluster default configuration", func() {
It("defaults to creating an application database", func() {
cluster := Cluster{}
cluster.Default()
Expect(cluster.Spec.Bootstrap.InitDB.Database).To(Equal("app"))
Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("app"))
})
It("defaults the owner user with the database name", func() {
cluster := Cluster{
Spec: ClusterSpec{
Bootstrap: &BootstrapConfiguration{
InitDB: &BootstrapInitDB{
Database: "appdb",
},
},
},
}
cluster.Default()
Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("appdb"))
})
It("defaults to create an application database if recovery is used", func() {
cluster := Cluster{
Spec: ClusterSpec{
Bootstrap: &BootstrapConfiguration{
Recovery: &BootstrapRecovery{},
},
},
}
cluster.Default()
Expect(cluster.ShouldRecoveryCreateApplicationDatabase()).Should(BeTrue())
Expect(cluster.Spec.Bootstrap.Recovery.Database).ShouldNot(BeEmpty())
Expect(cluster.Spec.Bootstrap.Recovery.Owner).ShouldNot(BeEmpty())
Expect(cluster.Spec.Bootstrap.Recovery.Secret).Should(BeNil())
})
It("defaults the owner user with the database name for recovery", func() {
cluster := Cluster{
Spec: ClusterSpec{
Bootstrap: &BootstrapConfiguration{
Recovery: &BootstrapRecovery{
Database: "appdb",
},
},
},
}
cluster.Default()
Expect(cluster.Spec.Bootstrap.Recovery.Owner).To(Equal("appdb"))
})
It("defaults to create an application database if pg_basebackup is used", func() {
cluster := Cluster{
Spec: ClusterSpec{
Bootstrap: &BootstrapConfiguration{
PgBaseBackup: &BootstrapPgBaseBackup{},
},
},
}
cluster.Default()
Expect(cluster.ShouldPgBaseBackupCreateApplicationDatabase()).Should(BeTrue())
Expect(cluster.Spec.Bootstrap.PgBaseBackup.Database).ShouldNot(BeEmpty())
Expect(cluster.Spec.Bootstrap.PgBaseBackup.Owner).ShouldNot(BeEmpty())
Expect(cluster.Spec.Bootstrap.PgBaseBackup.Secret).Should(BeNil())
})
It("defaults the owner user with the database name for pg_basebackup", func() {
cluster := Cluster{
Spec: ClusterSpec{
Bootstrap: &BootstrapConfiguration{
PgBaseBackup: &BootstrapPgBaseBackup{
Database: "appdb",
},
},
},
}
cluster.Default()
Expect(cluster.Spec.Bootstrap.PgBaseBackup.Owner).To(Equal("appdb"))
})
It("defaults the PostgreSQL configuration with parameters from the operator", func() {
cluster := Cluster{}
cluster.Default()
Expect(cluster.Spec.PostgresConfiguration.Parameters).ToNot(BeEmpty())
})
It("defaults the anti-affinity", func() {
cluster := Cluster{
Spec: ClusterSpec{
Affinity: AffinityConfiguration{},
},
}
cluster.Default()
Expect(cluster.Spec.Affinity.PodAntiAffinityType).To(BeEquivalentTo(PodAntiAffinityTypePreferred))
Expect(cluster.Spec.Affinity.EnablePodAntiAffinity).To(BeNil())
})
It("should fill the image name if isn't already set", func() {
cluster := Cluster{}
cluster.Default()
Expect(cluster.Spec.ImageName).To(Equal(configuration.Current.PostgresImageName))
})
It("shouldn't set the image name if already present", func() {
cluster := Cluster{
Spec: ClusterSpec{
ImageName: "test:13",
},
}
cluster.Default()
Expect(cluster.Spec.ImageName).To(Equal("test:13"))
})
It("should setup the application database name", func() {
cluster := Cluster{}
cluster.Default()
Expect(cluster.Spec.Bootstrap.InitDB.Database).To(Equal("app"))
Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("app"))
})
It("should set the owner name as the database name", func() {
cluster := Cluster{
Spec: ClusterSpec{
Bootstrap: &BootstrapConfiguration{
InitDB: &BootstrapInitDB{
Database: "test",
},
},
},
}
cluster.Default()
Expect(cluster.Spec.Bootstrap.InitDB.Database).To(Equal("test"))
Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("test"))
})
It("should not overwrite application database and owner settings", func() {
cluster := Cluster{
Spec: ClusterSpec{
Bootstrap: &BootstrapConfiguration{
InitDB: &BootstrapInitDB{
Database: "testdb",
Owner: "testuser",
},
},
},
}
cluster.Default()
Expect(cluster.Spec.Bootstrap.InitDB.Database).To(Equal("testdb"))
Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("testuser"))
})
})
var _ = Describe("Default monitoring queries", func() {
It("correctly set the default monitoring queries configmap and secret when none is already specified", func() {
cluster := &Cluster{}
cluster.defaultMonitoringQueries(&configuration.Data{
MonitoringQueriesSecret: "test-secret",
MonitoringQueriesConfigmap: "test-configmap",
})
Expect(cluster.Spec.Monitoring).NotTo(BeNil())
Expect(cluster.Spec.Monitoring.CustomQueriesConfigMap).NotTo(BeEmpty())
Expect(cluster.Spec.Monitoring.CustomQueriesConfigMap).
To(ContainElement(ConfigMapKeySelector{
LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName},
Key: DefaultMonitoringKey,
}))
Expect(cluster.Spec.Monitoring.CustomQueriesSecret).NotTo(BeEmpty())
Expect(cluster.Spec.Monitoring.CustomQueriesSecret).
To(ContainElement(SecretKeySelector{
LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringSecretName},
Key: DefaultMonitoringKey,
}))
})
testCluster := &Cluster{Spec: ClusterSpec{Monitoring: &MonitoringConfiguration{
CustomQueriesConfigMap: []ConfigMapKeySelector{
{
LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName},
Key: "test2",
},
},
CustomQueriesSecret: []SecretKeySelector{
{
LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName},
Key: "test3",
},
},
}}}
It("correctly set the default monitoring queries configmap when other metrics are already specified", func() {
modifiedCluster := testCluster.DeepCopy()
modifiedCluster.defaultMonitoringQueries(&configuration.Data{
MonitoringQueriesConfigmap: "test-configmap",
})
Expect(modifiedCluster.Spec.Monitoring).NotTo(BeNil())
Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap).NotTo(BeEmpty())
Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret).NotTo(BeEmpty())
Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap).
To(ContainElement(ConfigMapKeySelector{
LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName},
Key: "test2",
}))
Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret).
To(BeEquivalentTo(testCluster.Spec.Monitoring.CustomQueriesSecret))
Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap).
To(ContainElements(testCluster.Spec.Monitoring.CustomQueriesConfigMap))
})
It("correctly set the default monitoring queries secret when other metrics are already specified", func() {
modifiedCluster := testCluster.DeepCopy()
modifiedCluster.defaultMonitoringQueries(&configuration.Data{
MonitoringQueriesSecret: "test-secret",
})
Expect(modifiedCluster.Spec.Monitoring).NotTo(BeNil())
Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret).NotTo(BeEmpty())
Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap).NotTo(BeEmpty())
Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret).
To(ContainElement(SecretKeySelector{
LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringSecretName},
Key: "test3",
}))
Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap).
To(BeEquivalentTo(testCluster.Spec.Monitoring.CustomQueriesConfigMap))
Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret).
To(ContainElements(testCluster.Spec.Monitoring.CustomQueriesSecret))
})
})
var _ = Describe("setDefaultPlugins", func() {
It("adds pre-defined plugins if not already present", func() {
cluster := &Cluster{
Spec: ClusterSpec{
Plugins: []PluginConfiguration{
{Name: "existing-plugin", Enabled: ptr.To(true)},
},
},
}
config := &configuration.Data{
IncludePlugins: "predefined-plugin1,predefined-plugin2",
}
cluster.setDefaultPlugins(config)
Expect(cluster.Spec.Plugins).To(
ContainElement(PluginConfiguration{Name: "existing-plugin", Enabled: ptr.To(true)}))
Expect(cluster.Spec.Plugins).To(
ContainElement(PluginConfiguration{Name: "predefined-plugin1", Enabled: ptr.To(true)}))
Expect(cluster.Spec.Plugins).To(
ContainElement(PluginConfiguration{Name: "predefined-plugin2", Enabled: ptr.To(true)}))
})
It("does not add pre-defined plugins if already present", func() {
cluster := &Cluster{
Spec: ClusterSpec{
Plugins: []PluginConfiguration{
{Name: "predefined-plugin1", Enabled: ptr.To(false)},
},
},
}
config := &configuration.Data{
IncludePlugins: "predefined-plugin1,predefined-plugin2",
}
cluster.setDefaultPlugins(config)
Expect(cluster.Spec.Plugins).To(HaveLen(2))
Expect(cluster.Spec.Plugins).To(
ContainElement(PluginConfiguration{Name: "predefined-plugin1", Enabled: ptr.To(false)}))
Expect(cluster.Spec.Plugins).To(
ContainElement(PluginConfiguration{Name: "predefined-plugin2", Enabled: ptr.To(true)}))
})
It("handles empty plugin list gracefully", func() {
cluster := &Cluster{}
config := &configuration.Data{
IncludePlugins: "predefined-plugin1",
}
cluster.setDefaultPlugins(config)
Expect(cluster.Spec.Plugins).To(HaveLen(1))
Expect(cluster.Spec.Plugins).To(
ContainElement(PluginConfiguration{Name: "predefined-plugin1", Enabled: ptr.To(true)}))
})
})

1495
api/v1/cluster_funcs.go Normal file

File diff suppressed because it is too large

1739
api/v1/cluster_funcs_test.go Normal file

File diff suppressed because it is too large

2462
api/v1/cluster_types.go Normal file

File diff suppressed because it is too large

View File

@ -0,0 +1,25 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
// GetSpec returns the Spec of the ClusterImageCatalog
func (c *ClusterImageCatalog) GetSpec() *ImageCatalogSpec {
return &c.Spec
}

View File

@ -0,0 +1,54 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// +genclient
// +genclient:nonNamespaced
// +kubebuilder:object:root=true
// +kubebuilder:resource:scope=Cluster
// +kubebuilder:storageversion
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// ClusterImageCatalog is the Schema for the clusterimagecatalogs API
type ClusterImageCatalog struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
// Specification of the desired behavior of the ClusterImageCatalog.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
Spec ImageCatalogSpec `json:"spec"`
}
// +kubebuilder:object:root=true
// ClusterImageCatalogList contains a list of ClusterImageCatalog
type ClusterImageCatalogList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
metav1.ListMeta `json:"metadata"`
// List of ClusterImageCatalogs
Items []ClusterImageCatalog `json:"items"`
}
func init() {
SchemeBuilder.Register(&ClusterImageCatalog{}, &ClusterImageCatalogList{})
}

47
api/v1/common_types.go Normal file
View File

@ -0,0 +1,47 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
// VolumeSnapshotKind is a strongly typed reference to the kind used by the volumesnapshot package
const VolumeSnapshotKind = "VolumeSnapshot"
// Metadata is a structure similar to the metav1.ObjectMeta, but still
// parseable by controller-gen to create a suitable CRD for the user.
// The comment of PodTemplateSpec has an explanation of why we are
// not using the core data types.
type Metadata struct {
// The name of the resource. Only supported for certain types
// +optional
Name string `json:"name,omitempty"`
// Map of string keys and values that can be used to organize and categorize
// (scope and select) objects. May match selectors of replication controllers
// and services.
// More info: http://kubernetes.io/docs/user-guide/labels
// +optional
Labels map[string]string `json:"labels,omitempty"`
// Annotations is an unstructured key value map stored with a resource that may be
// set by external tools to store and retrieve arbitrary metadata. They are not
// queryable and should be preserved when modifying objects.
// More info: http://kubernetes.io/docs/user-guide/annotations
// +optional
Annotations map[string]string `json:"annotations,omitempty"`
}

90
api/v1/database_funcs.go Normal file
View File

@ -0,0 +1,90 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/utils/ptr"
)
// SetAsFailed sets the database as failed with the given error
func (db *Database) SetAsFailed(err error) {
db.Status.Applied = ptr.To(false)
db.Status.Message = err.Error()
}
// SetAsUnknown sets the database as unknown with the given error
func (db *Database) SetAsUnknown(err error) {
db.Status.Applied = nil
db.Status.Message = err.Error()
}
// SetAsReady sets the database as working correctly
func (db *Database) SetAsReady() {
db.Status.Applied = ptr.To(true)
db.Status.Message = ""
db.Status.ObservedGeneration = db.Generation
}
// GetStatusMessage returns the status message of the database
func (db *Database) GetStatusMessage() string {
return db.Status.Message
}
// GetClusterRef returns the cluster reference of the database
func (db *Database) GetClusterRef() corev1.LocalObjectReference {
return db.Spec.ClusterRef
}
// GetManagedObjectName returns the name of the managed database object
func (db *Database) GetManagedObjectName() string {
return db.Spec.Name
}
// GetName returns the database object name
func (db *Database) GetName() string {
return db.Name
}
// HasReconciliations returns true if the database object has been reconciled at least once
func (db *Database) HasReconciliations() bool {
return db.Status.ObservedGeneration > 0
}
// SetStatusObservedGeneration sets the observed generation of the database
func (db *Database) SetStatusObservedGeneration(obsGeneration int64) {
db.Status.ObservedGeneration = obsGeneration
}
// MustHaveManagedResourceExclusivity detects conflicting databases
func (dbList *DatabaseList) MustHaveManagedResourceExclusivity(reference *Database) error {
pointers := toSliceWithPointers(dbList.Items)
return ensureManagedResourceExclusivity(reference, pointers)
}
// GetEnsure gets the ensure status of the resource
func (dbObject DatabaseObjectSpec) GetEnsure() EnsureOption {
return dbObject.Ensure
}
// GetName gets the name of the resource
func (dbObject DatabaseObjectSpec) GetName() string {
return dbObject.Name
}
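// Illustrative sketch, not part of the original file: how a reconciler would
// typically drive the status helpers above. Assuming a _test.go file of
// package v1 with "errors" and "fmt" imported:
func ExampleDatabase_SetAsReady() {
	db := &Database{}
	db.Generation = 2

	db.SetAsFailed(errors.New("connection refused"))
	fmt.Println(*db.Status.Applied, db.GetStatusMessage())

	db.SetAsReady()
	fmt.Println(*db.Status.Applied, db.Status.ObservedGeneration, db.HasReconciliations())
	// Output:
	// false connection refused
	// true 2 true
}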

297
api/v1/database_types.go Normal file
View File

@ -0,0 +1,297 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// DatabaseReclaimPolicy describes a policy for end-of-life maintenance of databases.
// +enum
type DatabaseReclaimPolicy string
const (
// DatabaseReclaimDelete means the database will be deleted from its PostgreSQL Cluster on release
// from its claim.
DatabaseReclaimDelete DatabaseReclaimPolicy = "delete"
// DatabaseReclaimRetain means the database will be left in its current phase for manual
// reclamation by the administrator. The default policy is Retain.
DatabaseReclaimRetain DatabaseReclaimPolicy = "retain"
)
// DatabaseSpec is the specification of a Postgresql Database, built around the
// `CREATE DATABASE`, `ALTER DATABASE`, and `DROP DATABASE` SQL commands of
// PostgreSQL.
// +kubebuilder:validation:XValidation:rule="!has(self.builtinLocale) || self.localeProvider == 'builtin'",message="builtinLocale is only available when localeProvider is set to `builtin`"
// +kubebuilder:validation:XValidation:rule="!has(self.icuLocale) || self.localeProvider == 'icu'",message="icuLocale is only available when localeProvider is set to `icu`"
// +kubebuilder:validation:XValidation:rule="!has(self.icuRules) || self.localeProvider == 'icu'",message="icuRules is only available when localeProvider is set to `icu`"
type DatabaseSpec struct {
// The name of the PostgreSQL cluster hosting the database.
ClusterRef corev1.LocalObjectReference `json:"cluster"`
// Ensure the PostgreSQL database is `present` or `absent` - defaults to "present".
// +kubebuilder:default:="present"
// +kubebuilder:validation:Enum=present;absent
// +optional
Ensure EnsureOption `json:"ensure,omitempty"`
// The name of the database to create inside PostgreSQL. This setting cannot be changed.
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="name is immutable"
// +kubebuilder:validation:XValidation:rule="self != 'postgres'",message="the name postgres is reserved"
// +kubebuilder:validation:XValidation:rule="self != 'template0'",message="the name template0 is reserved"
// +kubebuilder:validation:XValidation:rule="self != 'template1'",message="the name template1 is reserved"
Name string `json:"name"`
// Maps to the `OWNER` parameter of `CREATE DATABASE`.
// Maps to the `OWNER TO` command of `ALTER DATABASE`.
// The role name of the user who owns the database inside PostgreSQL.
Owner string `json:"owner"`
// Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting
// cannot be changed. The name of the template from which to create
// this database.
// +optional
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="template is immutable"
Template string `json:"template,omitempty"`
// Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting
// cannot be changed. Character set encoding to use in the database.
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="encoding is immutable"
// +optional
Encoding string `json:"encoding,omitempty"`
// Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting
// cannot be changed. Sets the default collation order and character
// classification in the new database.
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="locale is immutable"
// +optional
Locale string `json:"locale,omitempty"`
// Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This
// setting cannot be changed. This option sets the locale provider for
// databases created in the new cluster. Available from PostgreSQL 16.
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="localeProvider is immutable"
// +optional
LocaleProvider string `json:"localeProvider,omitempty"`
// Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This
// setting cannot be changed.
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="localeCollate is immutable"
// +optional
LcCollate string `json:"localeCollate,omitempty"`
// Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting
// cannot be changed.
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="localeCType is immutable"
// +optional
LcCtype string `json:"localeCType,omitempty"`
// Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This
// setting cannot be changed. Specifies the ICU locale when the ICU
// provider is used. This option requires `localeProvider` to be set to
// `icu`. Available from PostgreSQL 15.
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="icuLocale is immutable"
// +optional
IcuLocale string `json:"icuLocale,omitempty"`
// Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting
// cannot be changed. Specifies additional collation rules to customize
// the behavior of the default collation. This option requires
// `localeProvider` to be set to `icu`. Available from PostgreSQL 16.
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="icuRules is immutable"
// +optional
IcuRules string `json:"icuRules,omitempty"`
// Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This
// setting cannot be changed. Specifies the locale name when the
// builtin provider is used. This option requires `localeProvider` to
// be set to `builtin`. Available from PostgreSQL 17.
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="builtinLocale is immutable"
// +optional
BuiltinLocale string `json:"builtinLocale,omitempty"`
// Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This
// setting cannot be changed.
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="collationVersion is immutable"
// +optional
CollationVersion string `json:"collationVersion,omitempty"`
// Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER
// DATABASE`. If true, this database is considered a template and can
// be cloned by any user with `CREATEDB` privileges.
// +optional
IsTemplate *bool `json:"isTemplate,omitempty"`
// Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and
// `ALTER DATABASE`. If false then no one can connect to this database.
// +optional
AllowConnections *bool `json:"allowConnections,omitempty"`
// Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and
// `ALTER DATABASE`. How many concurrent connections can be made to
// this database. -1 (the default) means no limit.
// +optional
ConnectionLimit *int `json:"connectionLimit,omitempty"`
// Maps to the `TABLESPACE` parameter of `CREATE DATABASE`.
// Maps to the `SET TABLESPACE` command of `ALTER DATABASE`.
// The name of the tablespace (in PostgreSQL) that will be associated
// with the new database. This tablespace will be the default
// tablespace used for objects created in this database.
// +optional
Tablespace string `json:"tablespace,omitempty"`
// The policy for end-of-life maintenance of this database.
// +kubebuilder:validation:Enum=delete;retain
// +kubebuilder:default:=retain
// +optional
ReclaimPolicy DatabaseReclaimPolicy `json:"databaseReclaimPolicy,omitempty"`
// The list of schemas to be managed in the database
// +optional
Schemas []SchemaSpec `json:"schemas,omitempty"`
// The list of extensions to be managed in the database
// +optional
Extensions []ExtensionSpec `json:"extensions,omitempty"`
}
// DatabaseObjectSpec contains the fields which are common to every
// database object
type DatabaseObjectSpec struct {
// Name of the extension/schema
Name string `json:"name"`
// Specifies whether an extension/schema should be present or absent in
// the database. If set to `present`, the extension/schema will be
// created if it does not exist. If set to `absent`, the
// extension/schema will be removed if it exists.
// +kubebuilder:default:="present"
// +kubebuilder:validation:Enum=present;absent
// +optional
Ensure EnsureOption `json:"ensure"`
}
// SchemaSpec configures a schema in a database
type SchemaSpec struct {
// Common fields
DatabaseObjectSpec `json:",inline"`
// The role name of the user who owns the schema inside PostgreSQL.
// It maps to the `AUTHORIZATION` parameter of `CREATE SCHEMA` and the
// `OWNER TO` command of `ALTER SCHEMA`.
Owner string `json:"owner,omitempty"`
}
// ExtensionSpec configures an extension in a database
type ExtensionSpec struct {
// Common fields
DatabaseObjectSpec `json:",inline"`
// The version of the extension to install. If empty, the operator will
// install the default version (whatever is specified in the
// extension's control file)
Version string `json:"version,omitempty"`
// The name of the schema in which to install the extension's objects,
// in case the extension allows its contents to be relocated. If not
// specified (default), and the extension's control file does not
// specify a schema either, the current default object creation schema
// is used.
Schema string `json:"schema,omitempty"`
}
// DatabaseStatus defines the observed state of Database
type DatabaseStatus struct {
// A sequence number representing the latest
// desired state that was synchronized
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// Applied is true if the database was reconciled correctly
// +optional
Applied *bool `json:"applied,omitempty"`
// Message is the reconciliation output message
// +optional
Message string `json:"message,omitempty"`
// Schemas is the status of the managed schemas
// +optional
Schemas []DatabaseObjectStatus `json:"schemas,omitempty"`
// Extensions is the status of the managed extensions
// +optional
Extensions []DatabaseObjectStatus `json:"extensions,omitempty"`
}
// DatabaseObjectStatus is the status of the managed database objects
type DatabaseObjectStatus struct {
// The name of the object
Name string `json:"name"`
// True if the object has been installed successfully in
// the database
Applied bool `json:"applied"`
// Message is the object reconciliation message
// +optional
Message string `json:"message,omitempty"`
}
// +genclient
// +kubebuilder:object:root=true
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".spec.cluster.name"
// +kubebuilder:printcolumn:name="PG Name",type="string",JSONPath=".spec.name"
// +kubebuilder:printcolumn:name="Applied",type="boolean",JSONPath=".status.applied"
// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Latest reconciliation message"
// Database is the Schema for the databases API
type Database struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
// Specification of the desired Database.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
Spec DatabaseSpec `json:"spec"`
// Most recently observed status of the Database. This data may not be up to
// date. Populated by the system. Read-only.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Status DatabaseStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// DatabaseList contains a list of Database
type DatabaseList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Database `json:"items"`
}
func init() {
SchemeBuilder.Register(&Database{}, &DatabaseList{})
}
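// Illustrative sketch, not part of the original file: building a Database
// object in Go that declares one managed schema and one managed extension
// (the YAML manifest form is equivalent). The "cluster-example" name and the
// EnsureOption literal are assumptions for illustration only:
func exampleDatabase() *Database {
	return &Database{
		ObjectMeta: metav1.ObjectMeta{Name: "app-db"},
		Spec: DatabaseSpec{
			ClusterRef: corev1.LocalObjectReference{Name: "cluster-example"},
			Name:       "app",
			Owner:      "app",
			Schemas: []SchemaSpec{
				{
					DatabaseObjectSpec: DatabaseObjectSpec{Name: "analytics", Ensure: EnsureOption("present")},
					Owner:              "app",
				},
			},
			Extensions: []ExtensionSpec{
				{DatabaseObjectSpec: DatabaseObjectSpec{Name: "pg_stat_statements"}},
			},
		},
	}
}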

23
api/v1/doc.go Normal file
View File

@ -0,0 +1,23 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
// Package v1 contains API Schema definitions for the postgresql v1 API group
// +kubebuilder:object:generate=true
// +groupName=postgresql.cnpg.io
package v1

67
api/v1/generic_funcs.go Normal file
View File

@ -0,0 +1,67 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
import (
"fmt"
corev1 "k8s.io/api/core/v1"
)
type managedResourceComparer interface {
GetName() string
GetManagedObjectName() string
GetClusterRef() corev1.LocalObjectReference
HasReconciliations() bool
}
func ensureManagedResourceExclusivity[T managedResourceComparer](t1 T, list []T) error {
for _, t2 := range list {
if t1.GetName() == t2.GetName() {
continue
}
if t1.GetClusterRef().Name != t2.GetClusterRef().Name {
continue
}
if !t2.HasReconciliations() {
continue
}
if t1.GetManagedObjectName() == t2.GetManagedObjectName() {
return fmt.Errorf(
"%q is already managed by object %q",
t1.GetManagedObjectName(), t2.GetName(),
)
}
}
return nil
}
// toSliceWithPointers converts a slice of items to a slice of pointers to the items.
// Taking the address of the loop variable below is safe on Go 1.22 and later,
// where each loop iteration gets a fresh variable.
func toSliceWithPointers[T any](items []T) []*T {
result := make([]*T, len(items))
for i, item := range items {
result[i] = &item
}
return result
}
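// Illustrative sketch, not part of the original file: two Database objects
// pointing at the same cluster and managing the same PostgreSQL database
// conflict as soon as one of them has already been reconciled. Assuming a
// _test.go file of package v1 with "fmt", corev1, and metav1 imported:
func ExampleDatabaseList_MustHaveManagedResourceExclusivity() {
	reconciled := Database{
		ObjectMeta: metav1.ObjectMeta{Name: "db-one"},
		Spec: DatabaseSpec{
			ClusterRef: corev1.LocalObjectReference{Name: "cluster-example"},
			Name:       "app",
		},
		Status: DatabaseStatus{ObservedGeneration: 1},
	}
	duplicate := Database{
		ObjectMeta: metav1.ObjectMeta{Name: "db-two"},
		Spec: DatabaseSpec{
			ClusterRef: corev1.LocalObjectReference{Name: "cluster-example"},
			Name:       "app",
		},
	}
	list := DatabaseList{Items: []Database{reconciled, duplicate}}
	fmt.Println(list.MustHaveManagedResourceExclusivity(&duplicate))
	// Output:
	// "app" is already managed by object "db-one"
}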

View File

@ -0,0 +1,36 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// +kubebuilder:object:generate=false
// GenericImageCatalog is an interface used to manage ClusterImageCatalog and ImageCatalog in the same way
type GenericImageCatalog interface {
runtime.Object
metav1.Object
// GetSpec returns the Spec of the GenericImageCatalog
GetSpec() *ImageCatalogSpec
}

View File

@ -0,0 +1,62 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
const (
// ClusterKind is the kind name of Clusters
ClusterKind = "Cluster"
// BackupKind is the kind name of Backups
BackupKind = "Backup"
// PoolerKind is the kind name of Poolers
PoolerKind = "Pooler"
// ImageCatalogKind is the kind name of namespaced image catalogs
ImageCatalogKind = "ImageCatalog"
// ClusterImageCatalogKind is the kind name of the cluster-wide image catalogs
ClusterImageCatalogKind = "ClusterImageCatalog"
// PublicationKind is the kind name of publications
PublicationKind = "Publication"
// SubscriptionKind is the kind name of subscriptions
SubscriptionKind = "Subscription"
// DatabaseKind is the kind name of databases
DatabaseKind = "Database"
)
var (
// SchemeGroupVersion is group version used to register these objects
SchemeGroupVersion = schema.GroupVersion{Group: "postgresql.cnpg.io", Version: "v1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
)

View File

@ -0,0 +1,36 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
// GetSpec returns the Spec of the ImageCatalog
func (c *ImageCatalog) GetSpec() *ImageCatalogSpec {
return &c.Spec
}
// FindImageForMajor finds the correct image for the selected major version
func (spec *ImageCatalogSpec) FindImageForMajor(major int) (string, bool) {
for _, entry := range spec.Images {
if entry.Major == major {
return entry.Image, true
}
}
return "", false
}

View File

@ -0,0 +1,52 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("image catalog", func() {
catalogSpec := ImageCatalogSpec{
Images: []CatalogImage{
{
Image: "test:15",
Major: 15,
},
{
Image: "test:16",
Major: 16,
},
},
}
It("looks up an image given the major version", func() {
image, ok := catalogSpec.FindImageForMajor(16)
Expect(image).To(Equal("test:16"))
Expect(ok).To(BeTrue())
})
It("complains whether the requested image is not specified", func() {
image, ok := catalogSpec.FindImageForMajor(13)
Expect(image).To(BeEmpty())
Expect(ok).To(BeFalse())
})
})

View File

@ -0,0 +1,72 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ImageCatalogSpec defines the desired ImageCatalog
type ImageCatalogSpec struct {
// List of CatalogImages available in the catalog
// +kubebuilder:validation:MinItems=1
// +kubebuilder:validation:MaxItems=8
// +kubebuilder:validation:XValidation:rule="self.all(e, self.filter(f, f.major==e.major).size() == 1)",message=Images must have unique major versions
Images []CatalogImage `json:"images"`
}
// CatalogImage defines the image and major version
type CatalogImage struct {
// The image reference
Image string `json:"image"`
// +kubebuilder:validation:Minimum=10
// The PostgreSQL major version of the image. Must be unique within the catalog.
Major int `json:"major"`
}
// +genclient
// +kubebuilder:object:root=true
// +kubebuilder:storageversion
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// ImageCatalog is the Schema for the imagecatalogs API
type ImageCatalog struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
// Specification of the desired behavior of the ImageCatalog.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
Spec ImageCatalogSpec `json:"spec"`
}
// +kubebuilder:object:root=true
// ImageCatalogList contains a list of ImageCatalog
type ImageCatalogList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
metav1.ListMeta `json:"metadata"`
// List of ImageCatalogs
Items []ImageCatalog `json:"items"`
}
func init() {
SchemeBuilder.Register(&ImageCatalog{}, &ImageCatalogList{})
}

60
api/v1/pooler_funcs.go Normal file
View File

@ -0,0 +1,60 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
// IsPaused returns whether all databases should be paused or not.
func (in PgBouncerSpec) IsPaused() bool {
return in.Paused != nil && *in.Paused
}
// GetAuthQuerySecretName returns the specified AuthQuerySecret name for PgBouncer
// if provided or the default name otherwise.
func (in *Pooler) GetAuthQuerySecretName() string {
if in.Spec.PgBouncer != nil && in.Spec.PgBouncer.AuthQuerySecret != nil {
return in.Spec.PgBouncer.AuthQuerySecret.Name
}
return in.Spec.Cluster.Name + DefaultPgBouncerPoolerSecretSuffix
}
// GetAuthQuery returns the specified AuthQuery for PgBouncer
// if provided, or the default query otherwise.
func (in *Pooler) GetAuthQuery() string {
if in.Spec.PgBouncer.AuthQuery != "" {
return in.Spec.PgBouncer.AuthQuery
}
return DefaultPgBouncerPoolerAuthQuery
}
// IsAutomatedIntegration returns whether the Pooler integration with the
// Cluster is automated or not.
func (in *Pooler) IsAutomatedIntegration() bool {
if in.Spec.PgBouncer == nil {
return true
}
// If the user specified an AuthQuerySecret or an AuthQuery, the integration
// is not going to be handled by the operator.
if (in.Spec.PgBouncer.AuthQuerySecret != nil && in.Spec.PgBouncer.AuthQuerySecret.Name != "") ||
in.Spec.PgBouncer.AuthQuery != "" {
return false
}
return true
}
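// Illustrative sketch, not part of the original file: setting either an
// authQuery or an authQuerySecret opts the Pooler out of the automatic
// Cluster integration. Assuming a _test.go file of package v1 with "fmt"
// imported:
func ExamplePooler_IsAutomatedIntegration() {
	automated := Pooler{
		Spec: PoolerSpec{
			Cluster:   LocalObjectReference{Name: "cluster-example"},
			PgBouncer: &PgBouncerSpec{},
		},
	}
	manual := automated
	manual.Spec.PgBouncer = &PgBouncerSpec{
		AuthQuery: "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1",
	}

	fmt.Println(automated.IsAutomatedIntegration())
	fmt.Println(manual.IsAutomatedIntegration())
	// Output:
	// true
	// false
}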

View File

@ -0,0 +1,40 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("Pooler type tests", func() {
It("pgbouncer pools are not paused by default", func() {
pgbouncer := PgBouncerSpec{}
Expect(pgbouncer.IsPaused()).To(BeFalse())
})
It("pgbouncer pools can be paused", func() {
trueVal := true
pgbouncer := PgBouncerSpec{
Paused: &trueVal,
}
Expect(pgbouncer.IsPaused()).To(BeTrue())
})
})

276
api/v1/pooler_types.go Normal file
View File

@ -0,0 +1,276 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
import (
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// PoolerType is the type of the connection pool, meaning the service
// we are targeting. Allowed values are `rw`, `ro`, and `r`.
// +kubebuilder:validation:Enum=rw;ro;r
type PoolerType string
const (
// PoolerTypeRW means that the pooler involves only the primary server
PoolerTypeRW = PoolerType("rw")
// PoolerTypeRO means that the pooler involves only the replicas
PoolerTypeRO = PoolerType("ro")
// PoolerTypeR means that the pooler involves every instance
PoolerTypeR = PoolerType("r")
// DefaultPgBouncerPoolerAuthQuery is the default auth_query for PgBouncer
DefaultPgBouncerPoolerAuthQuery = "SELECT usename, passwd FROM public.user_search($1)"
)
// PgBouncerPoolMode is the mode of PgBouncer
// +kubebuilder:validation:Enum=session;transaction
type PgBouncerPoolMode string
const (
// PgBouncerPoolModeSession the "session" mode
PgBouncerPoolModeSession = PgBouncerPoolMode("session")
// PgBouncerPoolModeTransaction the "transaction" mode
PgBouncerPoolModeTransaction = PgBouncerPoolMode("transaction")
)
// PoolerSpec defines the desired state of Pooler
type PoolerSpec struct {
// This is the cluster reference on which the Pooler will work.
// The Pooler name should never match any cluster name within the same namespace.
Cluster LocalObjectReference `json:"cluster"`
// Type of service to forward traffic to. Default: `rw`.
// +kubebuilder:default:=rw
// +optional
Type PoolerType `json:"type,omitempty"`
// The number of replicas we want. Default: 1.
// +kubebuilder:default:=1
// +optional
Instances *int32 `json:"instances,omitempty"`
// The template of the Pod to be created
// +optional
Template *PodTemplateSpec `json:"template,omitempty"`
// The PgBouncer configuration
PgBouncer *PgBouncerSpec `json:"pgbouncer"`
// The deployment strategy to use for pgbouncer to replace existing pods with new ones
// +optional
DeploymentStrategy *appsv1.DeploymentStrategy `json:"deploymentStrategy,omitempty"`
// The configuration of the monitoring infrastructure of this pooler.
// +optional
Monitoring *PoolerMonitoringConfiguration `json:"monitoring,omitempty"`
// Template for the Service to be created
// +optional
ServiceTemplate *ServiceTemplateSpec `json:"serviceTemplate,omitempty"`
}
// PoolerMonitoringConfiguration is the type containing all the monitoring
// configuration for a certain Pooler.
//
// Mirrors the Cluster's MonitoringConfiguration but without the custom queries
// part for now.
type PoolerMonitoringConfiguration struct {
// Enable or disable the `PodMonitor`
// +kubebuilder:default:=false
// +optional
EnablePodMonitor bool `json:"enablePodMonitor,omitempty"`
// The list of metric relabelings for the `PodMonitor`. Applied to samples before ingestion.
// +optional
PodMonitorMetricRelabelConfigs []monitoringv1.RelabelConfig `json:"podMonitorMetricRelabelings,omitempty"`
// The list of relabelings for the `PodMonitor`. Applied to samples before scraping.
// +optional
PodMonitorRelabelConfigs []monitoringv1.RelabelConfig `json:"podMonitorRelabelings,omitempty"`
}
// PodTemplateSpec is a structure allowing the user to set
// a template for Pod generation.
//
// Unfortunately we can't use the corev1.PodTemplateSpec
// type because the generated CRD won't have the field for the
// metadata section.
//
// References:
// https://github.com/kubernetes-sigs/controller-tools/issues/385
// https://github.com/kubernetes-sigs/controller-tools/issues/448
// https://github.com/prometheus-operator/prometheus-operator/issues/3041
type PodTemplateSpec struct {
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
ObjectMeta Metadata `json:"metadata,omitempty"`
// Specification of the desired behavior of the pod.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Spec corev1.PodSpec `json:"spec,omitempty"`
}
// ServiceTemplateSpec is a structure allowing the user to set
// a template for Service generation.
type ServiceTemplateSpec struct {
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
ObjectMeta Metadata `json:"metadata,omitempty"`
// Specification of the desired behavior of the service.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Spec corev1.ServiceSpec `json:"spec,omitempty"`
}
// PgBouncerSpec defines how to configure PgBouncer
type PgBouncerSpec struct {
// The pool mode. Default: `session`.
// +kubebuilder:default:=session
// +optional
PoolMode PgBouncerPoolMode `json:"poolMode,omitempty"`
// The credentials of the user that needs to be used for the authentication
// query. If specified, an AuthQuery
// (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1")
// must also be specified, and no automatic CNPG Cluster integration will be triggered.
// +optional
AuthQuerySecret *LocalObjectReference `json:"authQuerySecret,omitempty"`
// The query that will be used to download the hash of the password
// of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)".
// If specified, an AuthQuerySecret must also be specified, and
// no automatic CNPG Cluster integration will be triggered.
// +optional
AuthQuery string `json:"authQuery,omitempty"`
// Additional parameters to be passed to PgBouncer - please check
// the CNPG documentation for a list of options you can configure
// +optional
Parameters map[string]string `json:"parameters,omitempty"`
// PostgreSQL Host Based Authentication rules (lines to be appended
// to the pg_hba.conf file)
// +optional
PgHBA []string `json:"pg_hba,omitempty"`
// When set to `true`, PgBouncer will disconnect from the PostgreSQL
// server, first waiting for all queries to complete, and pause all new
// client connections until this value is set to `false` (default). Internally,
// the operator calls PgBouncer's `PAUSE` and `RESUME` commands.
// +kubebuilder:default:=false
// +optional
Paused *bool `json:"paused,omitempty"`
}
// PoolerStatus defines the observed state of Pooler
type PoolerStatus struct {
// The versions of all the secrets used by the pooler
// +optional
Secrets *PoolerSecrets `json:"secrets,omitempty"`
// The number of pods trying to be scheduled
// +optional
Instances int32 `json:"instances,omitempty"`
}
// PoolerSecrets contains the versions of all the secrets used
type PoolerSecrets struct {
// The server TLS secret version
// +optional
ServerTLS SecretVersion `json:"serverTLS,omitempty"`
// The server CA secret version
// +optional
ServerCA SecretVersion `json:"serverCA,omitempty"`
// The client CA secret version
// +optional
ClientCA SecretVersion `json:"clientCA,omitempty"`
// The version of the secrets used by PgBouncer
// +optional
PgBouncerSecrets *PgBouncerSecrets `json:"pgBouncerSecrets,omitempty"`
}
// PgBouncerSecrets contains the versions of the secrets used
// by pgbouncer
type PgBouncerSecrets struct {
// The auth query secret version
// +optional
AuthQuery SecretVersion `json:"authQuery,omitempty"`
}
// SecretVersion contains a secret name and its ResourceVersion
type SecretVersion struct {
// The name of the secret
// +optional
Name string `json:"name,omitempty"`
// The ResourceVersion of the secret
// +optional
Version string `json:"version,omitempty"`
}
// +genclient
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".spec.cluster.name"
// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".spec.type"
// +kubebuilder:subresource:scale:specpath=.spec.instances,statuspath=.status.instances
// Pooler is the Schema for the poolers API
type Pooler struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
// Specification of the desired behavior of the Pooler.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
Spec PoolerSpec `json:"spec"`
// Most recently observed status of the Pooler. This data may not be up to
// date. Populated by the system. Read-only.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Status PoolerStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// PoolerList contains a list of Pooler
type PoolerList struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ListMeta `json:"metadata,omitempty"`
Items []Pooler `json:"items"`
}
func init() {
SchemeBuilder.Register(&Pooler{}, &PoolerList{})
}


@ -0,0 +1,80 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/utils/ptr"
)
// SetAsFailed sets the publication as failed with the given error
func (pub *Publication) SetAsFailed(err error) {
pub.Status.Applied = ptr.To(false)
pub.Status.Message = err.Error()
}
// SetAsUnknown sets the publication as unknown with the given error
func (pub *Publication) SetAsUnknown(err error) {
pub.Status.Applied = nil
pub.Status.Message = err.Error()
}
// SetAsReady sets the publication as working correctly
func (pub *Publication) SetAsReady() {
pub.Status.Applied = ptr.To(true)
pub.Status.Message = ""
pub.Status.ObservedGeneration = pub.Generation
}
// GetStatusMessage returns the status message of the publication
func (pub *Publication) GetStatusMessage() string {
return pub.Status.Message
}
// GetClusterRef returns the cluster reference of the publication
func (pub *Publication) GetClusterRef() corev1.LocalObjectReference {
return pub.Spec.ClusterRef
}
// GetManagedObjectName returns the name of the managed publication object
func (pub *Publication) GetManagedObjectName() string {
return pub.Spec.Name
}
// HasReconciliations returns true if the publication has been reconciled at least once
func (pub *Publication) HasReconciliations() bool {
return pub.Status.ObservedGeneration > 0
}
// GetName returns the publication name
func (pub *Publication) GetName() string {
return pub.Name
}
// SetStatusObservedGeneration sets the observed generation of the publication
func (pub *Publication) SetStatusObservedGeneration(obsGeneration int64) {
pub.Status.ObservedGeneration = obsGeneration
}
// MustHaveManagedResourceExclusivity detects conflicting publications
func (pub *PublicationList) MustHaveManagedResourceExclusivity(reference *Publication) error {
pointers := toSliceWithPointers(pub.Items)
return ensureManagedResourceExclusivity(reference, pointers)
}
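// A hedged sketch of how a reconciler is expected to drive the status
// setters above; applyPublication is a hypothetical helper standing in for
// the actual reconciliation logic:
//
//	if err := applyPublication(ctx, pub); err != nil {
//		pub.SetAsFailed(err) // Applied=false, Message=err
//		return err
//	}
//	pub.SetAsReady() // Applied=true, ObservedGeneration updated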

api/v1/publication_types.go

@ -0,0 +1,165 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// PublicationReclaimPolicy defines a policy for end-of-life maintenance of Publications.
// +enum
type PublicationReclaimPolicy string
const (
// PublicationReclaimDelete means the publication will be deleted from Kubernetes on release
// from its claim.
PublicationReclaimDelete PublicationReclaimPolicy = "delete"
// PublicationReclaimRetain means the publication will be left in its current phase for manual
// reclamation by the administrator. The default policy is Retain.
PublicationReclaimRetain PublicationReclaimPolicy = "retain"
)
// PublicationSpec defines the desired state of Publication
type PublicationSpec struct {
// The name of the PostgreSQL cluster that identifies the "publisher"
ClusterRef corev1.LocalObjectReference `json:"cluster"`
// The name of the publication inside PostgreSQL
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="name is immutable"
Name string `json:"name"`
// The name of the database where the publication will be installed in
// the "publisher" cluster
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="dbname is immutable"
DBName string `json:"dbname"`
// Publication parameters part of the `WITH` clause as expected by
// PostgreSQL `CREATE PUBLICATION` command
// +optional
Parameters map[string]string `json:"parameters,omitempty"`
// Target of the publication as expected by PostgreSQL `CREATE PUBLICATION` command
Target PublicationTarget `json:"target"`
// The policy for end-of-life maintenance of this publication
// +kubebuilder:validation:Enum=delete;retain
// +kubebuilder:default:=retain
// +optional
ReclaimPolicy PublicationReclaimPolicy `json:"publicationReclaimPolicy,omitempty"`
}
// PublicationTarget is what this publication should publish
// +kubebuilder:validation:XValidation:rule="(has(self.allTables) && !has(self.objects)) || (!has(self.allTables) && has(self.objects))",message="allTables and objects are mutually exclusive"
type PublicationTarget struct {
// Marks the publication as one that replicates changes for all tables
// in the database, including tables created in the future.
// Corresponding to `FOR ALL TABLES` in PostgreSQL.
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="allTables is immutable"
// +optional
AllTables bool `json:"allTables,omitempty"`
// Just the following schema objects
// +kubebuilder:validation:XValidation:rule="!(self.exists(o, has(o.table) && has(o.table.columns)) && self.exists(o, has(o.tablesInSchema)))",message="specifying a column list when the publication also publishes tablesInSchema is not supported"
// +kubebuilder:validation:MaxItems=100000
// +optional
Objects []PublicationTargetObject `json:"objects,omitempty"`
}
// PublicationTargetObject is an object to publish
// +kubebuilder:validation:XValidation:rule="(has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema) && has(self.table))",message="tablesInSchema and table are mutually exclusive"
type PublicationTargetObject struct {
// Marks the publication as one that replicates changes for all tables
// in the specified list of schemas, including tables created in the
// future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL.
// +optional
TablesInSchema string `json:"tablesInSchema,omitempty"`
// Specifies a list of tables to add to the publication. Corresponding
// to `FOR TABLE` in PostgreSQL.
// +optional
Table *PublicationTargetTable `json:"table,omitempty"`
}
// PublicationTargetTable is a table to publish
type PublicationTargetTable struct {
// Whether to limit the publication to this table only or include all of its descendants
// +optional
Only bool `json:"only,omitempty"`
// The table name
Name string `json:"name"`
// The schema name
// +optional
Schema string `json:"schema,omitempty"`
// The columns to publish
// +optional
Columns []string `json:"columns,omitempty"`
}
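// To make the target composition concrete, a hedged example value (all
// names are made-up placeholders):
//
//	pub := Publication{
//		ObjectMeta: metav1.ObjectMeta{Name: "pub-sample", Namespace: "default"},
//		Spec: PublicationSpec{
//			ClusterRef: corev1.LocalObjectReference{Name: "cluster-example"},
//			Name:       "app_pub",
//			DBName:     "app",
//			Target: PublicationTarget{
//				Objects: []PublicationTargetObject{
//					// one table, restricted to two columns
//					{Table: &PublicationTargetTable{Schema: "public", Name: "orders", Columns: []string{"id", "total"}}},
//				},
//			},
//			ReclaimPolicy: PublicationReclaimDelete,
//		},
//	}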
// PublicationStatus defines the observed state of Publication
type PublicationStatus struct {
// A sequence number representing the latest
// desired state that was synchronized
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// Applied is true if the publication was reconciled correctly
// +optional
Applied *bool `json:"applied,omitempty"`
// Message is the reconciliation output message
// +optional
Message string `json:"message,omitempty"`
}
// +genclient
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".spec.cluster.name"
// +kubebuilder:printcolumn:name="PG Name",type="string",JSONPath=".spec.name"
// +kubebuilder:printcolumn:name="Applied",type="boolean",JSONPath=".status.applied"
// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Latest reconciliation message"
// Publication is the Schema for the publications API
type Publication struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
Spec PublicationSpec `json:"spec"`
Status PublicationStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// PublicationList contains a list of Publication
type PublicationList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Publication `json:"items"`
}
func init() {
SchemeBuilder.Register(&Publication{}, &PublicationList{})
}


@ -0,0 +1,94 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
// IsSuspended checks whether a scheduled backup has been suspended
func (scheduledBackup ScheduledBackup) IsSuspended() bool {
if scheduledBackup.Spec.Suspend == nil {
return false
}
return *scheduledBackup.Spec.Suspend
}
// IsImmediate checks whether a backup has to be issued immediately upon creation
func (scheduledBackup ScheduledBackup) IsImmediate() bool {
if scheduledBackup.Spec.Immediate == nil {
return false
}
return *scheduledBackup.Spec.Immediate
}
// GetName gets the scheduled backup name
func (scheduledBackup *ScheduledBackup) GetName() string {
return scheduledBackup.Name
}
// GetNamespace gets the scheduled backup namespace
func (scheduledBackup *ScheduledBackup) GetNamespace() string {
return scheduledBackup.Namespace
}
// GetSchedule gets the cron-like schedule of this scheduled backup
func (scheduledBackup *ScheduledBackup) GetSchedule() string {
return scheduledBackup.Spec.Schedule
}
// GetStatus gets the status that the caller may update
func (scheduledBackup *ScheduledBackup) GetStatus() *ScheduledBackupStatus {
return &scheduledBackup.Status
}
// CreateBackup creates a backup from this scheduled backup
func (scheduledBackup *ScheduledBackup) CreateBackup(name string) *Backup {
backup := Backup{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: scheduledBackup.Namespace,
},
Spec: BackupSpec{
Cluster: scheduledBackup.Spec.Cluster,
Target: scheduledBackup.Spec.Target,
Method: scheduledBackup.Spec.Method,
Online: scheduledBackup.Spec.Online,
OnlineConfiguration: scheduledBackup.Spec.OnlineConfiguration,
PluginConfiguration: scheduledBackup.Spec.PluginConfiguration,
},
}
utils.InheritAnnotations(&backup.ObjectMeta, scheduledBackup.Annotations, nil, configuration.Current)
if backup.Annotations == nil {
backup.Annotations = make(map[string]string)
}
if v := scheduledBackup.Annotations[utils.BackupVolumeSnapshotDeadlineAnnotationName]; v != "" {
backup.Annotations[utils.BackupVolumeSnapshotDeadlineAnnotationName] = v
}
return &backup
}
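// A hedged usage sketch; the name derivation below is illustrative, not the
// operator's actual naming scheme:
//
//	name := fmt.Sprintf("%s-%d", scheduledBackup.Name, time.Now().Unix())
//	backup := scheduledBackup.CreateBackup(name)
//	// backup inherits cluster, target, method and the volume snapshot
//	// deadline annotation, and can now be submitted to the API server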


@ -0,0 +1,87 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("Scheduled backup", func() {
var scheduledBackup *ScheduledBackup
backupName := "test"
BeforeEach(func() {
scheduledBackup = &ScheduledBackup{
ObjectMeta: metav1.ObjectMeta{
Annotations: make(map[string]string),
},
}
})
It("properly creates a backup with no annotations", func() {
backup := scheduledBackup.CreateBackup("test")
Expect(backup).ToNot(BeNil())
Expect(backup.ObjectMeta.Name).To(BeEquivalentTo(backupName))
Expect(backup.Annotations).To(BeEmpty())
})
It("should always inherit volumeSnapshotDeadline while creating a backup", func() {
scheduledBackup.Annotations[utils.BackupVolumeSnapshotDeadlineAnnotationName] = "20"
backup := scheduledBackup.CreateBackup("test")
Expect(backup).ToNot(BeNil())
Expect(backup.ObjectMeta.Name).To(BeEquivalentTo(backupName))
Expect(backup.Annotations[utils.BackupVolumeSnapshotDeadlineAnnotationName]).To(BeEquivalentTo("20"))
})
It("properly creates a backup with annotations", func() {
annotations := make(map[string]string, 1)
annotations["test"] = "annotations"
scheduledBackup.Annotations = annotations
configuration.Current.InheritedAnnotations = []string{"test"}
backup := scheduledBackup.CreateBackup("test")
Expect(backup).ToNot(BeNil())
Expect(backup.ObjectMeta.Name).To(BeEquivalentTo(backupName))
Expect(backup.Annotations).ToNot(BeEmpty())
Expect(backup.Spec.Target).To(BeEmpty())
})
It("properly creates a backup with standby target", func() {
scheduledBackup.Spec.Target = BackupTargetStandby
backup := scheduledBackup.CreateBackup("test")
Expect(backup).ToNot(BeNil())
Expect(backup.ObjectMeta.Name).To(BeEquivalentTo(backupName))
Expect(backup.Spec.Target).To(BeEquivalentTo(BackupTargetStandby))
})
It("properly creates a backup with primary target", func() {
scheduledBackup.Spec.Target = BackupTargetPrimary
backup := scheduledBackup.CreateBackup("test")
Expect(backup).ToNot(BeNil())
Expect(backup.ObjectMeta.Name).To(BeEquivalentTo(backupName))
Expect(backup.Spec.Target).To(BeEquivalentTo(BackupTargetPrimary))
})
})


@ -0,0 +1,139 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ScheduledBackupSpec defines the desired state of ScheduledBackup
type ScheduledBackupSpec struct {
// Whether this scheduled backup is suspended
// +optional
Suspend *bool `json:"suspend,omitempty"`
// Whether the first backup has to start immediately after creation
// +optional
Immediate *bool `json:"immediate,omitempty"`
// The schedule does not follow the same format used in Kubernetes CronJobs
// as it includes an additional seconds specifier,
// see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format
Schedule string `json:"schedule"`
// The cluster to backup
Cluster LocalObjectReference `json:"cluster"`
// Indicates which ownerReference should be put inside the created backup resources.<br />
// - none: no owner reference for created backup objects (same behavior as before the field was introduced)<br />
// - self: sets the Scheduled backup object as owner of the backup<br />
// - cluster: set the cluster as owner of the backup<br />
// +kubebuilder:validation:Enum=none;self;cluster
// +kubebuilder:default:=none
// +optional
BackupOwnerReference string `json:"backupOwnerReference,omitempty"`
// The policy to decide which instance should perform this backup. If empty,
// it defaults to `cluster.spec.backup.target`.
// Available options are empty string, `primary` and `prefer-standby`.
// `primary` to have backups run always on primary instances,
// `prefer-standby` to have backups run preferably on the most updated
// standby, if available.
// +kubebuilder:validation:Enum=primary;prefer-standby
// +optional
Target BackupTarget `json:"target,omitempty"`
// The backup method to be used, possible options are `barmanObjectStore`,
// `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`.
// +optional
// +kubebuilder:validation:Enum=barmanObjectStore;volumeSnapshot;plugin
// +kubebuilder:default:=barmanObjectStore
Method BackupMethod `json:"method,omitempty"`
// Configuration parameters passed to the plugin managing this backup
// +optional
PluginConfiguration *BackupPluginConfiguration `json:"pluginConfiguration,omitempty"`
// Whether the default type of backup with volume snapshots is
// online/hot (`true`, default) or offline/cold (`false`).
// Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'
// +optional
Online *bool `json:"online,omitempty"`
// Configuration parameters to control the online/hot backup with volume snapshots.
// Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza
// +optional
OnlineConfiguration *OnlineConfiguration `json:"onlineConfiguration,omitempty"`
}
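// A hedged example spec showing the six-field cron format (note the leading
// seconds specifier). The cluster name is a made-up placeholder, the method
// string mirrors the enum above, and this assumes the package's
// LocalObjectReference carries a Name field like its core/v1 counterpart:
//
//	suspend := false
//	spec := ScheduledBackupSpec{
//		Schedule: "0 0 2 * * *", // every day at 02:00:00
//		Cluster:  LocalObjectReference{Name: "cluster-example"},
//		Suspend:  &suspend,
//		Method:   BackupMethod("volumeSnapshot"),
//	}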
// ScheduledBackupStatus defines the observed state of ScheduledBackup
type ScheduledBackupStatus struct {
// The latest time the schedule was checked
// +optional
LastCheckTime *metav1.Time `json:"lastCheckTime,omitempty"`
// The last time a backup was successfully scheduled.
// +optional
LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty"`
// The next time a backup will be run
// +optional
NextScheduleTime *metav1.Time `json:"nextScheduleTime,omitempty"`
}
// +genclient
// +kubebuilder:object:root=true
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".spec.cluster.name"
// +kubebuilder:printcolumn:name="Last Backup",type="date",JSONPath=".status.lastScheduleTime"
// ScheduledBackup is the Schema for the scheduledbackups API
type ScheduledBackup struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
// Specification of the desired behavior of the ScheduledBackup.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
Spec ScheduledBackupSpec `json:"spec"`
// Most recently observed status of the ScheduledBackup. This data may not be up
// to date. Populated by the system. Read-only.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Status ScheduledBackupStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// ScheduledBackupList contains a list of ScheduledBackup
type ScheduledBackupList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty"`
// List of scheduled backups
Items []ScheduledBackup `json:"items"`
}
func init() {
SchemeBuilder.Register(&ScheduledBackup{}, &ScheduledBackupList{})
}


@ -0,0 +1,80 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/utils/ptr"
)
// SetAsFailed sets the subscription as failed with the given error
func (sub *Subscription) SetAsFailed(err error) {
sub.Status.Applied = ptr.To(false)
sub.Status.Message = err.Error()
}
// SetAsUnknown sets the subscription as unknown with the given error
func (sub *Subscription) SetAsUnknown(err error) {
sub.Status.Applied = nil
sub.Status.Message = err.Error()
}
// SetAsReady sets the subscription as working correctly
func (sub *Subscription) SetAsReady() {
sub.Status.Applied = ptr.To(true)
sub.Status.Message = ""
sub.Status.ObservedGeneration = sub.Generation
}
// GetStatusMessage returns the status message of the subscription
func (sub *Subscription) GetStatusMessage() string {
return sub.Status.Message
}
// GetClusterRef returns the cluster reference of the subscription
func (sub *Subscription) GetClusterRef() corev1.LocalObjectReference {
return sub.Spec.ClusterRef
}
// GetName returns the subscription object name
func (sub *Subscription) GetName() string {
return sub.Name
}
// GetManagedObjectName returns the name of the managed subscription object
func (sub *Subscription) GetManagedObjectName() string {
return sub.Spec.Name
}
// HasReconciliations returns true if the subscription has been reconciled at least once
func (sub *Subscription) HasReconciliations() bool {
return sub.Status.ObservedGeneration > 0
}
// SetStatusObservedGeneration sets the observed generation of the subscription
func (sub *Subscription) SetStatusObservedGeneration(obsGeneration int64) {
sub.Status.ObservedGeneration = obsGeneration
}
// MustHaveManagedResourceExclusivity detects conflicting subscriptions
func (sub *SubscriptionList) MustHaveManagedResourceExclusivity(reference *Subscription) error {
pointers := toSliceWithPointers(sub.Items)
return ensureManagedResourceExclusivity(reference, pointers)
}


@ -0,0 +1,124 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// SubscriptionReclaimPolicy describes a policy for end-of-life maintenance of Subscriptions.
// +enum
type SubscriptionReclaimPolicy string
const (
// SubscriptionReclaimDelete means the subscription will be deleted from Kubernetes on release
// from its claim.
SubscriptionReclaimDelete SubscriptionReclaimPolicy = "delete"
// SubscriptionReclaimRetain means the subscription will be left in its current phase for manual
// reclamation by the administrator. The default policy is Retain.
SubscriptionReclaimRetain SubscriptionReclaimPolicy = "retain"
)
// SubscriptionSpec defines the desired state of Subscription
type SubscriptionSpec struct {
// The name of the PostgreSQL cluster that identifies the "subscriber"
ClusterRef corev1.LocalObjectReference `json:"cluster"`
// The name of the subscription inside PostgreSQL
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="name is immutable"
Name string `json:"name"`
// The name of the database where the subscription will be installed in
// the "subscriber" cluster
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="dbname is immutable"
DBName string `json:"dbname"`
// Subscription parameters part of the `WITH` clause as expected by
// PostgreSQL `CREATE SUBSCRIPTION` command
// +optional
Parameters map[string]string `json:"parameters,omitempty"`
// The name of the publication inside the PostgreSQL database in the
// "publisher"
PublicationName string `json:"publicationName"`
// The name of the database containing the publication on the external
// cluster. Defaults to the one in the external cluster definition.
// +optional
PublicationDBName string `json:"publicationDBName,omitempty"`
// The name of the external cluster with the publication ("publisher")
ExternalClusterName string `json:"externalClusterName"`
// The policy for end-of-life maintenance of this subscription
// +kubebuilder:validation:Enum=delete;retain
// +kubebuilder:default:=retain
// +optional
ReclaimPolicy SubscriptionReclaimPolicy `json:"subscriptionReclaimPolicy,omitempty"`
}
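// A hedged example tying a subscriber cluster to a publication defined on an
// external cluster; every name is a made-up placeholder:
//
//	sub := SubscriptionSpec{
//		ClusterRef:          corev1.LocalObjectReference{Name: "cluster-subscriber"},
//		Name:                "app_sub",
//		DBName:              "app",
//		PublicationName:     "app_pub",
//		ExternalClusterName: "cluster-publisher",
//		ReclaimPolicy:       SubscriptionReclaimRetain,
//	}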
// SubscriptionStatus defines the observed state of Subscription
type SubscriptionStatus struct {
// A sequence number representing the latest
// desired state that was synchronized
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// Applied is true if the subscription was reconciled correctly
// +optional
Applied *bool `json:"applied,omitempty"`
// Message is the reconciliation output message
// +optional
Message string `json:"message,omitempty"`
}
// +genclient
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".spec.cluster.name"
// +kubebuilder:printcolumn:name="PG Name",type="string",JSONPath=".spec.name"
// +kubebuilder:printcolumn:name="Applied",type="boolean",JSONPath=".status.applied"
// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Latest reconciliation message"
// Subscription is the Schema for the subscriptions API
type Subscription struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
Spec SubscriptionSpec `json:"spec"`
Status SubscriptionStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// SubscriptionList contains a list of Subscription
type SubscriptionList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Subscription `json:"items"`
}
func init() {
SchemeBuilder.Register(&Subscription{}, &SubscriptionList{})
}

api/v1/suite_test.go

@ -0,0 +1,32 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
import (
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
func TestApi(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "v1 API tests")
}


@ -0,0 +1,30 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
package v1
// IMPORTANT:
// This file contains the functions that need to be copied from the api/v1 package to the cloudnative-pg/api
// repository. This is currently required because the controller-gen tool cannot generate DeepCopyInto for the
// regexp type. This will be removed once the controller-gen tool supports this feature.
// DeepCopyInto needs to be manually added for the controller-gen compiler to work correctly, given that it cannot
// generate the DeepCopyInto for the regexp type.
// The method is empty because we don't want to transfer the cache when invoking DeepCopyInto.
func (receiver synchronizeReplicasCache) DeepCopyInto(*synchronizeReplicasCache) {}

File diff suppressed because it is too large

cmd/kubectl-cnpg/main.go

@ -0,0 +1,147 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
/*
kubectl-cnpg is a plugin to manage your CloudNativePG clusters
*/
package main
import (
"os"
"github.com/cloudnative-pg/machinery/pkg/log"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/backup"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/certificate"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/destroy"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/fence"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/fio"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/hibernate"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/install"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logical/publication"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logical/subscription"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logs"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/maintenance"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/pgadmin"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/pgbench"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/promote"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/psql"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/reload"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/report"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/restart"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/snapshot"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/status"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/versions"
_ "k8s.io/client-go/plugin/pkg/client/auth"
)
func main() {
logFlags := &log.Flags{}
configFlags := genericclioptions.NewConfigFlags(true)
rootCmd := &cobra.Command{
Use: "kubectl-cnpg",
Short: "A plugin to manage your CloudNativePG clusters",
Annotations: map[string]string{
cobra.CommandDisplayNameAnnotation: "kubectl cnpg",
},
SilenceUsage: true,
PersistentPreRunE: func(cmd *cobra.Command, _ []string) error {
logFlags.ConfigureLogging()
// If we're invoking the completion or version command, we shouldn't try
// to create a Kubernetes client; just let the Cobra flow continue
if cmd.Name() == "completion" || cmd.Name() == "version" ||
(cmd.HasParent() && cmd.Parent().Name() == "completion") {
return nil
}
plugin.ConfigureColor(cmd)
return plugin.SetupKubernetesClient(configFlags)
},
}
logFlags.AddFlags(rootCmd.PersistentFlags())
configFlags.AddFlags(rootCmd.PersistentFlags())
adminGroup := &cobra.Group{
ID: plugin.GroupIDAdmin,
Title: "Operator-level administration",
}
troubleshootingGroup := &cobra.Group{
ID: plugin.GroupIDTroubleshooting,
Title: "Troubleshooting",
}
pgClusterGroup := &cobra.Group{
ID: plugin.GroupIDCluster,
Title: "Cluster administration",
}
pgDatabaseGroup := &cobra.Group{
ID: plugin.GroupIDDatabase,
Title: "Database administration",
}
miscGroup := &cobra.Group{
ID: plugin.GroupIDMiscellaneous,
Title: "Miscellaneous",
}
rootCmd.AddGroup(adminGroup, troubleshootingGroup, pgClusterGroup, pgDatabaseGroup, miscGroup)
subcommands := []*cobra.Command{
backup.NewCmd(),
certificate.NewCmd(),
destroy.NewCmd(),
fence.NewCmd(),
fio.NewCmd(),
hibernate.NewCmd(),
install.NewCmd(),
logs.NewCmd(),
maintenance.NewCmd(),
pgadmin.NewCmd(),
pgbench.NewCmd(),
promote.NewCmd(),
psql.NewCmd(),
publication.NewCmd(),
reload.NewCmd(),
report.NewCmd(),
restart.NewCmd(),
snapshot.NewCmd(),
status.NewCmd(),
subscription.NewCmd(),
versions.NewCmd(),
}
for _, cmd := range subcommands {
plugin.AddColorControlFlag(cmd)
rootCmd.AddCommand(cmd)
}
if err := rootCmd.Execute(); err != nil {
os.Exit(1)
}
}

cmd/manager/main.go

@ -0,0 +1,74 @@
/*
Copyright © contributors to CloudNativePG, established as
CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
*/
/*
The manager command is the main entrypoint of CloudNativePG operator.
*/
package main
import (
"os"
"github.com/cloudnative-pg/machinery/pkg/log"
"github.com/spf13/cobra"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/backup"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/bootstrap"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/controller"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/debug"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/instance"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/pgbouncer"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/show"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/walarchive"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/walrestore"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/versions"
_ "k8s.io/client-go/plugin/pkg/client/auth"
)
func main() {
cobra.EnableTraverseRunHooks = true
logFlags := &log.Flags{}
cmd := &cobra.Command{
Use: "manager [cmd]",
SilenceUsage: true,
PersistentPreRun: func(_ *cobra.Command, _ []string) {
logFlags.ConfigureLogging()
},
}
logFlags.AddFlags(cmd.PersistentFlags())
cmd.AddCommand(backup.NewCmd())
cmd.AddCommand(bootstrap.NewCmd())
cmd.AddCommand(controller.NewCmd())
cmd.AddCommand(instance.NewCmd())
cmd.AddCommand(show.NewCmd())
cmd.AddCommand(walarchive.NewCmd())
cmd.AddCommand(walrestore.NewCmd())
cmd.AddCommand(versions.NewCmd())
cmd.AddCommand(pgbouncer.NewCmd())
cmd.AddCommand(debug.NewCmd())
if err := cmd.Execute(); err != nil {
os.Exit(1)
}
}


@ -0,0 +1,24 @@
# The following manifests contain a self-signed issuer CR and a certificate CR.
# More documentation can be found at https://docs.cert-manager.io
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: selfsigned-issuer
namespace: system
spec:
selfSigned: {}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: serving-cert # this name should match the one that appears in kustomizeconfig.yaml
namespace: system
spec:
# $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize
dnsNames:
- $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc
- $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local
issuerRef:
kind: Issuer
name: selfsigned-issuer
secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize

Some files were not shown because too many files have changed in this diff.