Mirror of https://github.com/bitnami/charts.git, synced 2026-04-02 15:27:08 +08:00
[bitnami/mongodb-sharded]Ginkgo tests: Use rollout instead of scale down (#28937)
* [bitnami/mongodb-sharded]Ginkgo tests: Use rollout instead of scale down

Signed-off-by: Miguel Ruiz <miguel.ruiz@broadcom.com>

* Bump version

Signed-off-by: Miguel Ruiz <miguel.ruiz@broadcom.com>

* Update CHANGELOG.md

Signed-off-by: Bitnami Containers <bitnami-bot@vmware.com>

---------

Signed-off-by: Miguel Ruiz <miguel.ruiz@broadcom.com>
Signed-off-by: Bitnami Containers <bitnami-bot@vmware.com>
Co-authored-by: Bitnami Containers <bitnami-bot@vmware.com>
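For context on the change below: the tests previously restarted each shard by scaling its StatefulSet to zero and back, which takes the whole shard offline at once. A rollout restart is what `kubectl rollout restart` does instead: it stamps a `kubectl.kubernetes.io/restartedAt` annotation onto the pod template, which changes the template revision, so the controller replaces pods gradually under the configured update strategy. A minimal client-go sketch of that mechanism (the package and function names here are illustrative, not taken from the chart's test utilities):

package testutils

import (
	"context"
	"encoding/json"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// restartStatefulSet mimics `kubectl rollout restart statefulset/<name>`:
// patching the pod template's annotations bumps the controller revision,
// so pods are replaced one at a time rather than all deleted at once.
func restartStatefulSet(ctx context.Context, c kubernetes.Interface, namespace, name string) error {
	patch := map[string]interface{}{
		"spec": map[string]interface{}{
			"template": map[string]interface{}{
				"metadata": map[string]interface{}{
					"annotations": map[string]string{
						"kubectl.kubernetes.io/restartedAt": time.Now().Format(time.RFC3339),
					},
				},
			},
		},
	}
	data, err := json.Marshal(patch)
	if err != nil {
		return err
	}
	_, err = c.AppsV1().StatefulSets(namespace).Patch(ctx, name, types.StrategicMergePatchType, data, metav1.PatchOptions{})
	return err
}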
@@ -36,12 +36,14 @@ func init() {
 	timeout = time.Duration(timeoutSeconds) * time.Second
 }
 
-func TestMariaDB(t *testing.T) {
+func TestMongoDBSharded(t *testing.T) {
 	RegisterFailHandler(Fail)
 	RunSpecs(t, "MongoDB Sharded Persistence Test Suite")
 }
 
 func createJob(ctx context.Context, c kubernetes.Interface, name, port, image, stmt string) error {
+	// Default job TTL in seconds
+	ttl := int32(10)
 	securityContext := &v1.SecurityContext{
 		Privileged:               &[]bool{false}[0],
 		AllowPrivilegeEscalation: &[]bool{false}[0],
@@ -61,6 +63,7 @@ func createJob(ctx context.Context, c kubernetes.Interface, name, port, image, stmt string) error {
 			Kind:       "Job",
 		},
 		Spec: batchv1.JobSpec{
+			TTLSecondsAfterFinished: &ttl,
 			Template: v1.PodTemplateSpec{
 				Spec: v1.PodSpec{
 					RestartPolicy: "Never",
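The new TTLSecondsAfterFinished field hands each finished helper Job to the TTL-after-finished controller, which garbage-collects the Job and its pods once the TTL elapses; with ttl set to 10, every Job disappears ten seconds after completing, so repeated suite runs do not trip over leftover objects. As an aside, the &[]bool{false}[0] and &ttl address-taking idioms can be written more plainly with k8s.io/utils/ptr; a small sketch (newTestJob is a hypothetical helper, not part of the chart):

package testutils

import (
	batchv1 "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/utils/ptr"
)

// newTestJob shows the same TTL pattern with ptr.To, avoiding throwaway
// variables and the &[]bool{false}[0] trick used in the diff above.
func newTestJob() *batchv1.Job {
	return &batchv1.Job{
		Spec: batchv1.JobSpec{
			// The TTL-after-finished controller deletes the Job (and its
			// pods) 10 seconds after it completes or fails.
			TTLSecondsAfterFinished: ptr.To(int32(10)),
			Template: v1.PodTemplateSpec{
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
				},
			},
		},
	}
}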
@@ -10,6 +10,7 @@ import (
 	. "github.com/onsi/gomega"
 	appsv1 "k8s.io/api/apps/v1"
 	batchv1 "k8s.io/api/batch/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 )
@@ -37,6 +38,9 @@ var _ = Describe("MongoDB Sharded", Ordered, func() {
 	getSucceededJobs := func(j *batchv1.Job) int32 { return j.Status.Succeeded }
 
 	getOpts := metav1.GetOptions{}
+	restartKey := "kubectl.kubernetes.io/restartedAt"
+	restartAnnotation := map[string]string{restartKey: time.Now().Format(time.RFC3339)}
+	getRestartedAtAnnotation := func(pod *v1.Pod) string { return pod.Annotations[restartKey] }
 
 	for i := 0; i < shards; i++ {
 		By(fmt.Sprintf("checking all the shard %d replicas are available", i))
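The extractor closures above (getSucceededJobs, getRestartedAtAnnotation, and so on) exist to feed Gomega's WithTransform, which projects the polled object down to a single field before matching. A compact sketch of the idiom, assuming the suite-level c, ctx, namespace, getOpts, timeout and PollingInterval identifiers from the surrounding spec:

// waitForAvailableReplicas is a hypothetical helper illustrating the
// Eventually + WithTransform pattern used throughout this spec: Gomega
// re-invokes the polling function until the projected value matches,
// and a non-nil second return value counts as a failed poll.
waitForAvailableReplicas := func(shardName string, want int32) {
	getAvailableReplicas := func(ss *appsv1.StatefulSet) int32 { return ss.Status.AvailableReplicas }
	Eventually(func() (*appsv1.StatefulSet, error) {
		return c.AppsV1().StatefulSets(namespace).Get(ctx, shardName, getOpts)
	}, timeout, PollingInterval).Should(WithTransform(getAvailableReplicas, Equal(want)))
}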
@@ -85,24 +89,19 @@ var _ = Describe("MongoDB Sharded", Ordered, func() {
 	}, timeout, PollingInterval).Should(WithTransform(getSucceededJobs, Equal(int32(1))))
 
 	for i := 0; i < shards; i++ {
-		By(fmt.Sprintf("Scaling shard %d down to 0 replicas", i))
+		By(fmt.Sprintf("Running rollout restart of shard %d", i))
 		shardName := fmt.Sprintf("%s-shard%d-data", releaseName, i)
 		ss, err := c.AppsV1().StatefulSets(namespace).Get(ctx, shardName, getOpts)
 		shardOrigReplicas := *ss.Spec.Replicas
-		ss, err = utils.StsScale(ctx, c, ss, 0)
+		// Annotate pods to force a rollout restart
+		ss, err = utils.StsAnnotateTemplate(ctx, c, ss, restartAnnotation)
 		Expect(err).NotTo(HaveOccurred())
-		Expect(ss.Status.Replicas).NotTo(BeZero())
-		Eventually(func() (*appsv1.StatefulSet, error) {
-			return c.AppsV1().StatefulSets(namespace).Get(ctx, shardName, getOpts)
-		}, timeout, PollingInterval).Should(WithTransform(getAvailableReplicas, BeZero()))
-
-		By(fmt.Sprintf("Scaling shard %d to the original replicas", i))
-		ss, err = utils.StsScale(ctx, c, ss, shardOrigReplicas)
-		Expect(err).NotTo(HaveOccurred())
-
-		Eventually(func() (*appsv1.StatefulSet, error) {
-			return c.AppsV1().StatefulSets(namespace).Get(ctx, shardName, getOpts)
-		}, timeout, PollingInterval).Should(WithTransform(getAvailableReplicas, Equal(shardOrigReplicas)))
+		// Wait for the new annotation in the existing pods
+		for i := int(shardOrigReplicas) - 1; i >= 0; i-- {
+			Eventually(func() (*v1.Pod, error) {
+				return c.CoreV1().Pods(namespace).Get(ctx, fmt.Sprintf("%s-%d", shardName, i), getOpts)
+			}, timeout, PollingInterval).Should(WithTransform(getRestartedAtAnnotation, Equal(restartAnnotation[restartKey])))
+		}
 	}
 
 	By("creating a job to drop the test database")
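utils.StsScale and utils.StsAnnotateTemplate come from the repository's shared test utilities, which this diff does not show. A plausible sketch of the new helper, assuming it merges the annotations into the pod template and submits an update (the real implementation may differ):

package utils

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// StsAnnotateTemplate merges the given annotations into the StatefulSet's
// pod template and updates the object; any template change triggers a
// rolling restart of the pods. Sketch only: the actual helper in the
// repo's utils package is not part of this diff.
func StsAnnotateTemplate(ctx context.Context, c kubernetes.Interface, ss *appsv1.StatefulSet, annotations map[string]string) (*appsv1.StatefulSet, error) {
	if ss.Spec.Template.Annotations == nil {
		ss.Spec.Template.Annotations = map[string]string{}
	}
	for k, v := range annotations {
		ss.Spec.Template.Annotations[k] = v
	}
	return c.AppsV1().StatefulSets(ss.Namespace).Update(ctx, ss, metav1.UpdateOptions{})
}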
@@ -1,8 +1,12 @@
 # Changelog
 
-## 8.3.4 (2024-07-25)
+## 8.3.5 (2024-08-20)
 
-* [bitnami/mongodb-sharded] Release 8.3.4 ([#28476](https://github.com/bitnami/charts/pull/28476))
+* [bitnami/mongodb-sharded]Ginkgo tests: Use rollout instead of scale down ([#28937](https://github.com/bitnami/charts/pull/28937))
+
+## <small>8.3.4 (2024-07-25)</small>
+
+* [bitnami/mongodb-sharded] Release 8.3.4 (#28476) ([a0f15d4](https://github.com/bitnami/charts/commit/a0f15d4092aab507d6a2e98f457bda516f5caf80)), closes [#28476](https://github.com/bitnami/charts/issues/28476)
 
 ## <small>8.3.3 (2024-07-24)</small>
 
@@ -35,4 +35,4 @@ maintainers:
 name: mongodb-sharded
 sources:
   - https://github.com/bitnami/charts/tree/main/bitnami/mongodb-sharded
-version: 8.3.4
+version: 8.3.5