diff --git a/pkg/webhook/fleetresourcehandler/fleetresourcehandler_webhook.go b/pkg/webhook/fleetresourcehandler/fleetresourcehandler_webhook.go index 41e2af6ab..5c04bd118 100644 --- a/pkg/webhook/fleetresourcehandler/fleetresourcehandler_webhook.go +++ b/pkg/webhook/fleetresourcehandler/fleetresourcehandler_webhook.go @@ -9,7 +9,6 @@ import ( admissionv1 "k8s.io/api/admission/v1" corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -31,15 +30,13 @@ const ( ) var ( - crdGVK = metav1.GroupVersionKind{Group: v1.SchemeGroupVersion.Group, Version: v1.SchemeGroupVersion.Version, Kind: "CustomResourceDefinition"} - mcGVK = metav1.GroupVersionKind{Group: fleetv1alpha1.GroupVersion.Group, Version: fleetv1alpha1.GroupVersion.Version, Kind: "MemberCluster"} - imcGVK = metav1.GroupVersionKind{Group: fleetv1alpha1.GroupVersion.Group, Version: fleetv1alpha1.GroupVersion.Version, Kind: "InternalMemberCluster"} - roleGVK = metav1.GroupVersionKind{Group: rbacv1.SchemeGroupVersion.Group, Version: rbacv1.SchemeGroupVersion.Version, Kind: "Role"} - roleBindingGVK = metav1.GroupVersionKind{Group: rbacv1.SchemeGroupVersion.Group, Version: rbacv1.SchemeGroupVersion.Version, Kind: "RoleBinding"} - namespaceGVK = metav1.GroupVersionKind{Group: corev1.SchemeGroupVersion.Group, Version: corev1.SchemeGroupVersion.Version, Kind: "Namespace"} + crdGVK = metav1.GroupVersionKind{Group: v1.SchemeGroupVersion.Group, Version: v1.SchemeGroupVersion.Version, Kind: "CustomResourceDefinition"} + mcGVK = metav1.GroupVersionKind{Group: fleetv1alpha1.GroupVersion.Group, Version: fleetv1alpha1.GroupVersion.Version, Kind: "MemberCluster"} + imcGVK = metav1.GroupVersionKind{Group: fleetv1alpha1.GroupVersion.Group, Version: fleetv1alpha1.GroupVersion.Version, Kind: "InternalMemberCluster"} + namespaceGVK = metav1.GroupVersionKind{Group: 
corev1.SchemeGroupVersion.Group, Version: corev1.SchemeGroupVersion.Version, Kind: "Namespace"} ) -// Add registers the webhook for K8s bulit-in object types. +// Add registers the webhook for K8s built-in object types. func Add(mgr manager.Manager, whiteListedUsers []string) error { hookServer := mgr.GetWebhookServer() hookServer.Register(ValidationPath, &webhook.Admission{Handler: &fleetResourceValidator{client: mgr.GetClient(), whiteListedUsers: whiteListedUsers}}) @@ -54,28 +51,29 @@ type fleetResourceValidator struct { // Handle receives the request then allows/denies the request to modify fleet resources. func (v *fleetResourceValidator) Handle(ctx context.Context, req admission.Request) admission.Response { + // special case for Kind:Namespace resources req.Name and req.Namespace has the same value the ObjectMeta.Name of Namespace. + if req.Kind.Kind == "Namespace" { + req.Namespace = "" + } namespacedName := types.NamespacedName{Name: req.Name, Namespace: req.Namespace} var response admission.Response if req.Operation == admissionv1.Create || req.Operation == admissionv1.Update || req.Operation == admissionv1.Delete { - switch req.Kind { - case crdGVK: + switch { + case req.Kind == crdGVK: klog.V(2).InfoS("handling CRD resource", "GVK", crdGVK, "namespacedName", namespacedName, "operation", req.Operation) response = v.handleCRD(req) - case mcGVK: - klog.V(2).InfoS("handling Member cluster resource", "GVK", mcGVK, "namespacedName", namespacedName, "operation", req.Operation) + case req.Kind == mcGVK: + klog.V(2).InfoS("handling member cluster resource", "GVK", mcGVK, "namespacedName", namespacedName, "operation", req.Operation) response = v.handleMemberCluster(req) - case imcGVK: - klog.V(2).InfoS("handling Internal member cluster resource", "GVK", imcGVK, "namespacedName", namespacedName, "operation", req.Operation) - response = v.handleInternalMemberCluster(ctx, req) - case roleGVK: - klog.V(2).InfoS("handling Role resource", "GVK", roleGVK, 
"namespacedName", namespacedName, "operation", req.Operation) - response = v.handleRole(req) - case roleBindingGVK: - klog.V(2).InfoS("handling Role binding resource", "GVK", roleBindingGVK, "namespacedName", namespacedName, "operation", req.Operation) - response = v.handleRoleBinding(req) - case namespaceGVK: + case req.Kind == namespaceGVK: klog.V(2).InfoS("handling namespace resource", "GVK", namespaceGVK, "namespacedName", namespacedName, "operation", req.Operation) response = v.handleNamespace(req) + case req.Kind == imcGVK: + klog.V(2).InfoS("handling internal member cluster resource", "GVK", imcGVK, "namespacedName", namespacedName, "operation", req.Operation) + response = v.handleInternalMemberCluster(ctx, req) + case req.Namespace != "": + klog.V(2).InfoS(fmt.Sprintf("handling %s resource", req.Kind.Kind), "GVK", req.Kind, "namespacedName", namespacedName, "operation", req.Operation) + response = validation.ValidateUserForResource(req.Kind.Kind, types.NamespacedName{Name: req.Name, Namespace: req.Namespace}, v.whiteListedUsers, req.UserInfo) default: klog.V(2).InfoS("resource is not monitored by fleet resource validator webhook", "GVK", req.Kind.String(), "namespacedName", namespacedName, "operation", req.Operation) response = admission.Allowed(fmt.Sprintf("user: %s in groups: %v is allowed to modify resource with GVK: %s", req.UserInfo.Username, req.UserInfo.Groups, req.Kind.String())) @@ -127,24 +125,7 @@ func (v *fleetResourceValidator) handleInternalMemberCluster(ctx context.Context return validation.ValidateUserForResource(currentIMC.Kind, types.NamespacedName{Name: currentIMC.Name, Namespace: currentIMC.Namespace}, v.whiteListedUsers, req.UserInfo) } -// handleRole allows/denies the request to modify role after validation. 
-func (v *fleetResourceValidator) handleRole(req admission.Request) admission.Response { - var role rbacv1.Role - if err := v.decodeRequestObject(req, &role); err != nil { - return admission.Errored(http.StatusBadRequest, err) - } - return validation.ValidateUserForResource(role.Kind, types.NamespacedName{Name: role.Name, Namespace: role.Namespace}, v.whiteListedUsers, req.UserInfo) -} - -// handleRoleBinding allows/denies the request to modify role after validation. -func (v *fleetResourceValidator) handleRoleBinding(req admission.Request) admission.Response { - var rb rbacv1.RoleBinding - if err := v.decodeRequestObject(req, &rb); err != nil { - return admission.Errored(http.StatusBadRequest, err) - } - return validation.ValidateUserForResource(rb.Kind, types.NamespacedName{Name: rb.Name, Namespace: rb.Namespace}, v.whiteListedUsers, req.UserInfo) -} - +// handleNamespace allows/denies request to modify namespace after validation. func (v *fleetResourceValidator) handleNamespace(req admission.Request) admission.Response { var currentNS corev1.Namespace if err := v.decodeRequestObject(req, &currentNS); err != nil { return admission.Errored(http.StatusBadRequest, err) } diff --git a/pkg/webhook/validation/uservalidation.go b/pkg/webhook/validation/uservalidation.go index 7f491d8cc..a40e9aaec 100644 --- a/pkg/webhook/validation/uservalidation.go +++ b/pkg/webhook/validation/uservalidation.go @@ -19,9 +19,12 @@ import ( ) const ( - mastersGroup = "system:masters" - serviceAccountsGroup = "system:serviceaccounts" - serviceAccountFmt = "system:serviceaccount:fleet-system:%s" + mastersGroup = "system:masters" + serviceAccountsGroup = "system:serviceaccounts" + nodeGroup = "system:nodes" + kubeSchedulerUser = "system:kube-scheduler" + kubeControllerManagerUser = "system:kube-controller-manager" + serviceAccountFmt = "system:serviceaccount:fleet-system:%s" imcStatusUpdateNotAllowedFormat = "user: %s in groups: %v is not allowed to update IMC status: %+v" imcAllowedGetMCFailed = "user: %s in groups: %v is allowed to update IMC: %+v
because we failed to get MC" @@ -47,11 +50,11 @@ func ValidateUserForFleetCRD(group string, namespacedName types.NamespacedName, // ValidateUserForResource checks to see if user is allowed to modify argued resource. func ValidateUserForResource(resKind string, namespacedName types.NamespacedName, whiteListedUsers []string, userInfo authenticationv1.UserInfo) admission.Response { - if isMasterGroupUserOrWhiteListedUser(whiteListedUsers, userInfo) || isUserAuthenticatedServiceAccount(userInfo) { - klog.V(2).InfoS("user in groups is allowed to modify fleet resource", "user", userInfo.Username, "groups", userInfo.Groups, "kind", resKind, "namespacedName", namespacedName) + if isMasterGroupUserOrWhiteListedUser(whiteListedUsers, userInfo) || isUserAuthenticatedServiceAccount(userInfo) || isUserKubeScheduler(userInfo) || isUserKubeControllerManager(userInfo) || isNodeGroupUser(userInfo) { + klog.V(2).InfoS("user in groups is allowed to modify resource", "user", userInfo.Username, "groups", userInfo.Groups, "kind", resKind, "namespacedName", namespacedName) return admission.Allowed(fmt.Sprintf(resourceAllowedFormat, userInfo.Username, userInfo.Groups, resKind, namespacedName)) } - klog.V(2).InfoS("user in groups is not allowed to modify fleet resource", "user", userInfo.Username, "groups", userInfo.Groups, "kind", resKind, "namespacedName", namespacedName) + klog.V(2).InfoS("user in groups is not allowed to modify resource", "user", userInfo.Username, "groups", userInfo.Groups, "kind", resKind, "namespacedName", namespacedName) return admission.Denied(fmt.Sprintf(resourceDeniedFormat, userInfo.Username, userInfo.Groups, resKind, namespacedName)) } @@ -112,6 +115,23 @@ func isUserAuthenticatedServiceAccount(userInfo authenticationv1.UserInfo) bool return slices.Contains(userInfo.Groups, serviceAccountsGroup) } +// isUserKubeScheduler returns true if user is kube-scheduler. 
+func isUserKubeScheduler(userInfo authenticationv1.UserInfo) bool { + // system:kube-scheduler user only belongs to system:authenticated group hence comparing username. + return userInfo.Username == kubeSchedulerUser +} + +// isUserKubeControllerManager returns true if user is kube-controller-manager. +func isUserKubeControllerManager(userInfo authenticationv1.UserInfo) bool { + // system:kube-controller-manager user only belongs to system:authenticated group hence comparing username. + return userInfo.Username == kubeControllerManagerUser +} + +// isNodeGroupUser returns true if user belongs to system:nodes group. +func isNodeGroupUser(userInfo authenticationv1.UserInfo) bool { + return slices.Contains(userInfo.Groups, nodeGroup) +} + // isMemberClusterMapFieldUpdated return true if member cluster label is updated. func isMapFieldUpdated(currentMCLabels, oldMCLabels map[string]string) bool { return !reflect.DeepEqual(currentMCLabels, oldMCLabels) diff --git a/pkg/webhook/validation/uservalidation_test.go b/pkg/webhook/validation/uservalidation_test.go index 0903d32dd..92d1eefb8 100644 --- a/pkg/webhook/validation/uservalidation_test.go +++ b/pkg/webhook/validation/uservalidation_test.go @@ -57,6 +57,24 @@ func TestValidateUserForResource(t *testing.T) { namespacedName: types.NamespacedName{Name: "test-role-binding", Namespace: "test-namespace"}, wantResponse: admission.Allowed(fmt.Sprintf(resourceAllowedFormat, "test-user", []string{serviceAccountsGroup}, "RoleBinding", types.NamespacedName{Name: "test-role-binding", Namespace: "test-namespace"})), }, + "allow user in system:node group": { + userInfo: authenticationv1.UserInfo{ + Username: "test-user", + Groups: []string{nodeGroup}, + }, + resKind: "Pod", + namespacedName: types.NamespacedName{Name: "test-pod", Namespace: "test-namespace"}, + wantResponse: admission.Allowed(fmt.Sprintf(resourceAllowedFormat, "test-user", []string{nodeGroup}, "Pod", types.NamespacedName{Name: "test-pod", Namespace: 
"test-namespace"})), + }, + "allow system:kube-scheduler user": { + userInfo: authenticationv1.UserInfo{ + Username: "system:kube-scheduler", + Groups: []string{"system:authenticated"}, + }, + resKind: "Pod", + namespacedName: types.NamespacedName{Name: "test-pod", Namespace: "test-namespace"}, + wantResponse: admission.Allowed(fmt.Sprintf(resourceAllowedFormat, "system:kube-scheduler", []string{"system:authenticated"}, "Pod", types.NamespacedName{Name: "test-pod", Namespace: "test-namespace"})), + }, "fail to validate user with invalid username, groups": { userInfo: authenticationv1.UserInfo{ Username: "test-user", diff --git a/pkg/webhook/webhook.go b/pkg/webhook/webhook.go index c6bf2c141..14b16adcb 100644 --- a/pkg/webhook/webhook.go +++ b/pkg/webhook/webhook.go @@ -24,7 +24,6 @@ import ( admv1beta1 "k8s.io/api/admissionregistration/v1beta1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -48,14 +47,11 @@ const ( FleetWebhookCfgName = "fleet-validating-webhook-configuration" FleetWebhookSvcName = "fleetwebhook" - crdResourceName = "customresourcedefinitions" - memberClusterResourceName = "memberclusters" - internalMemberClusterResourceName = "internalmemberclusters" - namespaceResouceName = "namespaces" - replicaSetResourceName = "replicasets" - podResourceName = "pods" - roleResourceName = "roles" - roleBindingResourceName = "rolebindings" + crdResourceName = "customresourcedefinitions" + memberClusterResourceName = "memberclusters" + namespaceResouceName = "namespaces" + replicaSetResourceName = "replicasets" + podResourceName = "pods" ) var ( @@ -218,7 +214,8 @@ func (w *Config) buildValidatingWebHooks() []admv1.ValidatingWebhook { } if w.enableGuardRail { - fleetNamespaceSelector := &metav1.LabelSelector{ + // MatchLabels/MatchExpressions values are 
ANDed to select resources. + fleetMemberNamespaceSelector := &metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{ { Key: fleetv1beta1.FleetResourceLabelKey, @@ -227,13 +224,35 @@ func (w *Config) buildValidatingWebHooks() []admv1.ValidatingWebhook { }, }, } - + fleetSystemNamespaceSelector := &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: corev1.LabelMetadataName, + Operator: metav1.LabelSelectorOpIn, + Values: []string{"fleet-system"}, + }, + }, + } + kubeNamespaceSelector := &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: corev1.LabelMetadataName, + Operator: metav1.LabelSelectorOpIn, + Values: []string{"kube-system", "kube-public", "kube-node-lease"}, + }, + }, + } cudOperations := []admv1.OperationType{ admv1.Create, admv1.Update, admv1.Delete, } - + namespacedResourcesRules := []admv1.RuleWithOperations{ + { + Operations: cudOperations, + Rule: createRule([]string{"*"}, []string{"*"}, []string{"*/*"}, &namespacedScope), + }, + } guardRailWebhookConfigurations := []admv1.ValidatingWebhook{ { Name: "fleet.customresourcedefinition.validating", @@ -262,23 +281,31 @@ func (w *Config) buildValidatingWebHooks() []admv1.ValidatingWebhook { }, }, { - Name: "fleet.namespacedresources.validating", + Name: "fleet.fleetmembernamespacedresources.validating", ClientConfig: w.createClientConfig(fleetresourcehandler.ValidationPath), FailurePolicy: &failPolicy, SideEffects: &sideEffortsNone, AdmissionReviewVersions: admissionReviewVersions, - NamespaceSelector: fleetNamespaceSelector, - Rules: []admv1.RuleWithOperations{ - { - Operations: cudOperations, - Rule: createRule([]string{rbacv1.SchemeGroupVersion.Group}, []string{rbacv1.SchemeGroupVersion.Version}, []string{roleResourceName, roleBindingResourceName}, &namespacedScope), - }, - { - Operations: cudOperations, - Rule: createRule([]string{fleetv1alpha1.GroupVersion.Group}, []string{fleetv1alpha1.GroupVersion.Version}, 
[]string{internalMemberClusterResourceName, internalMemberClusterResourceName + "/status"}, &namespacedScope), - }, - // TODO: (Arvindthiru): Add Rules for pods, services, configmaps, secrets, deployments and replicasets - }, + NamespaceSelector: fleetMemberNamespaceSelector, + Rules: namespacedResourcesRules, + }, + { + Name: "fleet.fleetsystemnamespacedresources.validating", + ClientConfig: w.createClientConfig(fleetresourcehandler.ValidationPath), + FailurePolicy: &failPolicy, + SideEffects: &sideEffortsNone, + AdmissionReviewVersions: admissionReviewVersions, + NamespaceSelector: fleetSystemNamespaceSelector, + Rules: namespacedResourcesRules, + }, + { + Name: "fleet.kubenamespacedresources.validating", + ClientConfig: w.createClientConfig(fleetresourcehandler.ValidationPath), + FailurePolicy: &failPolicy, + SideEffects: &sideEffortsNone, + AdmissionReviewVersions: admissionReviewVersions, + NamespaceSelector: kubeNamespaceSelector, + Rules: namespacedResourcesRules, }, { Name: "fleet.namespace.validating", diff --git a/pkg/webhook/webhook_test.go b/pkg/webhook/webhook_test.go index 206215e3e..0c6e28c7e 100644 --- a/pkg/webhook/webhook_test.go +++ b/pkg/webhook/webhook_test.go @@ -33,7 +33,7 @@ func TestBuildValidatingWebhooks(t *testing.T) { clientConnectionType: &url, enableGuardRail: true, }, - wantLength: 7, + wantLength: 9, }, } diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 77de89d44..0cb7de7d6 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -229,12 +229,12 @@ var _ = BeforeSuite(func() { testutils.CheckMemberClusterStatus(ctx, *HubCluster, &types.NamespacedName{Name: mc.Name}, wantMCStatus, mcStatusCmpOptions) By("create resources for webhook e2e") - testutils.CreateResourcesForWebHookE2E(ctx, HubCluster, memberNamespace.Name) + testutils.CreateResourcesForWebHookE2E(ctx, HubCluster) }) var _ = AfterSuite(func() { By("delete resources created for webhook e2e") - testutils.DeleteResourcesForWebHookE2E(ctx, HubCluster, 
memberNamespace.Name) + testutils.DeleteResourcesForWebHookE2E(ctx, HubCluster) By("update member cluster in the hub cluster") Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: mc.Name}, mc)).Should(Succeed(), "Failed to retrieve member cluster %s in %s cluster", mc.Name, HubCluster.ClusterName) diff --git a/test/e2e/utils/helper.go b/test/e2e/utils/helper.go index 24f2395b0..ae524eb81 100644 --- a/test/e2e/utils/helper.go +++ b/test/e2e/utils/helper.go @@ -35,8 +35,6 @@ import ( const ( testClusterRole = "wh-test-cluster-role" testClusterRoleBinding = "wh-test-cluster-role-binding" - testRole = "wh-test-role" - testRoleBinding = "wh-test-role-binding" ) var ( @@ -197,7 +195,7 @@ func GenerateCRDObjectFromFile(cluster framework.Cluster, fs embed.FS, filepath } // CreateResourcesForWebHookE2E create resources required for Webhook E2E. -func CreateResourcesForWebHookE2E(ctx context.Context, hubCluster *framework.Cluster, memberNamespace string) { +func CreateResourcesForWebHookE2E(ctx context.Context, hubCluster *framework.Cluster) { cr := rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{ Name: testClusterRole, @@ -236,45 +234,6 @@ func CreateResourcesForWebHookE2E(ctx context.Context, hubCluster *framework.Clu return hubCluster.KubeClient.Create(ctx, &crb) }, PollTimeout, PollInterval).Should(gomega.Succeed(), "failed to create cluster role binding %s for webhook E2E", crb.Name) - r := rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Name: testRole, - Namespace: memberNamespace, - }, - Rules: []rbacv1.PolicyRule{ - { - Verbs: []string{"*"}, - APIGroups: []string{"*"}, - Resources: []string{"*"}, - }, - }, - } - gomega.Eventually(func() error { - return hubCluster.KubeClient.Create(ctx, &r) - }, PollTimeout, PollInterval).Should(gomega.Succeed(), "failed to create role %s for webhook E2E", r.Name) - - rb := rbacv1.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: testRoleBinding, - Namespace: memberNamespace, - }, - Subjects: []rbacv1.Subject{ - { 
- APIGroup: rbacv1.GroupName, - Kind: "User", - Name: "test-user", - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: rbacv1.GroupName, - Kind: "Role", - Name: testRole, - }, - } - gomega.Eventually(func() error { - return hubCluster.KubeClient.Create(ctx, &rb) - }, PollTimeout, PollInterval).Should(gomega.Succeed(), "failed to create role binding %s for webhook E2E", rb.Name) - // Creating this MC for IMC E2E, this MC will fail to join since it's name is not configured to be recognized by the member agent // which it uses to create the namespace to watch for IMC resource. But it serves its purpose for the tests. identity := rbacv1.Subject{ @@ -308,7 +267,7 @@ func CreateResourcesForWebHookE2E(ctx context.Context, hubCluster *framework.Clu } // DeleteResourcesForWebHookE2E deletes resources created for Webhook E2E. -func DeleteResourcesForWebHookE2E(ctx context.Context, hubCluster *framework.Cluster, memberNamespace string) { +func DeleteResourcesForWebHookE2E(ctx context.Context, hubCluster *framework.Cluster) { mc := fleetv1alpha1.MemberCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "test-mc", @@ -321,22 +280,6 @@ func DeleteResourcesForWebHookE2E(ctx context.Context, hubCluster *framework.Clu return apierrors.IsNotFound(hubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: "test-mc", Namespace: "fleet-member-test-mc"}, &imc)) }, PollTimeout, PollInterval).Should(gomega.BeTrue(), "Failed to wait for internal member cluster %s to be deleted in %s cluster", "test-mc", hubCluster.ClusterName) - rb := rbacv1.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: testRoleBinding, - Namespace: memberNamespace, - }, - } - gomega.Expect(hubCluster.KubeClient.Delete(ctx, &rb)).Should(gomega.Succeed()) - - r := rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Name: testRole, - Namespace: memberNamespace, - }, - } - gomega.Expect(hubCluster.KubeClient.Delete(ctx, &r)).Should(gomega.Succeed()) - crb := rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: 
testClusterRoleBinding, diff --git a/test/e2e/webhook_test.go b/test/e2e/webhook_test.go index ff0a217e6..45c039763 100644 --- a/test/e2e/webhook_test.go +++ b/test/e2e/webhook_test.go @@ -14,6 +14,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" @@ -21,6 +22,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client" @@ -43,8 +45,18 @@ const ( testKey = "test-key" testValue = "test-value" testRole = "wh-test-role" + testPod = "test-pod" + testService = "test-service" + testSecret = "test-secret" + testDaemonSet = "test-daemon-set" + testDeployment = "test-deployment" + testReplicaSet = "test-replica-set" + testConfigMap = "test-config-map" testRoleBinding = "wh-test-role-binding" + testCronJob = "test-cron-job" + testJob = "test-job" fleetSystemNS = "fleet-system" + kubeSystemNS = "kube-system" crdStatusErrFormat = `user: %s in groups: %v is not allowed to modify fleet CRD: %+v` resourceStatusErrFormat = `user: %s in groups: %v is not allowed to modify resource %s: %+v` @@ -436,7 +448,6 @@ var _ = Describe("Fleet's Hub cluster webhook tests", func() { Eventually(func() bool { return k8sErrors.IsNotFound(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: rs.Name, Namespace: rs.Namespace}, rs)) }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) - } }) It("should deny CREATE operation on ReplicaSets in a non-reserved namespace", func() { @@ -836,15 +847,44 @@ var _ = Describe("Fleet's CR Resource Handler webhook tests", func() { AgentStatus: nil, } By("expecting successful UPDATE of Internal Member Cluster Status") - err := 
HubCluster.ImpersonateKubeClient.Status().Update(ctx, &imc) - return err + return HubCluster.ImpersonateKubeClient.Status().Update(ctx, &imc) }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed()) }) }) }) var _ = Describe("Fleet's Namespaced Resource Handler webhook tests", func() { - Context("Role & Role binding validation webhook", func() { + Context("fleet guard rail e2e for role", func() { + BeforeEach(func() { + r := rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: testRole, + Namespace: memberNamespace.Name, + }, + Rules: []rbacv1.PolicyRule{ + { + Verbs: []string{"*"}, + APIGroups: []string{"*"}, + Resources: []string{"*"}, + }, + }, + } + Expect(HubCluster.KubeClient.Create(ctx, &r)).Should(Succeed()) + }) + + AfterEach(func() { + r := rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: testRole, + Namespace: memberNamespace.Name, + }, + } + Expect(HubCluster.KubeClient.Delete(ctx, &r)).Should(Succeed()) + Eventually(func() bool { + return k8sErrors.IsNotFound(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: r.Name, Namespace: r.Namespace}, &r)) + }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) + }) + It("should deny CREATE operation on role for user not in system:masters group", func() { r := rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ @@ -868,19 +908,23 @@ var _ = Describe("Fleet's Namespaced Resource Handler webhook tests", func() { }) It("should deny UPDATE operation on role for user not in system:masters group", func() { - var r rbacv1.Role - Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testRole, Namespace: memberNamespace.Name}, &r)).Should(Succeed()) - - By("update role") - labels := make(map[string]string) - labels[testKey] = testValue - r.SetLabels(labels) - - By("expecting denial of operation UPDATE of role") - err := HubCluster.ImpersonateKubeClient.Update(ctx, &r) - var statusErr *k8sErrors.StatusError - Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update role 
call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) - Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "Role", types.NamespacedName{Name: r.Name, Namespace: r.Namespace}))) + Eventually(func(g Gomega) error { + var r rbacv1.Role + g.Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testRole, Namespace: memberNamespace.Name}, &r)).Should(Succeed()) + By("update role") + labels := make(map[string]string) + labels[testKey] = testValue + r.SetLabels(labels) + By("expecting denial of operation UPDATE of role") + err := HubCluster.ImpersonateKubeClient.Update(ctx, &r) + if k8sErrors.IsConflict(err) { + return err + } + var statusErr *k8sErrors.StatusError + g.Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update role call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + g.Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "Role", types.NamespacedName{Name: r.Name, Namespace: r.Namespace}))) + return nil + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed()) }) It("should deny DELETE operation on role for user not in system:masters group", func() { @@ -899,33 +943,62 @@ var _ = Describe("Fleet's Namespaced Resource Handler webhook tests", func() { }) It("should allow update operation on role for user in system:masters group", func() { - var r rbacv1.Role - Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testRole, Namespace: memberNamespace.Name}, &r)).Should(Succeed()) - - By("update labels in Role") - labels := make(map[string]string) - labels[testKey] = testValue - r.SetLabels(labels) + Eventually(func(g Gomega) error { + var r rbacv1.Role + g.Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testRole, Namespace: memberNamespace.Name}, 
&r)).Should(Succeed()) + By("update labels in Role") + labels := make(map[string]string) + labels[testKey] = testValue + r.SetLabels(labels) - By("expecting successful UPDATE of role") - // The user associated with KubeClient is kubernetes-admin in groups: [system:masters, system:authenticated] - Expect(HubCluster.KubeClient.Update(ctx, &r)).To(Succeed()) + By("expecting successful UPDATE of role") + // The user associated with KubeClient is kubernetes-admin in groups: [system:masters, system:authenticated] + return HubCluster.KubeClient.Update(ctx, &r) + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed()) + }) + }) - By("remove new label added for test") - labels = mc.GetLabels() - delete(labels, testKey) - mc.SetLabels(labels) + Context("fleet guard rail e2e for role binding", func() { + BeforeEach(func() { + rb := rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: testRoleBinding, + Namespace: kubeSystemNS, + }, + Subjects: []rbacv1.Subject{ + { + APIGroup: rbacv1.GroupName, + Kind: "User", + Name: "test-user", + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "Role", + Name: testRole, + }, + } + Expect(HubCluster.KubeClient.Create(ctx, &rb)).Should(Succeed()) + }) - By("expecting successful UPDATE of role") - // The user associated with KubeClient is kubernetes-admin in groups: [system:masters, system:authenticated] - Expect(HubCluster.KubeClient.Update(ctx, &r)).To(Succeed()) + AfterEach(func() { + rb := rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: testRoleBinding, + Namespace: kubeSystemNS, + }, + } + Expect(HubCluster.KubeClient.Delete(ctx, &rb)).Should(Succeed()) + Eventually(func() bool { + return k8sErrors.IsNotFound(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: rb.Name, Namespace: rb.Namespace}, &rb)) + }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) }) It("should deny CREATE operation on role binding for user not in system:masters group", func() { rb := 
rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: testRoleBinding, - Namespace: memberNamespace.Name, + Namespace: kubeSystemNS, }, Subjects: []rbacv1.Subject{ { @@ -949,26 +1022,30 @@ var _ = Describe("Fleet's Namespaced Resource Handler webhook tests", func() { }) It("should deny UPDATE operation on role binding for user not in system:masters group", func() { - var rb rbacv1.RoleBinding - Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testRoleBinding, Namespace: memberNamespace.Name}, &rb)).Should(Succeed()) - - By("update role") - labels := make(map[string]string) - labels[testKey] = testValue - rb.SetLabels(labels) - - By("expecting denial of operation UPDATE of role binding") - err := HubCluster.ImpersonateKubeClient.Update(ctx, &rb) - var statusErr *k8sErrors.StatusError - Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update role binding call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) - Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "RoleBinding", types.NamespacedName{Name: rb.Name, Namespace: rb.Namespace}))) + Eventually(func(g Gomega) error { + var rb rbacv1.RoleBinding + g.Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testRoleBinding, Namespace: kubeSystemNS}, &rb)).Should(Succeed()) + By("update role") + labels := make(map[string]string) + labels[testKey] = testValue + rb.SetLabels(labels) + By("expecting denial of operation UPDATE of role binding") + err := HubCluster.ImpersonateKubeClient.Update(ctx, &rb) + if k8sErrors.IsConflict(err) { + return err + } + var statusErr *k8sErrors.StatusError + g.Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update role binding call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + g.Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "RoleBinding", types.NamespacedName{Name: rb.Name, Namespace: rb.Namespace}))) + return nil + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed()) }) It("should deny DELETE operation on role binding for user not in system:masters group", func() { rb := rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: testRoleBinding, - Namespace: memberNamespace.Name, + Namespace: kubeSystemNS, }, } @@ -980,80 +1057,1106 @@ var _ = Describe("Fleet's Namespaced Resource Handler webhook tests", func() { }) It("should allow update operation on role binding for user in system:masters group", func() { - var rb rbacv1.RoleBinding - Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testRoleBinding, Namespace: memberNamespace.Name}, &rb)).Should(Succeed()) + Eventually(func(g Gomega) error { + var rb rbacv1.RoleBinding + g.Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testRoleBinding, Namespace: kubeSystemNS}, &rb)).Should(Succeed()) + By("update labels in role binding") + labels := make(map[string]string) + labels[testKey] = testValue + rb.SetLabels(labels) + + By("expecting successful UPDATE of role binding") + // The user associated with KubeClient is kubernetes-admin in groups: [system:masters, system:authenticated] + return HubCluster.KubeClient.Update(ctx, &rb) + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed()) + }) + }) + + Context("fleet guard rail e2e for pod", func() { + BeforeEach(func() { + pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: testPod, + Namespace: kubeSystemNS, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Image: "busybox", + ImagePullPolicy: corev1.PullIfNotPresent, + Name: "busybox", + }, + }, + RestartPolicy: corev1.RestartPolicyAlways, + }, + } + 
Expect(HubCluster.KubeClient.Create(ctx, &pod)).Should(Succeed()) + }) + AfterEach(func() { + pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: testPod, + Namespace: kubeSystemNS, + }, + } + Expect(HubCluster.KubeClient.Delete(ctx, &pod)).Should(Succeed()) + Eventually(func() bool { + return k8sErrors.IsNotFound(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}, &pod)) + }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) + }) + + It("should deny CREATE pod operation for user not in system:masters group", func() { + pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: testPod, + Namespace: kubeSystemNS, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Image: "busybox", + ImagePullPolicy: corev1.PullIfNotPresent, + Name: "busybox", + }, + }, + RestartPolicy: corev1.RestartPolicyAlways, + }, + } - By("update labels in Role Binding") - labels := make(map[string]string) - labels[testKey] = testValue - rb.SetLabels(labels) + By("expecting denial of operation CREATE of pod") + err := HubCluster.ImpersonateKubeClient.Create(ctx, &pod) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Create pod call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "Pod", types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}))) + }) + + It("should deny UPDATE pod operation for user not in system:masters group", func() { + Eventually(func(g Gomega) error { + var pod corev1.Pod + g.Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testPod, Namespace: kubeSystemNS}, &pod)).Should(Succeed()) + pod.ObjectMeta.Labels = map[string]string{"test-key": "test-value"} + By("expecting denial of operation UPDATE of pod") + err := HubCluster.ImpersonateKubeClient.Update(ctx, &pod) + if k8sErrors.IsConflict(err) { + return err + } + var statusErr *k8sErrors.StatusError + g.Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update pod call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + g.Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "Pod", types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}))) + return nil + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed()) + }) - By("expecting successful UPDATE of role binding") - // The user associated with KubeClient is kubernetes-admin in groups: [system:masters, system:authenticated] - Expect(HubCluster.KubeClient.Update(ctx, &rb)).To(Succeed()) + It("should deny DELETE pod operation for user not in system:masters group", func() { + var pod corev1.Pod + Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testPod, Namespace: kubeSystemNS}, &pod)).Should(Succeed()) + By("expecting denial of operation DELETE of pod") + err := HubCluster.ImpersonateKubeClient.Delete(ctx, &pod) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Delete pod call 
produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "Pod", types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}))) + }) - By("remove new label added for test") - labels = mc.GetLabels() - delete(labels, testKey) - mc.SetLabels(labels) + It("should allow update operation on pod for user in system:masters group", func() { + Eventually(func(g Gomega) error { + var pod corev1.Pod + g.Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testPod, Namespace: kubeSystemNS}, &pod)).Should(Succeed()) + By("update labels in pod") + labels := make(map[string]string) + labels[testKey] = testValue + pod.SetLabels(labels) - By("expecting successful UPDATE of role binding") - // The user associated with KubeClient is kubernetes-admin in groups: [system:masters, system:authenticated] - Expect(HubCluster.KubeClient.Update(ctx, &rb)).To(Succeed()) + By("expecting successful UPDATE of pod") + // The user associated with KubeClient is kubernetes-admin in groups: [system:masters, system:authenticated] + return HubCluster.KubeClient.Update(ctx, &pod) + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed()) }) }) -}) -var _ = Describe("Fleet's Reserved Namespace Handler webhook tests", func() { - Context("deny requests to modify namespace with fleet/kube prefix", func() { - It("should deny CREATE operation on namespace with fleet prefix for user not in system:masters group", func() { - ns := corev1.Namespace{ + Context("fleet guard rail e2e for service", func() { + BeforeEach(func() { + service := corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: "fleet-namespace", + Name: testService, + Namespace: fleetSystemNS, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Protocol: corev1.ProtocolTCP, + Port: 80, + TargetPort: intstr.IntOrString{ + IntVal: 8080, + }, + 
}, + }, }, } - By("expecting denial of operation CREATE of namespace") - err := HubCluster.ImpersonateKubeClient.Create(ctx, &ns) + Expect(HubCluster.KubeClient.Create(ctx, &service)).Should(Succeed()) + }) + AfterEach(func() { + service := corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: testService, + Namespace: fleetSystemNS, + }, + } + Expect(HubCluster.KubeClient.Delete(ctx, &service)).Should(Succeed()) + Eventually(func() bool { + return k8sErrors.IsNotFound(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: service.Name, Namespace: service.Namespace}, &service)) + }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) + + }) + + It("should deny CREATE service operation for user not in system:masters group", func() { + service := corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service1", + Namespace: fleetSystemNS, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Protocol: corev1.ProtocolTCP, + Port: 80, + TargetPort: intstr.IntOrString{ + IntVal: 8080, + }, + }, + }, + }, + } + + By("expecting denial of operation CREATE of service") + err := HubCluster.ImpersonateKubeClient.Create(ctx, &service) var statusErr *k8sErrors.StatusError - Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Create namespace call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) - Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "Namespace", types.NamespacedName{Name: ns.Name}))) + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Create service call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "Service", types.NamespacedName{Name: service.Name, Namespace: service.Namespace}))) }) - It("should deny CREATE operation on namespace with kube prefix for user not in system:masters group", func() { - ns := corev1.Namespace{ + It("should deny UPDATE service operation for user not in system:masters group", func() { + Eventually(func(g Gomega) error { + var service corev1.Service + g.Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testService, Namespace: fleetSystemNS}, &service)).Should(Succeed()) + service.Spec.Ports[0].Port = 81 + By("expecting denial of operation UPDATE of service") + err := HubCluster.ImpersonateKubeClient.Update(ctx, &service) + if k8sErrors.IsConflict(err) { + return err + } + var statusErr *k8sErrors.StatusError + g.Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update service call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + g.Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "Service", types.NamespacedName{Name: service.Name, Namespace: service.Namespace}))) + return nil + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed()) + }) + + It("should deny DELETE service operation for user not in system:masters group", func() { + var service corev1.Service + Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testService, Namespace: fleetSystemNS}, &service)).Should(Succeed()) + By("expecting denial of operation DELETE of service") + err := HubCluster.ImpersonateKubeClient.Delete(ctx, &service) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Delete service call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "Service", types.NamespacedName{Name: service.Name, Namespace: service.Namespace}))) + }) + + It("should allow update operation on service for user in system:masters group", func() { + Eventually(func(g Gomega) error { + var s corev1.Service + g.Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testService, Namespace: fleetSystemNS}, &s)).Should(Succeed()) + By("update labels in service") + labels := make(map[string]string) + labels[testKey] = testValue + s.SetLabels(labels) + + By("expecting successful UPDATE of service") + // The user associated with KubeClient is kubernetes-admin in groups: [system:masters, system:authenticated] + return HubCluster.KubeClient.Update(ctx, &s) + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed()) + }) + }) + + Context("fleet guard rail e2e for config map", func() { + BeforeEach(func() { + cm := corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Name: "kube-namespace", + Name: testConfigMap, + Namespace: memberNamespace.Name, }, + Data: map[string]string{"test-key": "test-value"}, } - By("expecting denial of operation CREATE of namespace") - err := HubCluster.ImpersonateKubeClient.Create(ctx, &ns) + Expect(HubCluster.KubeClient.Create(ctx, &cm)).Should(Succeed()) + }) + AfterEach(func() { + cm := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: testConfigMap, + Namespace: memberNamespace.Name, + }, + } + Expect(HubCluster.KubeClient.Delete(ctx, &cm)).Should(Succeed()) + Eventually(func() bool { + return k8sErrors.IsNotFound(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: cm.Name, Namespace: cm.Namespace}, &cm)) + }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) + }) + + It("should deny CREATE config map operation for user not in system:masters group", func() { 
+ cm := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: testConfigMap, + Namespace: memberNamespace.Name, + }, + Data: map[string]string{"test-key": "test-value"}, + } + + By("expecting denial of operation CREATE of config map") + err := HubCluster.ImpersonateKubeClient.Create(ctx, &cm) var statusErr *k8sErrors.StatusError - Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Create namespace call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) - Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "Namespace", types.NamespacedName{Name: ns.Name}))) + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Create config map call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "ConfigMap", types.NamespacedName{Name: cm.Name, Namespace: cm.Namespace}))) }) - It("should deny UPDATE operation on namespace with fleet prefix for user not in system:masters group", func() { - var ns corev1.Namespace - Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: fleetSystemNS}, &ns)).Should(Succeed()) - ns.Spec.Finalizers[0] = "test-finalizer" - By("expecting denial of operation UPDATE of namespace") - err := HubCluster.ImpersonateKubeClient.Update(ctx, &ns) + It("should deny UPDATE config map operation for user not in system:masters group", func() { + Eventually(func(g Gomega) error { + var cm corev1.ConfigMap + g.Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testConfigMap, Namespace: memberNamespace.Name}, &cm)).Should(Succeed()) + cm.Data["test-key"] = "test-value1" + By("expecting denial of operation UPDATE of config map") + err := HubCluster.ImpersonateKubeClient.Update(ctx, &cm) + if k8sErrors.IsConflict(err) { + return err + 
} + var statusErr *k8sErrors.StatusError + g.Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update config map call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + g.Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "ConfigMap", types.NamespacedName{Name: cm.Name, Namespace: cm.Namespace}))) + return nil + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed()) + }) + + It("should deny DELETE config map operation for user not in system:masters group", func() { + var cm corev1.ConfigMap + Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testConfigMap, Namespace: memberNamespace.Name}, &cm)).Should(Succeed()) + By("expecting denial of operation DELETE of config map") + err := HubCluster.ImpersonateKubeClient.Delete(ctx, &cm) var statusErr *k8sErrors.StatusError - Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update namespace call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) - Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "Namespace", types.NamespacedName{Name: ns.Name}))) + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Delete config map call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "ConfigMap", types.NamespacedName{Name: cm.Name, Namespace: cm.Namespace}))) }) - It("should deny UPDATE operation on namespace with kube prefix for user not in system:masters group", func() { - var ns corev1.Namespace - Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: "kube-system"}, &ns)).Should(Succeed()) - ns.Spec.Finalizers[0] = "test-finalizer" - By("expecting denial of operation UPDATE of namespace") - err := HubCluster.ImpersonateKubeClient.Update(ctx, &ns) + It("should allow update operation on config map for user in system:masters group", func() { + Eventually(func(g Gomega) error { + var cm corev1.ConfigMap + g.Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testConfigMap, Namespace: memberNamespace.Name}, &cm)).Should(Succeed()) + By("update labels in config map") + labels := make(map[string]string) + labels[testKey] = testValue + cm.SetLabels(labels) + + By("expecting successful UPDATE of config map") + // The user associated with KubeClient is kubernetes-admin in groups: [system:masters, system:authenticated] + return HubCluster.KubeClient.Update(ctx, &cm) + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed()) + }) + }) + + Context("fleet guard rail e2e for secret", func() { + BeforeEach(func() { + s := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: testSecret, + Namespace: memberNamespace.Name, + }, + Data: map[string][]byte{"test-key": []byte("dGVzdA==")}, + } + Expect(HubCluster.KubeClient.Create(ctx, &s)).Should(Succeed()) + }) + AfterEach(func() { + s := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: testSecret, + Namespace: memberNamespace.Name, + }, + } + Expect(HubCluster.KubeClient.Delete(ctx, &s)).Should(Succeed()) + Eventually(func() bool { + return 
k8sErrors.IsNotFound(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: s.Name, Namespace: s.Namespace}, &s)) + }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) + }) + + It("should deny CREATE secret operation for user not in system:masters group", func() { + s := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: testSecret, + Namespace: memberNamespace.Name, + }, + Data: map[string][]byte{"test-key": []byte("dGVzdA==")}, + } + + By("expecting denial of operation CREATE of secret") + err := HubCluster.ImpersonateKubeClient.Create(ctx, &s) var statusErr *k8sErrors.StatusError - Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update namespace call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Create secret call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "Secret", types.NamespacedName{Name: s.Name, Namespace: s.Namespace}))) + }) + + It("should deny UPDATE secret operation for user not in system:masters group", func() { + Eventually(func(g Gomega) error { + var s corev1.Secret + g.Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testSecret, Namespace: memberNamespace.Name}, &s)).Should(Succeed()) + s.Data["test-key"] = []byte("dmFsdWUtMg0KDQo=") + By("expecting denial of operation UPDATE of secret") + err := HubCluster.ImpersonateKubeClient.Update(ctx, &s) + if k8sErrors.IsConflict(err) { + return err + } + var statusErr *k8sErrors.StatusError + g.Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update secret call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + g.Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "Secret", types.NamespacedName{Name: s.Name, Namespace: s.Namespace}))) + return nil + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed()) + }) + + It("should deny DELETE secret operation for user not in system:masters group", func() { + var s corev1.Secret + Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testSecret, Namespace: memberNamespace.Name}, &s)).Should(Succeed()) + By("expecting denial of operation DELETE of secret") + err := HubCluster.ImpersonateKubeClient.Delete(ctx, &s) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Delete secret call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "Secret", types.NamespacedName{Name: s.Name, Namespace: s.Namespace}))) + }) + + It("should allow update operation on secret for user in system:masters group", func() { + Eventually(func(g Gomega) error { + var s corev1.Secret + g.Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testSecret, Namespace: memberNamespace.Name}, &s)).Should(Succeed()) + By("update labels in secret") + labels := make(map[string]string) + labels[testKey] = testValue + s.SetLabels(labels) + By("expecting successful UPDATE of secret") + // The user associated with KubeClient is kubernetes-admin in groups: [system:masters, system:authenticated] + return HubCluster.KubeClient.Update(ctx, &s) + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed()) + }) + }) + + Context("fleet guard rail e2e for deployment", func() { + BeforeEach(func() { + d := appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
testDeployment, + Namespace: kubeSystemNS, + Labels: map[string]string{"app": "busybox"}, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: pointer.Int32(1), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "busybox"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "busybox"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Image: "busybox", + ImagePullPolicy: corev1.PullIfNotPresent, + Name: "busybox", + }, + }, + }, + }, + }, + } + Expect(HubCluster.KubeClient.Create(ctx, &d)).Should(Succeed()) + }) + AfterEach(func() { + d := appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: testDeployment, + Namespace: kubeSystemNS, + }, + } + Expect(HubCluster.KubeClient.Delete(ctx, &d)).Should(Succeed()) + Eventually(func() bool { + return k8sErrors.IsNotFound(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: d.Name, Namespace: d.Namespace}, &d)) + }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) + }) + + It("should deny CREATE deployment operation for user not in system:masters group", func() { + d := appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: testDeployment, + Namespace: kubeSystemNS, + Labels: map[string]string{"app": "busybox"}, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: pointer.Int32(1), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "busybox"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "busybox"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Image: "busybox", + ImagePullPolicy: corev1.PullIfNotPresent, + Name: "busybox", + }, + }, + }, + }, + }, + } + + By("expecting denial of operation CREATE of deployment") + err := HubCluster.ImpersonateKubeClient.Create(ctx, &d) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Create deployment call 
produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "Deployment", types.NamespacedName{Name: d.Name, Namespace: d.Namespace}))) + }) + + It("should deny UPDATE deployment operation for user not in system:masters group", func() { + Eventually(func(g Gomega) error { + var d appsv1.Deployment + g.Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testDeployment, Namespace: kubeSystemNS}, &d)).Should(Succeed()) + d.ObjectMeta.Labels = map[string]string{"test-key": "test-value"} + By("expecting denial of operation UPDATE of deployment") + err := HubCluster.ImpersonateKubeClient.Update(ctx, &d) + if k8sErrors.IsConflict(err) { + return err + } + var statusErr *k8sErrors.StatusError + g.Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update deployment call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + g.Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "Deployment", types.NamespacedName{Name: d.Name, Namespace: d.Namespace}))) + return nil + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed()) + }) + + It("should deny DELETE deployment operation for user not in system:masters group", func() { + var d appsv1.Deployment + Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testDeployment, Namespace: kubeSystemNS}, &d)).Should(Succeed()) + By("expecting denial of operation DELETE of deployment") + err := HubCluster.ImpersonateKubeClient.Delete(ctx, &d) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Delete deployment call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "Deployment", types.NamespacedName{Name: d.Name, Namespace: d.Namespace}))) + }) + + It("should allow update operation on deployment for user in system:masters group", func() { + Eventually(func(g Gomega) error { + var d appsv1.Deployment + g.Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testDeployment, Namespace: kubeSystemNS}, &d)).Should(Succeed()) + By("update labels in deployment") + labels := make(map[string]string) + labels[testKey] = testValue + d.SetLabels(labels) + + By("expecting successful UPDATE of deployment") + // The user associated with KubeClient is kubernetes-admin in groups: [system:masters, system:authenticated] + return HubCluster.KubeClient.Update(ctx, &d) + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed()) + }) + }) + + Context("fleet guard rail e2e for replica set", func() { + BeforeEach(func() { + rs := appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: testReplicaSet, + Namespace: memberNamespace.Name, + Labels: map[string]string{"tier": "frontend"}, + }, + Spec: appsv1.ReplicaSetSpec{ + Replicas: pointer.Int32(1), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"tier": "frontend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"tier": "frontend"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Image: "busybox", + ImagePullPolicy: corev1.PullIfNotPresent, + Name: "busybox", + }, + }, + }, + }, + }, + } + Expect(HubCluster.KubeClient.Create(ctx, &rs)).Should(Succeed()) + }) + AfterEach(func() { + rs := appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: testReplicaSet, + Namespace: memberNamespace.Name, + }, + } + Expect(HubCluster.KubeClient.Delete(ctx, &rs)).Should(Succeed()) + 
Eventually(func() bool { + return k8sErrors.IsNotFound(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: rs.Name, Namespace: rs.Namespace}, &rs)) + }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) + }) + + It("should deny CREATE replica set operation for user not in system:masters group", func() { + rs := appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: testReplicaSet, + Namespace: memberNamespace.Name, + Labels: map[string]string{"tier": "frontend"}, + }, + Spec: appsv1.ReplicaSetSpec{ + Replicas: pointer.Int32(1), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"tier": "frontend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"tier": "frontend"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Image: "busybox", + ImagePullPolicy: corev1.PullIfNotPresent, + Name: "busybox", + }, + }, + }, + }, + }, + } + + By("expecting denial of operation CREATE of replica set") + err := HubCluster.ImpersonateKubeClient.Create(ctx, &rs) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Create replica set call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "ReplicaSet", types.NamespacedName{Name: rs.Name, Namespace: rs.Namespace}))) + }) + + It("should deny UPDATE replica set operation for user not in system:masters group", func() { + Eventually(func(g Gomega) error { + var rs appsv1.ReplicaSet + g.Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testReplicaSet, Namespace: memberNamespace.Name}, &rs)).Should(Succeed()) + rs.ObjectMeta.Labels = map[string]string{"test-key": "test-value"} + By("expecting denial of operation UPDATE of replica set") + err := HubCluster.ImpersonateKubeClient.Update(ctx, &rs) + if k8sErrors.IsConflict(err) { + return err + } + var statusErr *k8sErrors.StatusError + g.Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update replica set call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + g.Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "ReplicaSet", types.NamespacedName{Name: rs.Name, Namespace: rs.Namespace}))) + return nil + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed()) + }) + + It("should deny DELETE replica set operation for user not in system:masters group", func() { + var rs appsv1.ReplicaSet + Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testReplicaSet, Namespace: memberNamespace.Name}, &rs)).Should(Succeed()) + By("expecting denial of operation DELETE of replica set") + err := HubCluster.ImpersonateKubeClient.Delete(ctx, &rs) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Delete replica set call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "ReplicaSet", types.NamespacedName{Name: rs.Name, Namespace: rs.Namespace}))) + }) + + It("should allow update operation on replica set for user in system:masters group", func() { + Eventually(func(g Gomega) error { + var rs appsv1.ReplicaSet + g.Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testReplicaSet, Namespace: memberNamespace.Name}, &rs)).Should(Succeed()) + By("update labels in replica set") + labels := make(map[string]string) + labels[testKey] = testValue + rs.SetLabels(labels) + + By("expecting successful UPDATE of replica set") + // The user associated with KubeClient is kubernetes-admin in groups: [system:masters, system:authenticated] + return HubCluster.KubeClient.Update(ctx, &rs) + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed()) + }) + }) + + Context("fleet guard rail e2e for daemon set", func() { + BeforeEach(func() { + hostPath := &corev1.HostPathVolumeSource{ + Path: "/var/log", + } + ds := appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: testDaemonSet, + Namespace: fleetSystemNS, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"name": "fluentd-elasticsearch"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"name": "fluentd-elasticsearch"}, + }, + Spec: corev1.PodSpec{ + Tolerations: []corev1.Toleration{ + { + Key: "node-role.kubernetes.io/control-plane", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }, + { + Key: "node-role.kubernetes.io/master", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }, + }, + Containers: []corev1.Container{ + { + Name: "fluentd-elasticsearch", + Image: 
"quay.io/fluentd_elasticsearch/fluentd:v2.5.2", + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "varlog", + VolumeSource: corev1.VolumeSource{ + HostPath: hostPath, + }, + }, + }, + }, + }, + }, + } + Expect(HubCluster.KubeClient.Create(ctx, &ds)).Should(Succeed()) + }) + AfterEach(func() { + ds := appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: testDaemonSet, + Namespace: fleetSystemNS, + }, + } + Expect(HubCluster.KubeClient.Delete(ctx, &ds)).Should(Succeed()) + Eventually(func() bool { + return k8sErrors.IsNotFound(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: ds.Name, Namespace: ds.Namespace}, &ds)) + }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) + }) + + It("should deny CREATE daemon set operation for user not in system:masters group", func() { + hostPath := &corev1.HostPathVolumeSource{ + Path: "/var/log", + } + ds := appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: testDaemonSet, + Namespace: fleetSystemNS, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"name": "fluentd-elasticsearch"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"name": "fluentd-elasticsearch"}, + }, + Spec: corev1.PodSpec{ + Tolerations: []corev1.Toleration{ + { + Key: "node-role.kubernetes.io/control-plane", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }, + { + Key: "node-role.kubernetes.io/master", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }, + }, + Containers: []corev1.Container{ + { + Name: "fluentd-elasticsearch", + Image: "quay.io/fluentd_elasticsearch/fluentd:v2.5.2", 
+ Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "varlog", + VolumeSource: corev1.VolumeSource{ + HostPath: hostPath, + }, + }, + }, + }, + }, + }, + } + By("expecting denial of operation CREATE of daemon set") + err := HubCluster.ImpersonateKubeClient.Create(ctx, &ds) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Create daemon set call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "DaemonSet", types.NamespacedName{Name: ds.Name, Namespace: ds.Namespace}))) + }) + + It("should deny UPDATE daemon set operation for user not in system:masters group", func() { + Eventually(func(g Gomega) error { + var ds appsv1.DaemonSet + g.Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testDaemonSet, Namespace: fleetSystemNS}, &ds)).Should(Succeed()) + ds.ObjectMeta.Labels = map[string]string{"test-key": "test-value"} + By("expecting denial of operation UPDATE of daemon set") + err := HubCluster.ImpersonateKubeClient.Update(ctx, &ds) + if k8sErrors.IsConflict(err) { + return err + } + var statusErr *k8sErrors.StatusError + g.Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update daemon set call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + g.Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "DaemonSet", types.NamespacedName{Name: ds.Name, Namespace: ds.Namespace}))) + return nil + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed()) + }) + + It("should deny DELETE daemon set operation for user not in system:masters group", func() { + var ds appsv1.DaemonSet + Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testDaemonSet, Namespace: fleetSystemNS}, &ds)).Should(Succeed()) + By("expecting denial of operation DELETE of daemon set") + err := HubCluster.ImpersonateKubeClient.Delete(ctx, &ds) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Delete daemon set call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "DaemonSet", types.NamespacedName{Name: ds.Name, Namespace: ds.Namespace}))) + }) + + It("should allow update operation on daemon set for user in system:masters group", func() { + Eventually(func(g Gomega) error { + var ds appsv1.DaemonSet + g.Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testDaemonSet, Namespace: fleetSystemNS}, &ds)).Should(Succeed()) + By("update labels in daemon set") + labels := make(map[string]string) + labels[testKey] = testValue + ds.SetLabels(labels) + + By("expecting successful UPDATE of daemon set") + // The user associated with KubeClient is kubernetes-admin in groups: [system:masters, system:authenticated] + return HubCluster.KubeClient.Update(ctx, &ds) + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed()) + }) + }) + + Context("fleet guard rail e2e for cronjob", func() { + BeforeEach(func() { + cj := batchv1.CronJob{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: testCronJob, + Namespace: kubeSystemNS, + }, + Spec: batchv1.CronJobSpec{ + Schedule: "* * * * *", + JobTemplate: batchv1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Image: "busybox", + ImagePullPolicy: corev1.PullIfNotPresent, + Name: "cronjob-busybox", + }, + }, + RestartPolicy: corev1.RestartPolicyOnFailure, + }, + }, + }, + }, + }, + } + Expect(HubCluster.KubeClient.Create(ctx, &cj)).Should(Succeed()) + }) + AfterEach(func() { + cj := batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: testCronJob, + Namespace: kubeSystemNS, + }, + } + Expect(HubCluster.KubeClient.Delete(ctx, &cj)).Should(Succeed()) + Eventually(func() bool { + return k8sErrors.IsNotFound(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: cj.Name, Namespace: cj.Namespace}, &cj)) + }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) + }) + + It("should deny CREATE cronjob operation for user not in system:masters group", func() { + cj := batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: testCronJob, + Namespace: kubeSystemNS, + }, + Spec: batchv1.CronJobSpec{ + Schedule: "* * * * *", + JobTemplate: batchv1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Image: "busybox", + ImagePullPolicy: corev1.PullIfNotPresent, + Name: "cronjob-busybox", + }, + }, + RestartPolicy: corev1.RestartPolicyOnFailure, + }, + }, + }, + }, + }, + } + By("expecting denial of operation CREATE of cronjob") + err := HubCluster.ImpersonateKubeClient.Create(ctx, &cj) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Create cronjob call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "CronJob", types.NamespacedName{Name: cj.Name, Namespace: cj.Namespace}))) + }) + + It("should deny UPDATE cronjob operation for user not in system:masters group", func() { + Eventually(func(g Gomega) error { + var cj batchv1.CronJob + g.Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testCronJob, Namespace: kubeSystemNS}, &cj)).Should(Succeed()) + cj.ObjectMeta.Labels = map[string]string{"test-key": "test-value"} + By("expecting denial of operation UPDATE of cronjob") + err := HubCluster.ImpersonateKubeClient.Update(ctx, &cj) + if k8sErrors.IsConflict(err) { + return err + } + var statusErr *k8sErrors.StatusError + g.Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update cronjob call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + g.Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "CronJob", types.NamespacedName{Name: cj.Name, Namespace: cj.Namespace}))) + return nil + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed()) + }) + + It("should deny DELETE cronjob operation for user not in system:masters group", func() { + var cj batchv1.CronJob + Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testCronJob, Namespace: kubeSystemNS}, &cj)).Should(Succeed()) + By("expecting denial of operation DELETE of cronjob") + err := HubCluster.ImpersonateKubeClient.Delete(ctx, &cj) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Delete cronjob call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "CronJob", types.NamespacedName{Name: cj.Name, Namespace: cj.Namespace}))) + }) + + It("should allow update operation on cronjob for user in system:masters group", func() { + Eventually(func(g Gomega) error { + var cj batchv1.CronJob + g.Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testCronJob, Namespace: kubeSystemNS}, &cj)).Should(Succeed()) + By("update labels in cronjob") + labels := make(map[string]string) + labels[testKey] = testValue + cj.SetLabels(labels) + + By("expecting successful UPDATE of cronjob") + // The user associated with KubeClient is kubernetes-admin in groups: [system:masters, system:authenticated] + return HubCluster.KubeClient.Update(ctx, &cj) + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed()) + }) + }) + + Context("fleet guard rail e2e for job", func() { + BeforeEach(func() { + j := batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: testJob, + Namespace: memberNamespace.Name, + }, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Image: "busybox", + ImagePullPolicy: corev1.PullIfNotPresent, + Name: "job-busybox", + }, + }, + RestartPolicy: corev1.RestartPolicyNever, + }, + }, + }, + } + Expect(HubCluster.KubeClient.Create(ctx, &j)).Should(Succeed()) + }) + AfterEach(func() { + j := batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: testJob, + Namespace: memberNamespace.Name, + }, + } + Expect(HubCluster.KubeClient.Delete(ctx, &j)).Should(Succeed()) + Eventually(func() bool { + return k8sErrors.IsNotFound(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: j.Name, Namespace: j.Namespace}, &j)) + }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) + }) + + It("should deny CREATE job operation for user 
not in system:masters group", func() { + j := batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: testJob, + Namespace: memberNamespace.Name, + }, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Image: "busybox", + ImagePullPolicy: corev1.PullIfNotPresent, + Name: "job-busybox", + }, + }, + RestartPolicy: corev1.RestartPolicyNever, + }, + }, + }, + } + By("expecting denial of operation CREATE of job") + err := HubCluster.ImpersonateKubeClient.Create(ctx, &j) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Create job call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "Job", types.NamespacedName{Name: j.Name, Namespace: j.Namespace}))) + }) + + It("should deny UPDATE job operation for user not in system:masters group", func() { + Eventually(func(g Gomega) error { + var j batchv1.Job + g.Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testJob, Namespace: memberNamespace.Name}, &j)).Should(Succeed()) + j.ObjectMeta.Labels = map[string]string{"test-key": "test-value"} + By("expecting denial of operation UPDATE of job") + err := HubCluster.ImpersonateKubeClient.Update(ctx, &j) + if k8sErrors.IsConflict(err) { + return err + } + var statusErr *k8sErrors.StatusError + g.Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update job call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + g.Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "Job", types.NamespacedName{Name: j.Name, Namespace: j.Namespace}))) + return nil + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed()) + }) + + It("should deny DELETE job operation for user not in system:masters group", func() { + var j batchv1.Job + Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testJob, Namespace: memberNamespace.Name}, &j)).Should(Succeed()) + By("expecting denial of operation DELETE of job") + err := HubCluster.ImpersonateKubeClient.Delete(ctx, &j) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Delete job call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "Job", types.NamespacedName{Name: j.Name, Namespace: j.Namespace}))) + }) + + It("should allow update operation on job for user in system:masters group", func() { + Eventually(func(g Gomega) error { + var j batchv1.Job + g.Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testJob, Namespace: memberNamespace.Name}, &j)).Should(Succeed()) + By("update labels in job") + labels := make(map[string]string) + labels[testKey] = testValue + j.SetLabels(labels) + + By("expecting successful UPDATE of job") + // The user associated with KubeClient is kubernetes-admin in groups: [system:masters, system:authenticated] + return HubCluster.KubeClient.Update(ctx, &j) + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed()) + }) + }) +}) + +var _ = Describe("Fleet's Reserved Namespace Handler webhook tests", func() { + Context("deny requests to modify namespace with fleet/kube prefix", func() { + It("should deny 
CREATE operation on namespace with fleet prefix for user not in system:masters group", func() { + ns := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fleet-namespace", + }, + } + By("expecting denial of operation CREATE of namespace") + err := HubCluster.ImpersonateKubeClient.Create(ctx, &ns) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Create namespace call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "Namespace", types.NamespacedName{Name: ns.Name}))) + }) + + It("should deny CREATE operation on namespace with kube prefix for user not in system:masters group", func() { + ns := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kube-namespace", + }, + } + By("expecting denial of operation CREATE of namespace") + err := HubCluster.ImpersonateKubeClient.Create(ctx, &ns) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Create namespace call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "Namespace", types.NamespacedName{Name: ns.Name}))) + }) + + It("should deny UPDATE operation on namespace with fleet prefix for user not in system:masters group", func() { + Eventually(func(g Gomega) error { + var ns corev1.Namespace + g.Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: fleetSystemNS}, &ns)).Should(Succeed()) + ns.Spec.Finalizers[0] = "test-finalizer" + By("expecting denial of operation UPDATE of namespace") + err := HubCluster.ImpersonateKubeClient.Update(ctx, &ns) + if k8sErrors.IsConflict(err) { + return err + } + var statusErr *k8sErrors.StatusError + g.Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update namespace call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + g.Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "Namespace", types.NamespacedName{Name: ns.Name}))) + return nil + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed()) + }) + + It("should deny UPDATE operation on namespace with kube prefix for user not in system:masters group", func() { + Eventually(func(g Gomega) error { + var ns corev1.Namespace + g.Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: kubeSystemNS}, &ns)).Should(Succeed()) + ns.Spec.Finalizers[0] = "test-finalizer" + By("expecting denial of operation UPDATE of namespace") + err := HubCluster.ImpersonateKubeClient.Update(ctx, &ns) + if k8sErrors.IsConflict(err) { + return err + } + var statusErr *k8sErrors.StatusError + g.Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update namespace call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + g.Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf(resourceStatusErrFormat, testUser, testGroups, "Namespace", types.NamespacedName{Name: ns.Name}))) + return nil + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed()) + }) + + It("should deny DELETE operation on namespace with fleet prefix for user not in system:masters group", func() { + var ns corev1.Namespace + Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: fleetSystemNS}, &ns)).Should(Succeed())