add support for placement constraints

Signed-off-by: Nicolas De Loof <nicolas.deloof@gmail.com>
Author: Nicolas De Loof, 2020-03-02 09:24:06 +01:00 (committed by aiordache)
parent 5306f38f70
commit d1e40e9c36
3 changed files with 294 additions and 1 deletion

convert/placement.go (new file, +126 lines)

@@ -0,0 +1,126 @@
package convert

import (
	"regexp"
	"strings"

	"github.com/compose-spec/compose-go/types"
	"github.com/pkg/errors"
	apiv1 "k8s.io/api/core/v1"
)

var constraintEquals = regexp.MustCompile(`([\w\.]*)\W*(==|!=)\W*([\w\.]*)`)

const (
	kubernetesOs       = "beta.kubernetes.io/os"
	kubernetesArch     = "beta.kubernetes.io/arch"
	kubernetesHostname = "kubernetes.io/hostname"
)

// node.id          Node ID                    node.id == 2ivku8v2gvtg4
// node.hostname    Node hostname              node.hostname != node-2
// node.role        Node role                  node.role == manager
// node.labels      user defined node labels   node.labels.security == high
// engine.labels    Docker Engine's labels     engine.labels.operatingsystem == ubuntu 14.04
func toNodeAffinity(deploy *types.DeployConfig) (*apiv1.Affinity, error) {
	constraints := []string{}
	if deploy != nil && deploy.Placement.Constraints != nil {
		constraints = deploy.Placement.Constraints
	}
	requirements := []apiv1.NodeSelectorRequirement{}
	for _, constraint := range constraints {
		matches := constraintEquals.FindStringSubmatch(constraint)
		if len(matches) == 4 {
			key := matches[1]
			operator, err := toRequirementOperator(matches[2])
			if err != nil {
				return nil, err
			}
			value := matches[3]
			switch {
			case key == constraintOs:
				requirements = append(requirements, apiv1.NodeSelectorRequirement{
					Key:      kubernetesOs,
					Operator: operator,
					Values:   []string{value},
				})
			case key == constraintArch:
				requirements = append(requirements, apiv1.NodeSelectorRequirement{
					Key:      kubernetesArch,
					Operator: operator,
					Values:   []string{value},
				})
			case key == constraintHostname:
				requirements = append(requirements, apiv1.NodeSelectorRequirement{
					Key:      kubernetesHostname,
					Operator: operator,
					Values:   []string{value},
				})
			case strings.HasPrefix(key, constraintLabelPrefix):
				requirements = append(requirements, apiv1.NodeSelectorRequirement{
					Key:      strings.TrimPrefix(key, constraintLabelPrefix),
					Operator: operator,
					Values:   []string{value},
				})
			}
		}
	}

	if !hasRequirement(requirements, kubernetesOs) {
		requirements = append(requirements, apiv1.NodeSelectorRequirement{
			Key:      kubernetesOs,
			Operator: apiv1.NodeSelectorOpIn,
			Values:   []string{"linux"},
		})
	}
	if !hasRequirement(requirements, kubernetesArch) {
		requirements = append(requirements, apiv1.NodeSelectorRequirement{
			Key:      kubernetesArch,
			Operator: apiv1.NodeSelectorOpIn,
			Values:   []string{"amd64"},
		})
	}

	return &apiv1.Affinity{
		NodeAffinity: &apiv1.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &apiv1.NodeSelector{
				NodeSelectorTerms: []apiv1.NodeSelectorTerm{
					{
						MatchExpressions: requirements,
					},
				},
			},
		},
	}, nil
}

const (
	constraintOs          = "node.platform.os"
	constraintArch        = "node.platform.arch"
	constraintHostname    = "node.hostname"
	constraintLabelPrefix = "node.labels."
)

func hasRequirement(requirements []apiv1.NodeSelectorRequirement, key string) bool {
	for _, r := range requirements {
		if r.Key == key {
			return true
		}
	}
	return false
}

func toRequirementOperator(sign string) (apiv1.NodeSelectorOperator, error) {
	switch sign {
	case "==":
		return apiv1.NodeSelectorOpIn, nil
	case "!=":
		return apiv1.NodeSelectorOpNotIn, nil
	case ">":
		return apiv1.NodeSelectorOpGt, nil
	case "<":
		return apiv1.NodeSelectorOpLt, nil
	default:
		return "", errors.Errorf("operator %s not supported", sign)
	}
}
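
For reference, a minimal standalone sketch (not part of this commit) of how the constraintEquals pattern above splits a swarm-style constraint into key, operator and value before it is mapped to a NodeSelectorRequirement. The sample constraint is taken from the comment table in placement.go; the main function here is purely illustrative.

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Same pattern as constraintEquals in convert/placement.go.
	constraintEquals := regexp.MustCompile(`([\w\.]*)\W*(==|!=)\W*([\w\.]*)`)

	// Splits into key, operator, value.
	m := constraintEquals.FindStringSubmatch("node.labels.security == high")
	fmt.Println(m[1], m[2], m[3]) // node.labels.security == high

	// toNodeAffinity trims the "node.labels." prefix, so the resulting
	// NodeSelectorRequirement uses "security" as the node label key.
	fmt.Println(strings.TrimPrefix(m[1], "node.labels."))
}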

convert/placement_test.go (new file, +163 lines)

@@ -0,0 +1,163 @@
package convert

import (
	"reflect"
	"sort"
	"testing"

	"github.com/compose-spec/compose-go/types"
	"github.com/stretchr/testify/assert"
	apiv1 "k8s.io/api/core/v1"
)

func TestToPodWithPlacement(t *testing.T) {
	podTemplate := podTemplate(t, `
version: "3"
services:
  redis:
    image: redis:alpine
    deploy:
      placement:
        constraints:
          - node.platform.os == linux
          - node.platform.arch == amd64
          - node.hostname == node01
          - node.labels.label1 == value1
          - node.labels.label2.subpath != value2
`)

	expectedRequirements := []apiv1.NodeSelectorRequirement{
		{Key: "beta.kubernetes.io/os", Operator: apiv1.NodeSelectorOpIn, Values: []string{"linux"}},
		{Key: "beta.kubernetes.io/arch", Operator: apiv1.NodeSelectorOpIn, Values: []string{"amd64"}},
		{Key: "kubernetes.io/hostname", Operator: apiv1.NodeSelectorOpIn, Values: []string{"node01"}},
		{Key: "label1", Operator: apiv1.NodeSelectorOpIn, Values: []string{"value1"}},
		{Key: "label2.subpath", Operator: apiv1.NodeSelectorOpNotIn, Values: []string{"value2"}},
	}

	requirements := podTemplate.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions

	sort.Slice(expectedRequirements, func(i, j int) bool { return expectedRequirements[i].Key < expectedRequirements[j].Key })
	sort.Slice(requirements, func(i, j int) bool { return requirements[i].Key < requirements[j].Key })

	assert.EqualValues(t, expectedRequirements, requirements)
}

type keyValue struct {
	key   string
	value string
}

func kv(key, value string) keyValue {
	return keyValue{key: key, value: value}
}

func makeExpectedAffinity(kvs ...keyValue) *apiv1.Affinity {
	var matchExpressions []apiv1.NodeSelectorRequirement
	for _, kv := range kvs {
		matchExpressions = append(
			matchExpressions,
			apiv1.NodeSelectorRequirement{
				Key:      kv.key,
				Operator: apiv1.NodeSelectorOpIn,
				Values:   []string{kv.value},
			},
		)
	}
	return &apiv1.Affinity{
		NodeAffinity: &apiv1.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &apiv1.NodeSelector{
				NodeSelectorTerms: []apiv1.NodeSelectorTerm{
					{
						MatchExpressions: matchExpressions,
					},
				},
			},
		},
	}
}

func TestNodeAffinity(t *testing.T) {
	cases := []struct {
		name     string
		source   []string
		expected *apiv1.Affinity
	}{
		{
			name: "nil",
			expected: makeExpectedAffinity(
				kv(kubernetesOs, "linux"),
				kv(kubernetesArch, "amd64"),
			),
		},
		{
			name:   "hostname",
			source: []string{"node.hostname == test"},
			expected: makeExpectedAffinity(
				kv(kubernetesHostname, "test"),
				kv(kubernetesOs, "linux"),
				kv(kubernetesArch, "amd64"),
			),
		},
		{
			name:   "os",
			source: []string{"node.platform.os == windows"},
			expected: makeExpectedAffinity(
				kv(kubernetesOs, "windows"),
				kv(kubernetesArch, "amd64"),
			),
		},
		{
			name:   "arch",
			source: []string{"node.platform.arch == arm64"},
			expected: makeExpectedAffinity(
				kv(kubernetesArch, "arm64"),
				kv(kubernetesOs, "linux"),
			),
		},
		{
			name:   "custom-labels",
			source: []string{"node.platform.os == windows", "node.platform.arch == arm64"},
			expected: makeExpectedAffinity(
				kv(kubernetesArch, "arm64"),
				kv(kubernetesOs, "windows"),
			),
		},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			result, err := toNodeAffinity(&types.DeployConfig{
				Placement: types.Placement{
					Constraints: c.source,
				},
			})
			assert.NoError(t, err)
			assert.True(t, nodeAffinityMatch(c.expected, result))
		})
	}
}

func nodeSelectorRequirementsToMap(source []apiv1.NodeSelectorRequirement, result map[string]apiv1.NodeSelectorRequirement) {
	for _, t := range source {
		result[t.Key] = t
	}
}

func nodeAffinityMatch(expected, actual *apiv1.Affinity) bool {
	expectedTerms := expected.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
	actualTerms := actual.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
	expectedExpressions := make(map[string]apiv1.NodeSelectorRequirement)
	expectedFields := make(map[string]apiv1.NodeSelectorRequirement)
	actualExpressions := make(map[string]apiv1.NodeSelectorRequirement)
	actualFields := make(map[string]apiv1.NodeSelectorRequirement)
	for _, v := range expectedTerms {
		nodeSelectorRequirementsToMap(v.MatchExpressions, expectedExpressions)
		nodeSelectorRequirementsToMap(v.MatchFields, expectedFields)
	}
	for _, v := range actualTerms {
		nodeSelectorRequirementsToMap(v.MatchExpressions, actualExpressions)
		nodeSelectorRequirementsToMap(v.MatchFields, actualFields)
	}
	return reflect.DeepEqual(expectedExpressions, actualExpressions) && reflect.DeepEqual(expectedFields, actualFields)
}

Third changed file (+5 additions, -1 deletion)

@@ -19,6 +19,10 @@ import (
func toPodTemplate(serviceConfig types.ServiceConfig, labels map[string]string, model *compose.Project) (apiv1.PodTemplateSpec, error) {
	tpl := apiv1.PodTemplateSpec{}
	nodeAffinity, err := toNodeAffinity(serviceConfig.Deploy)
	if err != nil {
		return apiv1.PodTemplateSpec{}, err
	}
	hostAliases, err := toHostAliases(serviceConfig.ExtraHosts)
	if err != nil {
		return apiv1.PodTemplateSpec{}, err
@@ -70,7 +74,7 @@ func toPodTemplate(serviceConfig types.ServiceConfig, labels map[string]string,
	tpl.Spec.Hostname = serviceConfig.Hostname
	tpl.Spec.TerminationGracePeriodSeconds = toTerminationGracePeriodSeconds(serviceConfig.StopGracePeriod)
	tpl.Spec.HostAliases = hostAliases
	// FIXME tpl.Spec.Affinity = nodeAffinity
	tpl.Spec.Affinity = nodeAffinity
	// we don't want to remove all containers and recreate them because:
	// an admission plugin can add sidecar containers
	// we for sure want to keep the main container to be additive
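
Since toPodTemplate now assigns the affinity unconditionally, even a service with no placement section gets the default linux/amd64 node affinity from toNodeAffinity. The sketch below illustrates that behaviour; it is not part of the commit, assumes it lives in a _test.go file of the same convert package, and the ExampleDefaultNodeAffinity name is introduced here purely for illustration.

package convert

import "fmt"

// ExampleDefaultNodeAffinity shows the requirements produced when no
// deploy/placement configuration is given at all.
func ExampleDefaultNodeAffinity() {
	affinity, err := toNodeAffinity(nil)
	if err != nil {
		panic(err)
	}
	terms := affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
	for _, r := range terms[0].MatchExpressions {
		fmt.Println(r.Key, r.Operator, r.Values)
	}
	// Output:
	// beta.kubernetes.io/os In [linux]
	// beta.kubernetes.io/arch In [amd64]
}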