mirror of https://github.com/docker/compose.git
refactored compose in charts
Signed-off-by: aiordache <anca.iordache@docker.com>
This commit is contained in:
parent
68b29f569b
commit
1574ebdfff
|
@ -1,95 +0,0 @@
|
|||
package compose
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
internal "github.com/docker/helm-prototype/pkg/compose/internal"
|
||||
"github.com/docker/helm-prototype/pkg/compose/internal/helm"
|
||||
)
|
||||
|
||||
// Settings holds the package-wide default Helm environment settings,
// initialized once from internal.GetDefault().
var Settings = internal.GetDefault()
|
||||
|
||||
// ComposeProject ties a parsed Compose model to the Helm actions used to
// render and deploy it as a chart.
type ComposeProject struct {
	// config is the loaded Compose model; nil when no configuration file
	// was found (methods check for this and return an error).
	config *types.Config
	// helm performs the install/uninstall/list operations.
	helm *helm.HelmActions
	// ProjectDir is the working directory the Compose files were loaded from.
	ProjectDir string
	// Name is the project (and default release) name; excluded from
	// YAML/JSON (de)serialization.
	Name string `yaml:"-" json:"-"`
}
|
||||
|
||||
func Load(name string, configpaths []string) (*ComposeProject, error) {
|
||||
model, workingDir, err := internal.GetConfig(name, configpaths)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if name == "" {
|
||||
if model != nil {
|
||||
name = filepath.Base(filepath.Dir(model.Filename))
|
||||
} else if workingDir != "" {
|
||||
name = filepath.Base(filepath.Dir(workingDir))
|
||||
}
|
||||
}
|
||||
|
||||
return &ComposeProject{
|
||||
config: model,
|
||||
helm: helm.NewHelmActions(nil),
|
||||
ProjectDir: workingDir,
|
||||
Name: name,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (cp *ComposeProject) GenerateChart(dirname string) error {
|
||||
if cp.config == nil {
|
||||
return errors.New(`Can't find a suitable configuration file in this directory or any
|
||||
parent. Are you in the right directory?`)
|
||||
}
|
||||
if dirname == "" {
|
||||
dirname = cp.config.Filename
|
||||
if strings.Contains(dirname, ".") {
|
||||
splits := strings.SplitN(dirname, ".", 2)
|
||||
dirname = splits[0]
|
||||
}
|
||||
}
|
||||
name := filepath.Base(dirname)
|
||||
dirname = filepath.Dir(dirname)
|
||||
return internal.SaveChart(cp.config, name, dirname)
|
||||
}
|
||||
|
||||
func (cp *ComposeProject) Install(name, path string) error {
|
||||
if path != "" {
|
||||
return cp.helm.InstallChartFromDir(name, path)
|
||||
}
|
||||
if cp.config == nil {
|
||||
return errors.New(`Can't find a suitable configuration file in this directory or any
|
||||
parent. Are you in the right directory?`)
|
||||
}
|
||||
if name == "" {
|
||||
name = cp.Name
|
||||
}
|
||||
chart, err := internal.GetChartInMemory(cp.config, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cp.helm.InstallChart(name, chart)
|
||||
}
|
||||
|
||||
func (cp *ComposeProject) Uninstall(name string) error {
|
||||
if name == "" {
|
||||
if cp.config == nil {
|
||||
return errors.New(`Can't find a suitable configuration file in this directory or any
|
||||
parent. Are you in the right directory?
|
||||
|
||||
Alternative: uninstall [INSTALLATION NAME]
|
||||
`)
|
||||
}
|
||||
name = cp.Name
|
||||
}
|
||||
return cp.helm.Uninstall(name)
|
||||
}
|
||||
|
||||
// List returns the installed Helm releases, keyed by release name, with
// their status and description.
func (cp *ComposeProject) List() (map[string]interface{}, error) {
	return cp.helm.ListReleases()
}
|
|
@ -1,75 +0,0 @@
|
|||
package env
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/compose-spec/compose-go/loader"
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
"github.com/docker/helm-prototype/pkg/compose/internal/helm"
|
||||
"github.com/docker/helm-prototype/pkg/compose/internal/kube"
|
||||
"github.com/docker/helm-prototype/pkg/compose/internal/utils"
|
||||
chart "helm.sh/helm/v3/pkg/chart"
|
||||
util "helm.sh/helm/v3/pkg/chartutil"
|
||||
helmenv "helm.sh/helm/v3/pkg/cli"
|
||||
)
|
||||
|
||||
// GetDefault returns a fresh Helm environment-settings object built from
// the process environment.
func GetDefault() *helmenv.EnvSettings {
	return helmenv.New()
}
|
||||
|
||||
func Environment() map[string]string {
|
||||
vars := make(map[string]string)
|
||||
env := os.Environ()
|
||||
for _, v := range env {
|
||||
k := strings.SplitN(v, "=", 2)
|
||||
vars[k[0]] = k[1]
|
||||
}
|
||||
return vars
|
||||
}
|
||||
|
||||
func GetConfig(name string, configPaths []string) (*types.Config, string, error) {
|
||||
workingDir, configs, err := utils.GetConfigs(
|
||||
name,
|
||||
configPaths,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
if configs == nil {
|
||||
return nil, "", nil
|
||||
}
|
||||
config, err := loader.Load(types.ConfigDetails{
|
||||
WorkingDir: workingDir,
|
||||
ConfigFiles: configs,
|
||||
Environment: Environment(),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
return config, workingDir, nil
|
||||
}
|
||||
|
||||
func GetChartInMemory(config *types.Config, name string) (*chart.Chart, error) {
|
||||
for k, v := range config.Volumes {
|
||||
volumeName := strings.ReplaceAll(k, "_", "-")
|
||||
if volumeName != k {
|
||||
config.Volumes[volumeName] = v
|
||||
delete(config.Volumes, k)
|
||||
}
|
||||
}
|
||||
objects, err := kube.MapToKubernetesObjects(config, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
//in memory files
|
||||
return helm.ConvertToChart(name, objects)
|
||||
}
|
||||
|
||||
func SaveChart(config *types.Config, name, dest string) error {
|
||||
chart, err := GetChartInMemory(config, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return util.SaveDir(chart, dest)
|
||||
}
|
|
@ -1,90 +0,0 @@
|
|||
package helm
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"html/template"
|
||||
"path/filepath"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
|
||||
chart "helm.sh/helm/v3/pkg/chart"
|
||||
loader "helm.sh/helm/v3/pkg/chart/loader"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
func ConvertToChart(name string, objects map[string]runtime.Object) (*chart.Chart, error) {
|
||||
|
||||
files := []*loader.BufferedFile{
|
||||
&loader.BufferedFile{
|
||||
Name: "README.md",
|
||||
Data: []byte("This chart was created by converting a Compose file"),
|
||||
}}
|
||||
|
||||
chart := `name: {{.Name}}
|
||||
description: A generated Helm Chart for {{.Name}} from Skippbox Kompose
|
||||
version: 0.0.1
|
||||
apiVersion: v1
|
||||
keywords:
|
||||
- {{.Name}}
|
||||
sources:
|
||||
home:
|
||||
`
|
||||
|
||||
t, err := template.New("ChartTmpl").Parse(chart)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
type ChartDetails struct {
|
||||
Name string
|
||||
}
|
||||
var chartData bytes.Buffer
|
||||
err = t.Execute(&chartData, ChartDetails{Name: name})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
files = append(files, &loader.BufferedFile{
|
||||
Name: "Chart.yaml",
|
||||
Data: chartData.Bytes(),
|
||||
})
|
||||
|
||||
for name, o := range objects {
|
||||
j, err := json.Marshal(o)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
buf, err := jsonToYaml(j, 2)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
files = append(files, &loader.BufferedFile{
|
||||
Name: filepath.Join("templates", name),
|
||||
Data: buf,
|
||||
})
|
||||
|
||||
}
|
||||
return loader.LoadFiles(files)
|
||||
}
|
||||
|
||||
// Convert JSON to YAML.
|
||||
func jsonToYaml(j []byte, spaces int) ([]byte, error) {
|
||||
// Convert the JSON to an object.
|
||||
var jsonObj interface{}
|
||||
// We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
|
||||
// Go JSON library doesn't try to pick the right number type (int, float,
|
||||
// etc.) when unmarshling to interface{}, it just picks float64
|
||||
// universally. go-yaml does go through the effort of picking the right
|
||||
// number type, so we can preserve number type throughout this process.
|
||||
err := yaml.Unmarshal(j, &jsonObj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var b bytes.Buffer
|
||||
encoder := yaml.NewEncoder(&b)
|
||||
encoder.SetIndent(spaces)
|
||||
if err := encoder.Encode(jsonObj); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b.Bytes(), nil
|
||||
}
|
|
@ -1,115 +0,0 @@
|
|||
package helm
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log"
|
||||
|
||||
action "helm.sh/helm/v3/pkg/action"
|
||||
chart "helm.sh/helm/v3/pkg/chart"
|
||||
loader "helm.sh/helm/v3/pkg/chart/loader"
|
||||
env "helm.sh/helm/v3/pkg/cli"
|
||||
"helm.sh/helm/v3/pkg/release"
|
||||
)
|
||||
|
||||
// HelmActions bundles a Helm action configuration with its environment
// settings and tracks whether the Kubernetes connection was initialized.
type HelmActions struct {
	Config   *action.Configuration
	Settings *env.EnvSettings
	// kube_conn_init records whether Config has been initialized against
	// the cluster; see initKubeClient.
	kube_conn_init bool
}
|
||||
|
||||
func NewHelmActions(settings *env.EnvSettings) *HelmActions {
|
||||
if settings == nil {
|
||||
settings = env.New()
|
||||
}
|
||||
return &HelmActions{
|
||||
Config: new(action.Configuration),
|
||||
Settings: settings,
|
||||
kube_conn_init: false,
|
||||
}
|
||||
}
|
||||
|
||||
func (hc *HelmActions) initKubeClient() error {
|
||||
if hc.kube_conn_init {
|
||||
return nil
|
||||
}
|
||||
if err := hc.Config.Init(
|
||||
hc.Settings.RESTClientGetter(),
|
||||
hc.Settings.Namespace(),
|
||||
"configmap",
|
||||
log.Printf,
|
||||
); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if err := hc.Config.KubeClient.IsReachable(); err != nil {
|
||||
return err
|
||||
}
|
||||
hc.kube_conn_init = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hc *HelmActions) InstallChartFromDir(name string, chartpath string) error {
|
||||
chart, err := loader.Load(chartpath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return hc.InstallChart(name, chart)
|
||||
}
|
||||
|
||||
func (hc *HelmActions) InstallChart(name string, chart *chart.Chart) error {
|
||||
hc.initKubeClient()
|
||||
|
||||
actInstall := action.NewInstall(hc.Config)
|
||||
actInstall.ReleaseName = name
|
||||
actInstall.Namespace = hc.Settings.Namespace()
|
||||
|
||||
release, err := actInstall.Run(chart, map[string]interface{}{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Println("Release status: ", release.Info.Status)
|
||||
log.Println(release.Info.Description)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hc *HelmActions) Uninstall(name string) error {
|
||||
hc.initKubeClient()
|
||||
release, err := hc.Get(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if release == nil {
|
||||
return errors.New("No release found with the name provided.")
|
||||
}
|
||||
actUninstall := action.NewUninstall(hc.Config)
|
||||
response, err := actUninstall.Run(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Println(response.Release.Info.Description)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hc *HelmActions) Get(name string) (*release.Release, error) {
|
||||
hc.initKubeClient()
|
||||
|
||||
actGet := action.NewGet(hc.Config)
|
||||
return actGet.Run(name)
|
||||
}
|
||||
|
||||
func (hc *HelmActions) ListReleases() (map[string]interface{}, error) {
|
||||
hc.initKubeClient()
|
||||
|
||||
actList := action.NewList(hc.Config)
|
||||
releases, err := actList.Run()
|
||||
if err != nil {
|
||||
return map[string]interface{}{}, err
|
||||
}
|
||||
result := map[string]interface{}{}
|
||||
for _, rel := range releases {
|
||||
result[rel.Name] = map[string]string{
|
||||
"Status": string(rel.Info.Status),
|
||||
"Description": rel.Info.Description,
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
|
@ -1,214 +0,0 @@
|
|||
package kube
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
core "k8s.io/api/core/v1"
|
||||
resource "k8s.io/apimachinery/pkg/api/resource"
|
||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
)
|
||||
|
||||
// MapToKubernetesObjects converts every service in the Compose model into
// Kubernetes objects, keyed by the manifest file name they should be
// written to:
//   - a Service per service that exposes ports,
//   - a DaemonSet (deploy mode "global") or a Deployment otherwise,
//   - a PersistentVolumeClaim per volume of type "volume".
func MapToKubernetesObjects(model *types.Config, name string) (map[string]runtime.Object, error) {
	objects := map[string]runtime.Object{}

	for _, service := range model.Services {
		// mapToService returns nil when the service publishes no ports.
		svcObject := mapToService(model, service)
		if svcObject != nil {
			objects[fmt.Sprintf("%s-service.yaml", service.Name)] = svcObject
		} else {
			log.Println("Missing port mapping from service config.")
		}

		if service.Deploy != nil && service.Deploy.Mode == "global" {
			daemonset, err := mapToDaemonset(service, model, name)
			if err != nil {
				return nil, err
			}
			objects[fmt.Sprintf("%s-daemonset.yaml", service.Name)] = daemonset
		} else {
			deployment, err := mapToDeployment(service, model, name)
			if err != nil {
				return nil, err
			}
			objects[fmt.Sprintf("%s-deployment.yaml", service.Name)] = deployment
		}
		for _, vol := range service.Volumes {
			if vol.Type == "volume" {
				// Kubernetes object names may not contain "_"; vol is a
				// copy, so this does not mutate the model.
				vol.Source = strings.ReplaceAll(vol.Source, "_", "-")
				objects[fmt.Sprintf("%s-persistentvolumeclaim.yaml", vol.Source)] = mapToPVC(service, vol)
			}
		}
	}
	return objects, nil
}
|
||||
|
||||
func mapToService(model *types.Config, service types.ServiceConfig) *core.Service {
|
||||
ports := []core.ServicePort{}
|
||||
for _, p := range service.Ports {
|
||||
ports = append(ports,
|
||||
core.ServicePort{
|
||||
Name: fmt.Sprintf("%d-%s", p.Target, strings.ToLower(string(p.Protocol))),
|
||||
Port: int32(p.Target),
|
||||
TargetPort: intstr.FromInt(int(p.Target)),
|
||||
Protocol: toProtocol(p.Protocol),
|
||||
})
|
||||
}
|
||||
if len(ports) == 0 {
|
||||
return nil
|
||||
}
|
||||
return &core.Service{
|
||||
TypeMeta: meta.TypeMeta{
|
||||
Kind: "Service",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: meta.ObjectMeta{
|
||||
Name: service.Name,
|
||||
},
|
||||
Spec: core.ServiceSpec{
|
||||
Selector: map[string]string{"com.docker.compose.service": service.Name},
|
||||
Ports: ports,
|
||||
Type: mapServiceToServiceType(service, model),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func mapServiceToServiceType(service types.ServiceConfig, model *types.Config) core.ServiceType {
|
||||
serviceType := core.ServiceTypeClusterIP
|
||||
if len(service.Networks) == 0 {
|
||||
// service is implicitly attached to "default" network
|
||||
serviceType = core.ServiceTypeLoadBalancer
|
||||
}
|
||||
for name := range service.Networks {
|
||||
if !model.Networks[name].Internal {
|
||||
serviceType = core.ServiceTypeLoadBalancer
|
||||
}
|
||||
}
|
||||
for _, port := range service.Ports {
|
||||
if port.Published != 0 {
|
||||
serviceType = core.ServiceTypeNodePort
|
||||
}
|
||||
}
|
||||
return serviceType
|
||||
}
|
||||
|
||||
func mapToDeployment(service types.ServiceConfig, model *types.Config, name string) (*apps.Deployment, error) {
|
||||
labels := map[string]string{
|
||||
"com.docker.compose.service": service.Name,
|
||||
"com.docker.compose.project": name,
|
||||
}
|
||||
podTemplate, err := toPodTemplate(service, labels, model)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
selector := new(meta.LabelSelector)
|
||||
selector.MatchLabels = make(map[string]string)
|
||||
for key, val := range labels {
|
||||
selector.MatchLabels[key] = val
|
||||
}
|
||||
return &apps.Deployment{
|
||||
TypeMeta: meta.TypeMeta{
|
||||
Kind: "Deployment",
|
||||
APIVersion: "apps/v1",
|
||||
},
|
||||
ObjectMeta: meta.ObjectMeta{
|
||||
Name: service.Name,
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: apps.DeploymentSpec{
|
||||
Selector: selector,
|
||||
Replicas: toReplicas(service.Deploy),
|
||||
Strategy: toDeploymentStrategy(service.Deploy),
|
||||
Template: podTemplate,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func mapToDaemonset(service types.ServiceConfig, model *types.Config, name string) (*apps.DaemonSet, error) {
|
||||
labels := map[string]string{
|
||||
"com.docker.compose.service": service.Name,
|
||||
"com.docker.compose.project": name,
|
||||
}
|
||||
podTemplate, err := toPodTemplate(service, labels, model)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &apps.DaemonSet{
|
||||
ObjectMeta: meta.ObjectMeta{
|
||||
Name: service.Name,
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Template: podTemplate,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func toReplicas(deploy *types.DeployConfig) *int32 {
|
||||
v := int32(1)
|
||||
if deploy != nil {
|
||||
v = int32(*deploy.Replicas)
|
||||
}
|
||||
return &v
|
||||
}
|
||||
|
||||
func toDeploymentStrategy(deploy *types.DeployConfig) apps.DeploymentStrategy {
|
||||
if deploy == nil || deploy.UpdateConfig == nil {
|
||||
return apps.DeploymentStrategy{
|
||||
Type: apps.RecreateDeploymentStrategyType,
|
||||
}
|
||||
}
|
||||
return apps.DeploymentStrategy{
|
||||
Type: apps.RollingUpdateDeploymentStrategyType,
|
||||
RollingUpdate: &apps.RollingUpdateDeployment{
|
||||
MaxUnavailable: &intstr.IntOrString{
|
||||
Type: intstr.Int,
|
||||
IntVal: int32(*deploy.UpdateConfig.Parallelism),
|
||||
},
|
||||
MaxSurge: nil,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func mapToPVC(service types.ServiceConfig, vol types.ServiceVolumeConfig) runtime.Object {
|
||||
rwaccess := core.ReadWriteOnce
|
||||
if vol.ReadOnly {
|
||||
rwaccess = core.ReadOnlyMany
|
||||
}
|
||||
return &core.PersistentVolumeClaim{
|
||||
TypeMeta: meta.TypeMeta{
|
||||
Kind: "PersistentVolumeClaim",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: meta.ObjectMeta{
|
||||
Name: vol.Source,
|
||||
Labels: map[string]string{"com.docker.compose.service": service.Name},
|
||||
},
|
||||
Spec: core.PersistentVolumeClaimSpec{
|
||||
VolumeName: vol.Source,
|
||||
AccessModes: []core.PersistentVolumeAccessMode{rwaccess},
|
||||
Resources: core.ResourceRequirements{
|
||||
Requests: core.ResourceList{
|
||||
core.ResourceStorage: resource.MustParse("100Mi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// toSecondsOrDefault converts a duration string in seconds and defaults to a
|
||||
// given value if the duration is nil.
|
||||
// The supported units are us, ms, s, m and h.
|
||||
func toSecondsOrDefault(duration *types.Duration, defaultValue int32) int32 { //nolint: unparam
|
||||
if duration == nil {
|
||||
return defaultValue
|
||||
}
|
||||
return int32(time.Duration(*duration).Seconds())
|
||||
}
|
|
@ -1,126 +0,0 @@
|
|||
package kube
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
"github.com/pkg/errors"
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
// constraintEquals extracts "key OP value" triples from a swarm placement
// constraint. Note it only recognizes the == and != operators, even though
// toRequirementOperator also handles > and <.
var constraintEquals = regexp.MustCompile(`([\w\.]*)\W*(==|!=)\W*([\w\.]*)`)

// Kubernetes node label keys that swarm constraint keys are mapped onto.
const (
	kubernetesOs       = "beta.kubernetes.io/os"
	kubernetesArch     = "beta.kubernetes.io/arch"
	kubernetesHostname = "kubernetes.io/hostname"
)
|
||||
|
||||
// node.id Node ID node.id == 2ivku8v2gvtg4
|
||||
// node.hostname Node hostname node.hostname != node-2
|
||||
// node.role Node role node.role == manager
|
||||
// node.labels user defined node labels node.labels.security == high
|
||||
// engine.labels Docker Engine's labels engine.labels.operatingsystem == ubuntu 14.04
|
||||
func toNodeAffinity(deploy *types.DeployConfig) (*apiv1.Affinity, error) {
|
||||
constraints := []string{}
|
||||
if deploy != nil && deploy.Placement.Constraints != nil {
|
||||
constraints = deploy.Placement.Constraints
|
||||
}
|
||||
requirements := []apiv1.NodeSelectorRequirement{}
|
||||
for _, constraint := range constraints {
|
||||
matches := constraintEquals.FindStringSubmatch(constraint)
|
||||
if len(matches) == 4 {
|
||||
key := matches[1]
|
||||
operator, err := toRequirementOperator(matches[2])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
value := matches[3]
|
||||
|
||||
switch {
|
||||
case key == constraintOs:
|
||||
requirements = append(requirements, apiv1.NodeSelectorRequirement{
|
||||
Key: kubernetesOs,
|
||||
Operator: operator,
|
||||
Values: []string{value},
|
||||
})
|
||||
case key == constraintArch:
|
||||
requirements = append(requirements, apiv1.NodeSelectorRequirement{
|
||||
Key: kubernetesArch,
|
||||
Operator: operator,
|
||||
Values: []string{value},
|
||||
})
|
||||
case key == constraintHostname:
|
||||
requirements = append(requirements, apiv1.NodeSelectorRequirement{
|
||||
Key: kubernetesHostname,
|
||||
Operator: operator,
|
||||
Values: []string{value},
|
||||
})
|
||||
case strings.HasPrefix(key, constraintLabelPrefix):
|
||||
requirements = append(requirements, apiv1.NodeSelectorRequirement{
|
||||
Key: strings.TrimPrefix(key, constraintLabelPrefix),
|
||||
Operator: operator,
|
||||
Values: []string{value},
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !hasRequirement(requirements, kubernetesOs) {
|
||||
requirements = append(requirements, apiv1.NodeSelectorRequirement{
|
||||
Key: kubernetesOs,
|
||||
Operator: apiv1.NodeSelectorOpIn,
|
||||
Values: []string{"linux"},
|
||||
})
|
||||
}
|
||||
if !hasRequirement(requirements, kubernetesArch) {
|
||||
requirements = append(requirements, apiv1.NodeSelectorRequirement{
|
||||
Key: kubernetesArch,
|
||||
Operator: apiv1.NodeSelectorOpIn,
|
||||
Values: []string{"amd64"},
|
||||
})
|
||||
}
|
||||
return &apiv1.Affinity{
|
||||
NodeAffinity: &apiv1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &apiv1.NodeSelector{
|
||||
NodeSelectorTerms: []apiv1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: requirements,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Swarm placement constraint keys understood by toNodeAffinity.
const (
	constraintOs          = "node.platform.os"
	constraintArch        = "node.platform.arch"
	constraintHostname    = "node.hostname"
	constraintLabelPrefix = "node.labels."
)
|
||||
|
||||
func hasRequirement(requirements []apiv1.NodeSelectorRequirement, key string) bool {
|
||||
for _, r := range requirements {
|
||||
if r.Key == key {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func toRequirementOperator(sign string) (apiv1.NodeSelectorOperator, error) {
|
||||
switch sign {
|
||||
case "==":
|
||||
return apiv1.NodeSelectorOpIn, nil
|
||||
case "!=":
|
||||
return apiv1.NodeSelectorOpNotIn, nil
|
||||
case ">":
|
||||
return apiv1.NodeSelectorOpGt, nil
|
||||
case "<":
|
||||
return apiv1.NodeSelectorOpLt, nil
|
||||
default:
|
||||
return "", errors.Errorf("operator %s not supported", sign)
|
||||
}
|
||||
}
|
|
@ -1,163 +0,0 @@
|
|||
package kube
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
// TestToPodWithPlacement verifies that swarm placement constraints in a
// Compose file are translated into the equivalent node-selector
// requirements on the generated pod template (order-independent: both
// sides are sorted by key before comparison).
// NOTE(review): the YAML literal's indentation was reconstructed from a
// mangled source — confirm it matches the original fixture.
func TestToPodWithPlacement(t *testing.T) {
	podTemplate := podTemplate(t, `
version: "3"
services:
  redis:
    image: redis:alpine
    deploy:
      placement:
        constraints:
        - node.platform.os == linux
        - node.platform.arch == amd64
        - node.hostname == node01
        - node.labels.label1 == value1
        - node.labels.label2.subpath != value2
`)

	expectedRequirements := []apiv1.NodeSelectorRequirement{
		{Key: "beta.kubernetes.io/os", Operator: apiv1.NodeSelectorOpIn, Values: []string{"linux"}},
		{Key: "beta.kubernetes.io/arch", Operator: apiv1.NodeSelectorOpIn, Values: []string{"amd64"}},
		{Key: "kubernetes.io/hostname", Operator: apiv1.NodeSelectorOpIn, Values: []string{"node01"}},
		{Key: "label1", Operator: apiv1.NodeSelectorOpIn, Values: []string{"value1"}},
		{Key: "label2.subpath", Operator: apiv1.NodeSelectorOpNotIn, Values: []string{"value2"}},
	}

	requirements := podTemplate.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions

	sort.Slice(expectedRequirements, func(i, j int) bool { return expectedRequirements[i].Key < expectedRequirements[j].Key })
	sort.Slice(requirements, func(i, j int) bool { return requirements[i].Key < requirements[j].Key })

	assert.EqualValues(t, expectedRequirements, requirements)
}
|
||||
|
||||
// keyValue is a test helper pairing a node label key with its expected
// value.
type keyValue struct {
	key   string
	value string
}

// kv is shorthand for constructing a keyValue.
func kv(key, value string) keyValue {
	return keyValue{key: key, value: value}
}
|
||||
|
||||
func makeExpectedAffinity(kvs ...keyValue) *apiv1.Affinity {
|
||||
|
||||
var matchExpressions []apiv1.NodeSelectorRequirement
|
||||
for _, kv := range kvs {
|
||||
matchExpressions = append(
|
||||
matchExpressions,
|
||||
apiv1.NodeSelectorRequirement{
|
||||
Key: kv.key,
|
||||
Operator: apiv1.NodeSelectorOpIn,
|
||||
Values: []string{kv.value},
|
||||
},
|
||||
)
|
||||
}
|
||||
return &apiv1.Affinity{
|
||||
NodeAffinity: &apiv1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &apiv1.NodeSelector{
|
||||
NodeSelectorTerms: []apiv1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: matchExpressions,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// TestNodeAfinity table-tests toNodeAffinity: each case feeds placement
// constraints and expects the mapped requirements plus the linux/amd64
// defaults where not overridden.
// NOTE(review): the name is missing an "f" ("Afinity"); left as-is since
// renaming a test function is out of scope for a comment-only change.
func TestNodeAfinity(t *testing.T) {
	cases := []struct {
		name     string
		source   []string
		expected *apiv1.Affinity
	}{
		{
			// No constraints at all: only the two defaults appear.
			name: "nil",
			expected: makeExpectedAffinity(
				kv(kubernetesOs, "linux"),
				kv(kubernetesArch, "amd64"),
			),
		},
		{
			name:   "hostname",
			source: []string{"node.hostname == test"},
			expected: makeExpectedAffinity(
				kv(kubernetesHostname, "test"),
				kv(kubernetesOs, "linux"),
				kv(kubernetesArch, "amd64"),
			),
		},
		{
			// Explicit OS suppresses the linux default.
			name:   "os",
			source: []string{"node.platform.os == windows"},
			expected: makeExpectedAffinity(
				kv(kubernetesOs, "windows"),
				kv(kubernetesArch, "amd64"),
			),
		},
		{
			// Explicit arch suppresses the amd64 default.
			name:   "arch",
			source: []string{"node.platform.arch == arm64"},
			expected: makeExpectedAffinity(
				kv(kubernetesArch, "arm64"),
				kv(kubernetesOs, "linux"),
			),
		},
		{
			name:   "custom-labels",
			source: []string{"node.platform.os == windows", "node.platform.arch == arm64"},
			expected: makeExpectedAffinity(
				kv(kubernetesArch, "arm64"),
				kv(kubernetesOs, "windows"),
			),
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			result, err := toNodeAffinity(&types.DeployConfig{
				Placement: types.Placement{
					Constraints: c.source,
				},
			})
			assert.NoError(t, err)
			assert.True(t, nodeAffinityMatch(c.expected, result))
		})
	}
}
|
||||
|
||||
func nodeSelectorRequirementsToMap(source []apiv1.NodeSelectorRequirement, result map[string]apiv1.NodeSelectorRequirement) {
|
||||
for _, t := range source {
|
||||
result[t.Key] = t
|
||||
}
|
||||
}
|
||||
|
||||
func nodeAffinityMatch(expected, actual *apiv1.Affinity) bool {
|
||||
expectedTerms := expected.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
|
||||
actualTerms := actual.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
|
||||
expectedExpressions := make(map[string]apiv1.NodeSelectorRequirement)
|
||||
expectedFields := make(map[string]apiv1.NodeSelectorRequirement)
|
||||
actualExpressions := make(map[string]apiv1.NodeSelectorRequirement)
|
||||
actualFields := make(map[string]apiv1.NodeSelectorRequirement)
|
||||
for _, v := range expectedTerms {
|
||||
nodeSelectorRequirementsToMap(v.MatchExpressions, expectedExpressions)
|
||||
nodeSelectorRequirementsToMap(v.MatchFields, expectedFields)
|
||||
}
|
||||
for _, v := range actualTerms {
|
||||
nodeSelectorRequirementsToMap(v.MatchExpressions, actualExpressions)
|
||||
nodeSelectorRequirementsToMap(v.MatchFields, actualFields)
|
||||
}
|
||||
return reflect.DeepEqual(expectedExpressions, actualExpressions) && reflect.DeepEqual(expectedFields, actualFields)
|
||||
}
|
|
@ -1,349 +0,0 @@
|
|||
package kube
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// toPodTemplate builds the pod template for a Compose service: metadata
// (labels plus Compose labels as annotations), pod-level settings (restart
// policy, volumes, host PID/IPC, hostname, grace period, host aliases) and
// the main container's fields. Containers are updated additively rather
// than replaced; see the comment below.
func toPodTemplate(serviceConfig types.ServiceConfig, labels map[string]string, model *types.Config) (apiv1.PodTemplateSpec, error) {
	tpl := apiv1.PodTemplateSpec{}
	//nodeAffinity, err := toNodeAffinity(serviceConfig.Deploy)
	//if err != nil {
	//	return apiv1.PodTemplateSpec{}, err
	//}
	hostAliases, err := toHostAliases(serviceConfig.ExtraHosts)
	if err != nil {
		return apiv1.PodTemplateSpec{}, err
	}
	env, err := toEnv(serviceConfig.Environment)
	if err != nil {
		return apiv1.PodTemplateSpec{}, err
	}
	restartPolicy, err := toRestartPolicy(serviceConfig)
	if err != nil {
		return apiv1.PodTemplateSpec{}, err
	}

	// Resource limits/requests are optional; only convert what is set.
	var limits apiv1.ResourceList
	if serviceConfig.Deploy != nil && serviceConfig.Deploy.Resources.Limits != nil {
		limits, err = toResource(serviceConfig.Deploy.Resources.Limits)
		if err != nil {
			return apiv1.PodTemplateSpec{}, err
		}
	}
	var requests apiv1.ResourceList
	if serviceConfig.Deploy != nil && serviceConfig.Deploy.Resources.Reservations != nil {
		requests, err = toResource(serviceConfig.Deploy.Resources.Reservations)
		if err != nil {
			return apiv1.PodTemplateSpec{}, err
		}
	}

	volumes, err := toVolumes(serviceConfig, model)
	if err != nil {
		return apiv1.PodTemplateSpec{}, err
	}
	volumeMounts, err := toVolumeMounts(serviceConfig, model)
	if err != nil {
		return apiv1.PodTemplateSpec{}, err
	}
	/* pullPolicy, err := toImagePullPolicy(serviceConfig.Image, x-kubernetes-pull-policy)
	if err != nil {
		return apiv1.PodTemplateSpec{}, err
	} */
	tpl.ObjectMeta = metav1.ObjectMeta{
		Labels:      labels,
		Annotations: serviceConfig.Labels,
	}
	tpl.Spec.RestartPolicy = restartPolicy
	tpl.Spec.Volumes = volumes
	tpl.Spec.HostPID = toHostPID(serviceConfig.Pid)
	tpl.Spec.HostIPC = toHostIPC(serviceConfig.Ipc)
	tpl.Spec.Hostname = serviceConfig.Hostname
	tpl.Spec.TerminationGracePeriodSeconds = toTerminationGracePeriodSeconds(serviceConfig.StopGracePeriod)
	tpl.Spec.HostAliases = hostAliases
	//tpl.Spec.Affinity = nodeAffinity
	// we dont want to remove all containers and recreate them because:
	// an admission plugin can add sidecar containers
	// we for sure want to keep the main container to be additive
	if len(tpl.Spec.Containers) == 0 {
		tpl.Spec.Containers = []apiv1.Container{{}}
	}

	// Find the container matching the service name (index 0 otherwise)
	// and overwrite its fields in place.
	containerIX := 0
	for ix, c := range tpl.Spec.Containers {
		if c.Name == serviceConfig.Name {
			containerIX = ix
			break
		}
	}
	tpl.Spec.Containers[containerIX].Name = serviceConfig.Name
	tpl.Spec.Containers[containerIX].Image = serviceConfig.Image
	// FIXME tpl.Spec.Containers[containerIX].ImagePullPolicy = pullPolicy
	tpl.Spec.Containers[containerIX].Command = serviceConfig.Entrypoint
	tpl.Spec.Containers[containerIX].Args = serviceConfig.Command
	tpl.Spec.Containers[containerIX].WorkingDir = serviceConfig.WorkingDir
	tpl.Spec.Containers[containerIX].TTY = serviceConfig.Tty
	tpl.Spec.Containers[containerIX].Stdin = serviceConfig.StdinOpen
	tpl.Spec.Containers[containerIX].Ports = toPorts(serviceConfig.Ports)
	tpl.Spec.Containers[containerIX].LivenessProbe = toLivenessProbe(serviceConfig.HealthCheck)
	tpl.Spec.Containers[containerIX].Env = env
	tpl.Spec.Containers[containerIX].VolumeMounts = volumeMounts
	tpl.Spec.Containers[containerIX].SecurityContext = toSecurityContext(serviceConfig)
	tpl.Spec.Containers[containerIX].Resources = apiv1.ResourceRequirements{
		Limits:   limits,
		Requests: requests,
	}

	/* FIXME
	if serviceConfig.PullSecret != "" {
		pullSecrets := map[string]struct{}{}
		for _, ps := range tpl.Spec.ImagePullSecrets {
			pullSecrets[ps.Name] = struct{}{}
		}
		if _, ok := pullSecrets[serviceConfig.PullSecret]; !ok {
			tpl.Spec.ImagePullSecrets = append(tpl.Spec.ImagePullSecrets, apiv1.LocalObjectReference{Name: serviceConfig.PullSecret})
		}
	}
	*/
	return tpl, nil
}
|
||||
|
||||
func toImagePullPolicy(image string, specifiedPolicy string) (apiv1.PullPolicy, error) {
|
||||
if specifiedPolicy == "" {
|
||||
if strings.HasSuffix(image, ":latest") {
|
||||
return apiv1.PullAlways, nil
|
||||
}
|
||||
return apiv1.PullIfNotPresent, nil
|
||||
}
|
||||
switch apiv1.PullPolicy(specifiedPolicy) {
|
||||
case apiv1.PullAlways, apiv1.PullIfNotPresent, apiv1.PullNever:
|
||||
return apiv1.PullPolicy(specifiedPolicy), nil
|
||||
default:
|
||||
return "", errors.Errorf("invalid pull policy %q, must be %q, %q or %q", specifiedPolicy, apiv1.PullAlways, apiv1.PullIfNotPresent, apiv1.PullNever)
|
||||
}
|
||||
}
|
||||
|
||||
func toHostAliases(extraHosts []string) ([]apiv1.HostAlias, error) {
|
||||
if extraHosts == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
byHostnames := map[string]string{}
|
||||
for _, host := range extraHosts {
|
||||
split := strings.SplitN(host, ":", 2)
|
||||
if len(split) != 2 {
|
||||
return nil, errors.Errorf("malformed host %s", host)
|
||||
}
|
||||
byHostnames[split[0]] = split[1]
|
||||
}
|
||||
|
||||
byIPs := map[string][]string{}
|
||||
for k, v := range byHostnames {
|
||||
byIPs[v] = append(byIPs[v], k)
|
||||
}
|
||||
|
||||
aliases := make([]apiv1.HostAlias, len(byIPs))
|
||||
i := 0
|
||||
for key, hosts := range byIPs {
|
||||
sort.Strings(hosts)
|
||||
aliases[i] = apiv1.HostAlias{
|
||||
IP: key,
|
||||
Hostnames: hosts,
|
||||
}
|
||||
i++
|
||||
}
|
||||
sort.Slice(aliases, func(i, j int) bool { return aliases[i].IP < aliases[j].IP })
|
||||
return aliases, nil
|
||||
}
|
||||
|
||||
// toHostPID reports whether the compose "pid" setting requests the host PID namespace.
func toHostPID(pid string) bool {
	return pid == "host"
}
|
||||
|
||||
// toHostIPC reports whether the compose "ipc" setting requests the host IPC namespace.
func toHostIPC(ipc string) bool {
	return ipc == "host"
}
|
||||
|
||||
func toTerminationGracePeriodSeconds(duration *types.Duration) *int64 {
|
||||
if duration == nil {
|
||||
return nil
|
||||
}
|
||||
gracePeriod := int64(time.Duration(*duration).Seconds())
|
||||
return &gracePeriod
|
||||
}
|
||||
|
||||
func toLivenessProbe(hc *types.HealthCheckConfig) *apiv1.Probe {
|
||||
if hc == nil || len(hc.Test) < 1 || hc.Test[0] == "NONE" {
|
||||
return nil
|
||||
}
|
||||
|
||||
command := hc.Test[1:]
|
||||
if hc.Test[0] == "CMD-SHELL" {
|
||||
command = append([]string{"sh", "-c"}, command...)
|
||||
}
|
||||
|
||||
return &apiv1.Probe{
|
||||
TimeoutSeconds: toSecondsOrDefault(hc.Timeout, 1),
|
||||
PeriodSeconds: toSecondsOrDefault(hc.Interval, 1),
|
||||
FailureThreshold: int32(defaultUint64(hc.Retries, 3)),
|
||||
Handler: apiv1.Handler{
|
||||
Exec: &apiv1.ExecAction{
|
||||
Command: command,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func toEnv(env map[string]*string) ([]apiv1.EnvVar, error) {
|
||||
var envVars []apiv1.EnvVar
|
||||
|
||||
for k, v := range env {
|
||||
if v == nil {
|
||||
return nil, errors.Errorf("%s has no value, unsetting an environment variable is not supported", k)
|
||||
}
|
||||
envVars = append(envVars, toEnvVar(k, *v))
|
||||
}
|
||||
sort.Slice(envVars, func(i, j int) bool { return envVars[i].Name < envVars[j].Name })
|
||||
return envVars, nil
|
||||
}
|
||||
|
||||
func toEnvVar(key, value string) apiv1.EnvVar {
|
||||
return apiv1.EnvVar{
|
||||
Name: key,
|
||||
Value: value,
|
||||
}
|
||||
}
|
||||
|
||||
func toPorts(list []types.ServicePortConfig) []apiv1.ContainerPort {
|
||||
var ports []apiv1.ContainerPort
|
||||
|
||||
for _, v := range list {
|
||||
ports = append(ports, apiv1.ContainerPort{
|
||||
ContainerPort: int32(v.Target),
|
||||
Protocol: toProtocol(v.Protocol),
|
||||
})
|
||||
}
|
||||
|
||||
return ports
|
||||
}
|
||||
|
||||
func toProtocol(value string) apiv1.Protocol {
|
||||
if value == "udp" {
|
||||
return apiv1.ProtocolUDP
|
||||
}
|
||||
return apiv1.ProtocolTCP
|
||||
}
|
||||
|
||||
func toRestartPolicy(s types.ServiceConfig) (apiv1.RestartPolicy, error) {
|
||||
if s.Deploy == nil || s.Deploy.RestartPolicy == nil {
|
||||
return apiv1.RestartPolicyAlways, nil
|
||||
}
|
||||
policy := s.Deploy.RestartPolicy
|
||||
|
||||
switch policy.Condition {
|
||||
case string(swarm.RestartPolicyConditionAny):
|
||||
return apiv1.RestartPolicyAlways, nil
|
||||
case string(swarm.RestartPolicyConditionNone):
|
||||
return apiv1.RestartPolicyNever, nil
|
||||
case string(swarm.RestartPolicyConditionOnFailure):
|
||||
return apiv1.RestartPolicyOnFailure, nil
|
||||
default:
|
||||
return "", errors.Errorf("unsupported restart policy %s", policy.Condition)
|
||||
}
|
||||
}
|
||||
|
||||
func toResource(res *types.Resource) (apiv1.ResourceList, error) {
|
||||
list := make(apiv1.ResourceList)
|
||||
if res.NanoCPUs != "" {
|
||||
cpus, err := resource.ParseQuantity(res.NanoCPUs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
list[apiv1.ResourceCPU] = cpus
|
||||
}
|
||||
if res.MemoryBytes != 0 {
|
||||
memory, err := resource.ParseQuantity(fmt.Sprintf("%v", res.MemoryBytes))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
list[apiv1.ResourceMemory] = memory
|
||||
}
|
||||
return list, nil
|
||||
}
|
||||
|
||||
func toSecurityContext(s types.ServiceConfig) *apiv1.SecurityContext {
|
||||
isPrivileged := toBoolPointer(s.Privileged)
|
||||
isReadOnly := toBoolPointer(s.ReadOnly)
|
||||
|
||||
var capabilities *apiv1.Capabilities
|
||||
if s.CapAdd != nil || s.CapDrop != nil {
|
||||
capabilities = &apiv1.Capabilities{
|
||||
Add: toCapabilities(s.CapAdd),
|
||||
Drop: toCapabilities(s.CapDrop),
|
||||
}
|
||||
}
|
||||
|
||||
var userID *int64
|
||||
if s.User != "" {
|
||||
numerical, err := strconv.Atoi(s.User)
|
||||
if err == nil {
|
||||
unixUserID := int64(numerical)
|
||||
userID = &unixUserID
|
||||
}
|
||||
}
|
||||
|
||||
if isPrivileged == nil && isReadOnly == nil && capabilities == nil && userID == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &apiv1.SecurityContext{
|
||||
RunAsUser: userID,
|
||||
Privileged: isPrivileged,
|
||||
ReadOnlyRootFilesystem: isReadOnly,
|
||||
Capabilities: capabilities,
|
||||
}
|
||||
}
|
||||
|
||||
// toBoolPointer returns a pointer to the value when it is true and nil
// otherwise, so false settings are simply omitted from generated specs.
func toBoolPointer(value bool) *bool {
	if !value {
		return nil
	}
	return &value
}
|
||||
|
||||
// defaultUint64 dereferences v, substituting defaultValue when v is nil.
func defaultUint64(v *uint64, defaultValue uint64) uint64 { //nolint: unparam
	if v != nil {
		return *v
	}
	return defaultValue
}
|
||||
|
||||
func toCapabilities(list []string) (capabilities []apiv1.Capability) {
|
||||
for _, c := range list {
|
||||
capabilities = append(capabilities, apiv1.Capability(c))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
//nolint: unparam
|
||||
func forceRestartPolicy(podTemplate apiv1.PodTemplateSpec, forcedRestartPolicy apiv1.RestartPolicy) apiv1.PodTemplateSpec {
|
||||
if podTemplate.Spec.RestartPolicy != "" {
|
||||
podTemplate.Spec.RestartPolicy = forcedRestartPolicy
|
||||
}
|
||||
|
||||
return podTemplate
|
||||
}
|
|
@ -1,990 +0,0 @@
|
|||
package kube
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/compose-spec/compose-go/loader"
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
)
|
||||
|
||||
func loadYAML(yaml string) (*loader.Config, error) {
|
||||
dict, err := loader.ParseYAML([]byte(yaml))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
workingDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
configs := ConfigFiles: []types.ConfigFile{
|
||||
{Filename: "compose.yaml", Config: dict},
|
||||
},
|
||||
|
||||
config := types.ConfigDetails{
|
||||
WorkingDir: workingDir,
|
||||
ConfigFiles: configs,
|
||||
Environment: utils.Environment(),
|
||||
}
|
||||
model, err := loader.Load(config)
|
||||
return model
|
||||
}
|
||||
|
||||
func podTemplate(t *testing.T, yaml string) apiv1.PodTemplateSpec {
|
||||
res, err := podTemplateWithError(yaml)
|
||||
assert.NoError(t, err)
|
||||
return res
|
||||
}
|
||||
|
||||
func podTemplateWithError(yaml string) (apiv1.PodTemplateSpec, error) {
|
||||
model, err := loadYAML(yaml)
|
||||
if err != nil {
|
||||
return apiv1.PodTemplateSpec{}, err
|
||||
}
|
||||
|
||||
return toPodTemplate(model.Services[0], nil, model)
|
||||
}
|
||||
|
||||
// TestToPodWithDockerSocket checks that bind-mounting the Docker socket
// yields a host-path volume on the socket's parent directory plus a
// sub-path volume mount for the socket file itself.
func TestToPodWithDockerSocket(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("on windows, source path validation is broken (and actually, source validation for windows workload is broken too). Skip it for now, as we don't support it yet")
		return
	}
	podTemplate := podTemplate(t, `
version: "3"
services:
  redis:
    image: "redis:alpine"
    volumes:
      - "/var/run/docker.sock:/var/run/docker.sock"
`)

	expectedVolume := apiv1.Volume{
		Name: "mount-0",
		VolumeSource: apiv1.VolumeSource{
			HostPath: &apiv1.HostPathVolumeSource{
				Path: "/var/run",
			},
		},
	}

	expectedMount := apiv1.VolumeMount{
		Name:      "mount-0",
		MountPath: "/var/run/docker.sock",
		SubPath:   "docker.sock",
	}

	assert.Len(t, podTemplate.Spec.Volumes, 1)
	assert.Len(t, podTemplate.Spec.Containers[0].VolumeMounts, 1)
	assert.Equal(t, expectedVolume, podTemplate.Spec.Volumes[0])
	assert.Equal(t, expectedMount, podTemplate.Spec.Containers[0].VolumeMounts[0])
}
|
||||
|
||||
// TestToPodWithFunkyCommand checks that a compose `command` list with
// flag-like tokens is carried over verbatim as the container Args.
func TestToPodWithFunkyCommand(t *testing.T) {
	podTemplate := podTemplate(t, `
version: "3"
services:
  redis:
    image: basi/node-exporter
    command: ["-collector.procfs", "/host/proc", "-collector.sysfs", "/host/sys"]
`)

	expectedArgs := []string{
		`-collector.procfs`,
		`/host/proc`, // ?
		`-collector.sysfs`,
		`/host/sys`, // ?
	}
	assert.Equal(t, expectedArgs, podTemplate.Spec.Containers[0].Args)
}
|
||||
|
||||
// TestToPodWithGlobalVolume checks that a named volume produces only a
// container volume mount and no pod-level volume entry.
func TestToPodWithGlobalVolume(t *testing.T) {
	podTemplate := podTemplate(t, `
version: "3"
services:
  db:
    image: "postgres:9.4"
    volumes:
      - dbdata:/var/lib/postgresql/data
`)

	expectedMount := apiv1.VolumeMount{
		Name:      "dbdata",
		MountPath: "/var/lib/postgresql/data",
	}
	assert.Len(t, podTemplate.Spec.Volumes, 0)
	assert.Len(t, podTemplate.Spec.Containers[0].VolumeMounts, 1)
	assert.Equal(t, expectedMount, podTemplate.Spec.Containers[0].VolumeMounts[0])
}
|
||||
|
||||
// TestToPodWithResources checks that deploy resource limits and reservations
// map to container resource requirements, with memory sizes expanded to bytes.
func TestToPodWithResources(t *testing.T) {
	podTemplate := podTemplate(t, `
version: "3"
services:
  db:
    image: "postgres:9.4"
    deploy:
      resources:
        limits:
          cpus: "0.001"
          memory: 50Mb
        reservations:
          cpus: "0.0001"
          memory: 20Mb
`)

	expectedResourceRequirements := apiv1.ResourceRequirements{
		Limits: map[apiv1.ResourceName]resource.Quantity{
			apiv1.ResourceCPU:    resource.MustParse("0.001"),
			apiv1.ResourceMemory: resource.MustParse(fmt.Sprintf("%d", 50*1024*1024)),
		},
		Requests: map[apiv1.ResourceName]resource.Quantity{
			apiv1.ResourceCPU:    resource.MustParse("0.0001"),
			apiv1.ResourceMemory: resource.MustParse(fmt.Sprintf("%d", 20*1024*1024)),
		},
	}
	assert.Equal(t, expectedResourceRequirements, podTemplate.Spec.Containers[0].Resources)
}
|
||||
|
||||
// TestToPodWithCapabilities checks that cap_add/cap_drop entries appear in
// the container security context's Capabilities.
func TestToPodWithCapabilities(t *testing.T) {
	podTemplate := podTemplate(t, `
version: "3"
services:
  redis:
    image: "redis:alpine"
    cap_add:
      - ALL
    cap_drop:
      - NET_ADMIN
      - SYS_ADMIN
`)

	expectedSecurityContext := &apiv1.SecurityContext{
		Capabilities: &apiv1.Capabilities{
			Add:  []apiv1.Capability{"ALL"},
			Drop: []apiv1.Capability{"NET_ADMIN", "SYS_ADMIN"},
		},
	}

	assert.Equal(t, expectedSecurityContext, podTemplate.Spec.Containers[0].SecurityContext)
}
|
||||
|
||||
// TestToPodWithReadOnly checks that read_only maps to ReadOnlyRootFilesystem.
func TestToPodWithReadOnly(t *testing.T) {
	podTemplate := podTemplate(t, `
version: "3"
services:
  redis:
    image: "redis:alpine"
    read_only: true
`)

	yes := true
	expectedSecurityContext := &apiv1.SecurityContext{
		ReadOnlyRootFilesystem: &yes,
	}
	assert.Equal(t, expectedSecurityContext, podTemplate.Spec.Containers[0].SecurityContext)
}
|
||||
|
||||
// TestToPodWithPrivileged checks that privileged maps to the security
// context's Privileged flag.
func TestToPodWithPrivileged(t *testing.T) {
	podTemplate := podTemplate(t, `
version: "3"
services:
  redis:
    image: "redis:alpine"
    privileged: true
`)

	yes := true
	expectedSecurityContext := &apiv1.SecurityContext{
		Privileged: &yes,
	}
	assert.Equal(t, expectedSecurityContext, podTemplate.Spec.Containers[0].SecurityContext)
}
|
||||
|
||||
// TestToPodWithEnvNilShouldErrorOut checks that a valueless environment
// variable is rejected (unsetting is not supported in a pod spec).
func TestToPodWithEnvNilShouldErrorOut(t *testing.T) {
	_, err := podTemplateWithError(`
version: "3"
services:
  redis:
    image: "redis:alpine"
    environment:
      - SESSION_SECRET
`)
	assert.Error(t, err)
}
|
||||
|
||||
// TestToPodWithEnv checks that KEY=VALUE environment entries become
// name-sorted Kubernetes env vars on the container.
func TestToPodWithEnv(t *testing.T) {
	podTemplate := podTemplate(t, `
version: "3"
services:
  redis:
    image: "redis:alpine"
    environment:
      - RACK_ENV=development
      - SHOW=true
`)

	expectedEnv := []apiv1.EnvVar{
		{
			Name:  "RACK_ENV",
			Value: "development",
		},
		{
			Name:  "SHOW",
			Value: "true",
		},
	}

	assert.Equal(t, expectedEnv, podTemplate.Spec.Containers[0].Env)
}
|
||||
|
||||
// TestToPodWithVolume checks that two absolute-path bind mounts produce one
// pod volume and one container mount each.
func TestToPodWithVolume(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("on windows, source path validation is broken (and actually, source validation for windows workload is broken too). Skip it for now, as we don't support it yet")
		return
	}
	podTemplate := podTemplate(t, `
version: "3"
services:
  nginx:
    image: nginx
    volumes:
      - /ignore:/ignore
      - /opt/data:/var/lib/mysql:ro
`)

	assert.Len(t, podTemplate.Spec.Volumes, 2)
	assert.Len(t, podTemplate.Spec.Containers[0].VolumeMounts, 2)
}
|
||||
|
||||
// ToPodWithRelativeVolumes expects relative bind-mount paths to be rejected.
// Disabled via the /*FIXME Test*/ prefix, so it does not run.
func /*FIXME Test*/ ToPodWithRelativeVolumes(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("on windows, source path validation is broken (and actually, source validation for windows workload is broken too). Skip it for now, as we don't support it yet")
		return
	}
	_, err := podTemplateWithError(`
version: "3"
services:
  nginx:
    image: nginx
    volumes:
      - ./fail:/ignore
`)

	assert.Error(t, err)
}
|
||||
|
||||
// TestToPodWithHealthCheck checks that a CMD healthcheck maps to an exec
// liveness probe with the configured timing (the leading "CMD" is dropped).
func TestToPodWithHealthCheck(t *testing.T) {
	podTemplate := podTemplate(t, `
version: "3"
services:
  nginx:
    image: nginx
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost"]
      interval: 90s
      timeout: 10s
      retries: 3
`)

	expectedLivenessProbe := &apiv1.Probe{
		TimeoutSeconds:   10,
		PeriodSeconds:    90,
		FailureThreshold: 3,
		Handler: apiv1.Handler{
			Exec: &apiv1.ExecAction{
				Command: []string{"curl", "-f", "http://localhost"},
			},
		},
	}

	assert.Equal(t, expectedLivenessProbe, podTemplate.Spec.Containers[0].LivenessProbe)
}
|
||||
|
||||
// TestToPodWithShellHealthCheck checks that a CMD-SHELL healthcheck is
// wrapped in `sh -c` and gets the default probe timings.
func TestToPodWithShellHealthCheck(t *testing.T) {
	podTemplate := podTemplate(t, `
version: "3"
services:
  nginx:
    image: nginx
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost"]
`)

	expectedLivenessProbe := &apiv1.Probe{
		TimeoutSeconds:   1,
		PeriodSeconds:    1,
		FailureThreshold: 3,
		Handler: apiv1.Handler{
			Exec: &apiv1.ExecAction{
				Command: []string{"sh", "-c", "curl -f http://localhost"},
			},
		},
	}

	assert.Equal(t, expectedLivenessProbe, podTemplate.Spec.Containers[0].LivenessProbe)
}
|
||||
|
||||
// TestToPodWithTargetlessExternalSecret checks that a bare secret reference
// becomes a secret volume keyed on "file" and mounted under /run/secrets
// using the source name.
func TestToPodWithTargetlessExternalSecret(t *testing.T) {
	podTemplate := podTemplate(t, `
version: "3"
services:
  nginx:
    image: nginx
    secrets:
      - my_secret
`)

	expectedVolume := apiv1.Volume{
		Name: "secret-0",
		VolumeSource: apiv1.VolumeSource{
			Secret: &apiv1.SecretVolumeSource{
				SecretName: "my_secret",
				Items: []apiv1.KeyToPath{
					{
						Key:  "file", // TODO: This is the key we assume external secrets use
						Path: "secret-0",
					},
				},
			},
		},
	}

	expectedMount := apiv1.VolumeMount{
		Name:      "secret-0",
		ReadOnly:  true,
		MountPath: "/run/secrets/my_secret",
		SubPath:   "secret-0",
	}

	assert.Len(t, podTemplate.Spec.Volumes, 1)
	assert.Len(t, podTemplate.Spec.Containers[0].VolumeMounts, 1)
	assert.Equal(t, expectedVolume, podTemplate.Spec.Volumes[0])
	assert.Equal(t, expectedMount, podTemplate.Spec.Containers[0].VolumeMounts[0])
}
|
||||
|
||||
// TestToPodWithExternalSecret checks that a secret with an explicit target
// is mounted under /run/secrets using the target name, not the source name.
func TestToPodWithExternalSecret(t *testing.T) {
	podTemplate := podTemplate(t, `
version: "3"
services:
  nginx:
    image: nginx
    secrets:
      - source: my_secret
        target: nginx_secret
`)

	expectedVolume := apiv1.Volume{
		Name: "secret-0",
		VolumeSource: apiv1.VolumeSource{
			Secret: &apiv1.SecretVolumeSource{
				SecretName: "my_secret",
				Items: []apiv1.KeyToPath{
					{
						Key:  "file", // TODO: This is the key we assume external secrets use
						Path: "secret-0",
					},
				},
			},
		},
	}

	expectedMount := apiv1.VolumeMount{
		Name:      "secret-0",
		ReadOnly:  true,
		MountPath: "/run/secrets/nginx_secret",
		SubPath:   "secret-0",
	}

	assert.Len(t, podTemplate.Spec.Volumes, 1)
	assert.Len(t, podTemplate.Spec.Containers[0].VolumeMounts, 1)
	assert.Equal(t, expectedVolume, podTemplate.Spec.Volumes[0])
	assert.Equal(t, expectedMount, podTemplate.Spec.Containers[0].VolumeMounts[0])
}
|
||||
|
||||
// ToPodWithFileBasedSecret expects a file-backed secret to be keyed on the
// file's base name. Disabled via the /*FIXME Test*/ prefix.
func /*FIXME Test*/ ToPodWithFileBasedSecret(t *testing.T) {
	podTemplate := podTemplate(t, `
version: "3"
services:
  nginx:
    image: nginx
    secrets:
      - source: my_secret
secrets:
  my_secret:
    file: ./secret.txt
`)

	expectedVolume := apiv1.Volume{
		Name: "secret-0",
		VolumeSource: apiv1.VolumeSource{
			Secret: &apiv1.SecretVolumeSource{
				SecretName: "my_secret",
				Items: []apiv1.KeyToPath{
					{
						Key:  "secret.txt",
						Path: "secret-0",
					},
				},
			},
		},
	}

	expectedMount := apiv1.VolumeMount{
		Name:      "secret-0",
		ReadOnly:  true,
		MountPath: "/run/secrets/my_secret",
		SubPath:   "secret-0",
	}

	assert.Len(t, podTemplate.Spec.Volumes, 1)
	assert.Len(t, podTemplate.Spec.Containers[0].VolumeMounts, 1)
	assert.Equal(t, expectedVolume, podTemplate.Spec.Volumes[0])
	assert.Equal(t, expectedMount, podTemplate.Spec.Containers[0].VolumeMounts[0])
}
|
||||
|
||||
// ToPodWithTwoFileBasedSecrets expects two file-backed secrets to get
// sequentially numbered volumes/mounts, with the second mounted under its
// target name. Disabled via the /*FIXME Test*/ prefix.
func /*FIXME Test*/ ToPodWithTwoFileBasedSecrets(t *testing.T) {
	podTemplate := podTemplate(t, `
version: "3"
services:
  nginx:
    image: nginx
    secrets:
      - source: my_secret1
      - source: my_secret2
        target: secret2
secrets:
  my_secret1:
    file: ./secret1.txt
  my_secret2:
    file: ./secret2.txt
`)

	expectedVolumes := []apiv1.Volume{
		{
			Name: "secret-0",
			VolumeSource: apiv1.VolumeSource{
				Secret: &apiv1.SecretVolumeSource{
					SecretName: "my_secret1",
					Items: []apiv1.KeyToPath{
						{
							Key:  "secret1.txt",
							Path: "secret-0",
						},
					},
				},
			},
		},
		{
			Name: "secret-1",
			VolumeSource: apiv1.VolumeSource{
				Secret: &apiv1.SecretVolumeSource{
					SecretName: "my_secret2",
					Items: []apiv1.KeyToPath{
						{
							Key:  "secret2.txt",
							Path: "secret-1",
						},
					},
				},
			},
		},
	}

	expectedMounts := []apiv1.VolumeMount{
		{
			Name:      "secret-0",
			ReadOnly:  true,
			MountPath: "/run/secrets/my_secret1",
			SubPath:   "secret-0",
		},
		{
			Name:      "secret-1",
			ReadOnly:  true,
			MountPath: "/run/secrets/secret2",
			SubPath:   "secret-1",
		},
	}

	assert.Equal(t, expectedVolumes, podTemplate.Spec.Volumes)
	assert.Equal(t, expectedMounts, podTemplate.Spec.Containers[0].VolumeMounts)
}
|
||||
|
||||
// TestToPodWithTerminationGracePeriod checks that stop_grace_period maps to
// TerminationGracePeriodSeconds in whole seconds.
func TestToPodWithTerminationGracePeriod(t *testing.T) {
	podTemplate := podTemplate(t, `
version: "3"
services:
  redis:
    image: "redis:alpine"
    stop_grace_period: 100s
`)

	expected := int64(100)
	assert.Equal(t, &expected, podTemplate.Spec.TerminationGracePeriodSeconds)
}
|
||||
|
||||
// TestToPodWithTmpfs checks that a tmpfs entry becomes a memory-backed
// EmptyDir volume mounted at the requested path.
func TestToPodWithTmpfs(t *testing.T) {
	podTemplate := podTemplate(t, `
version: "3"
services:
  redis:
    image: "redis:alpine"
    tmpfs:
      - /tmp
`)

	expectedVolume := apiv1.Volume{
		Name: "tmp-0",
		VolumeSource: apiv1.VolumeSource{
			EmptyDir: &apiv1.EmptyDirVolumeSource{
				Medium: "Memory",
			},
		},
	}

	expectedMount := apiv1.VolumeMount{
		Name:      "tmp-0",
		MountPath: "/tmp",
	}

	assert.Len(t, podTemplate.Spec.Volumes, 1)
	assert.Len(t, podTemplate.Spec.Containers[0].VolumeMounts, 1)
	assert.Equal(t, expectedVolume, podTemplate.Spec.Volumes[0])
	assert.Equal(t, expectedMount, podTemplate.Spec.Containers[0].VolumeMounts[0])
}
|
||||
|
||||
// TestToPodWithNumericalUser checks that a numeric compose user maps to
// RunAsUser in the container security context.
func TestToPodWithNumericalUser(t *testing.T) {
	podTemplate := podTemplate(t, `
version: "3"
services:
  redis:
    image: "redis:alpine"
    user: "1000"
`)

	userID := int64(1000)

	expectedSecurityContext := &apiv1.SecurityContext{
		RunAsUser: &userID,
	}

	assert.Equal(t, expectedSecurityContext, podTemplate.Spec.Containers[0].SecurityContext)
}
|
||||
|
||||
// TestToPodWithGitVolume checks that a volume of type "git" becomes a
// GitRepo volume source mounted read-write at the target path.
func TestToPodWithGitVolume(t *testing.T) {
	podTemplate := podTemplate(t, `
version: "3"
services:
  redis:
    image: "redis:alpine"
    volumes:
      - source: "git@github.com:moby/moby.git"
        target: /sources
        type: git
`)

	expectedVolume := apiv1.Volume{
		Name: "mount-0",
		VolumeSource: apiv1.VolumeSource{
			GitRepo: &apiv1.GitRepoVolumeSource{
				Repository: "git@github.com:moby/moby.git",
			},
		},
	}

	expectedMount := apiv1.VolumeMount{
		Name:      "mount-0",
		ReadOnly:  false,
		MountPath: "/sources",
	}

	assert.Len(t, podTemplate.Spec.Volumes, 1)
	assert.Len(t, podTemplate.Spec.Containers[0].VolumeMounts, 1)
	assert.Equal(t, expectedVolume, podTemplate.Spec.Volumes[0])
	assert.Equal(t, expectedMount, podTemplate.Spec.Containers[0].VolumeMounts[0])
}
|
||||
|
||||
// ToPodWithFileBasedConfig expects a file-backed config with an explicit
// target and mode to become a ConfigMap volume keyed on the file's base
// name. Disabled via the /*FIXME Test*/ prefix.
func /*FIXME Test*/ ToPodWithFileBasedConfig(t *testing.T) {
	podTemplate := podTemplate(t, `
version: "3"
services:
  redis:
    image: "redis:alpine"
    configs:
      - source: my_config
        target: /usr/share/nginx/html/index.html
        uid: "103"
        gid: "103"
        mode: 0440
configs:
  my_config:
    file: ./file.html
`)

	mode := int32(0440)

	expectedVolume := apiv1.Volume{
		Name: "config-0",
		VolumeSource: apiv1.VolumeSource{
			ConfigMap: &apiv1.ConfigMapVolumeSource{
				LocalObjectReference: apiv1.LocalObjectReference{
					Name: "my_config",
				},
				Items: []apiv1.KeyToPath{
					{
						Key:  "file.html",
						Path: "config-0",
						Mode: &mode,
					},
				},
			},
		},
	}

	expectedMount := apiv1.VolumeMount{
		Name:      "config-0",
		ReadOnly:  true,
		MountPath: "/usr/share/nginx/html/index.html",
		SubPath:   "config-0",
	}

	assert.Len(t, podTemplate.Spec.Volumes, 1)
	assert.Len(t, podTemplate.Spec.Containers[0].VolumeMounts, 1)
	assert.Equal(t, expectedVolume, podTemplate.Spec.Volumes[0])
	assert.Equal(t, expectedMount, podTemplate.Spec.Containers[0].VolumeMounts[0])
}
|
||||
|
||||
// ToPodWithTargetlessFileBasedConfig expects a bare config reference to be
// mounted at /<config-name>. Disabled via the /*FIXME Test*/ prefix.
// NOTE(review): the expected ConfigMap name is "myconfig" while the compose
// config is "my_config" — presumably name sanitization strips the
// underscore; confirm against the conversion code before re-enabling.
func /*FIXME Test*/ ToPodWithTargetlessFileBasedConfig(t *testing.T) {
	podTemplate := podTemplate(t, `
version: "3"
services:
  redis:
    image: "redis:alpine"
    configs:
      - my_config
configs:
  my_config:
    file: ./file.html
`)

	expectedVolume := apiv1.Volume{
		Name: "config-0",
		VolumeSource: apiv1.VolumeSource{
			ConfigMap: &apiv1.ConfigMapVolumeSource{
				LocalObjectReference: apiv1.LocalObjectReference{
					Name: "myconfig",
				},
				Items: []apiv1.KeyToPath{
					{
						Key:  "file.html",
						Path: "config-0",
					},
				},
			},
		},
	}

	expectedMount := apiv1.VolumeMount{
		Name:      "config-0",
		ReadOnly:  true,
		MountPath: "/myconfig",
		SubPath:   "config-0",
	}

	assert.Len(t, podTemplate.Spec.Volumes, 1)
	assert.Len(t, podTemplate.Spec.Containers[0].VolumeMounts, 1)
	assert.Equal(t, expectedVolume, podTemplate.Spec.Volumes[0])
	assert.Equal(t, expectedMount, podTemplate.Spec.Containers[0].VolumeMounts[0])
}
|
||||
|
||||
// TestToPodWithExternalConfig checks that an external config becomes a
// ConfigMap volume keyed on the assumed "file" key with the requested mode.
func TestToPodWithExternalConfig(t *testing.T) {
	podTemplate := podTemplate(t, `
version: "3"
services:
  redis:
    image: "redis:alpine"
    configs:
      - source: my_config
        target: /usr/share/nginx/html/index.html
        uid: "103"
        gid: "103"
        mode: 0440
configs:
  my_config:
    external: true
`)

	mode := int32(0440)

	expectedVolume := apiv1.Volume{
		Name: "config-0",
		VolumeSource: apiv1.VolumeSource{
			ConfigMap: &apiv1.ConfigMapVolumeSource{
				LocalObjectReference: apiv1.LocalObjectReference{
					Name: "my_config",
				},
				Items: []apiv1.KeyToPath{
					{
						Key:  "file", // TODO: This is the key we assume external config use
						Path: "config-0",
						Mode: &mode,
					},
				},
			},
		},
	}

	expectedMount := apiv1.VolumeMount{
		Name:      "config-0",
		ReadOnly:  true,
		MountPath: "/usr/share/nginx/html/index.html",
		SubPath:   "config-0",
	}

	assert.Len(t, podTemplate.Spec.Volumes, 1)
	assert.Len(t, podTemplate.Spec.Containers[0].VolumeMounts, 1)
	assert.Equal(t, expectedVolume, podTemplate.Spec.Volumes[0])
	assert.Equal(t, expectedMount, podTemplate.Spec.Containers[0].VolumeMounts[0])
}
|
||||
|
||||
func /*FIXME Test*/ ToPodWithTwoConfigsSameMountPoint(t *testing.T) {
|
||||
podTemplate := podTemplate(t, `
|
||||
version: "3"
|
||||
services:
|
||||
nginx:
|
||||
image: nginx
|
||||
configs:
|
||||
- source: first
|
||||
target: /data/first.json
|
||||
mode: "0440"
|
||||
- source: second
|
||||
target: /data/second.json
|
||||
mode: "0550"
|
||||
configs:
|
||||
first:
|
||||
file: ./file1
|
||||
secondv:
|
||||
file: ./file2
|
||||
`)
|
||||
|
||||
mode0440 := int32(0440)
|
||||
mode0550 := int32(0550)
|
||||
|
||||
expectedVolumes := []apiv1.Volume{
|
||||
{
|
||||
Name: "config-0",
|
||||
VolumeSource: apiv1.VolumeSource{
|
||||
ConfigMap: &apiv1.ConfigMapVolumeSource{
|
||||
LocalObjectReference: apiv1.LocalObjectReference{
|
||||
Name: "first",
|
||||
},
|
||||
Items: []apiv1.KeyToPath{
|
||||
{
|
||||
Key: "file1",
|
||||
Path: "config-0",
|
||||
Mode: &mode0440,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "config-1",
|
||||
VolumeSource: apiv1.VolumeSource{
|
||||
ConfigMap: &apiv1.ConfigMapVolumeSource{
|
||||
LocalObjectReference: apiv1.LocalObjectReference{
|
||||
Name: "second",
|
||||
},
|
||||
Items: []apiv1.KeyToPath{
|
||||
{
|
||||
Key: "file2",
|
||||
Path: "config-1",
|
||||
Mode: &mode0550,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
expectedMounts := []apiv1.VolumeMount{
|
||||
{
|
||||
Name: "config-0",
|
||||
ReadOnly: true,
|
||||
MountPath: "/data/first.json",
|
||||
SubPath: "config-0",
|
||||
},
|
||||
{
|
||||
Name: "config-1",
|
||||
ReadOnly: true,
|
||||
MountPath: "/data/second.json",
|
||||
SubPath: "config-1",
|
||||
},
|
||||
}
|
||||
|
||||
assert.Equal(t, expectedVolumes, podTemplate.Spec.Volumes)
|
||||
assert.Equal(t, expectedMounts, podTemplate.Spec.Containers[0].VolumeMounts)
|
||||
}
|
||||
|
||||
// TestToPodWithTwoExternalConfigsSameMountPoint checks that two configs
// mounted under the same directory get sequentially numbered volumes/mounts.
// NOTE(review): the expectations use the external-config key "file" even
// though the compose document declares file-backed configs (file: ./file1) —
// verify which key convention the conversion actually applies here.
func TestToPodWithTwoExternalConfigsSameMountPoint(t *testing.T) {
	podTemplate := podTemplate(t, `
version: "3"
services:
  nginx:
    image: nginx
    configs:
      - source: first
        target: /data/first.json
      - source: second
        target: /data/second.json
configs:
  first:
    file: ./file1
  second:
    file: ./file2
`)

	expectedVolumes := []apiv1.Volume{
		{
			Name: "config-0",
			VolumeSource: apiv1.VolumeSource{
				ConfigMap: &apiv1.ConfigMapVolumeSource{
					LocalObjectReference: apiv1.LocalObjectReference{
						Name: "first",
					},
					Items: []apiv1.KeyToPath{
						{
							Key:  "file",
							Path: "config-0",
						},
					},
				},
			},
		},
		{
			Name: "config-1",
			VolumeSource: apiv1.VolumeSource{
				ConfigMap: &apiv1.ConfigMapVolumeSource{
					LocalObjectReference: apiv1.LocalObjectReference{
						Name: "second",
					},
					Items: []apiv1.KeyToPath{
						{
							Key:  "file",
							Path: "config-1",
						},
					},
				},
			},
		},
	}

	expectedMounts := []apiv1.VolumeMount{
		{
			Name:      "config-0",
			ReadOnly:  true,
			MountPath: "/data/first.json",
			SubPath:   "config-0",
		},
		{
			Name:      "config-1",
			ReadOnly:  true,
			MountPath: "/data/second.json",
			SubPath:   "config-1",
		},
	}

	assert.Equal(t, expectedVolumes, podTemplate.Spec.Volumes)
	assert.Equal(t, expectedMounts, podTemplate.Spec.Containers[0].VolumeMounts)
}
|
||||
|
||||
// NOTE(review): the "Test" prefix was deliberately stripped (see the FIXME),
// so `go test` silently skips this case. Either restore the name
// TestToPodWithPullSecret once the conversion supports pull secrets again,
// or delete the dead code — a permanently disabled test gives false confidence.
func /*FIXME Test*/ ToPodWithPullSecret(t *testing.T) {
	// The x-kubernetes.pull-secret extension should surface as the pod's
	// single imagePullSecrets entry.
	podTemplateWithSecret := podTemplate(t, `
version: "3"
services:
  nginx:
    image: nginx
    x-kubernetes.pull-secret: test-pull-secret
`)

	assert.Equal(t, 1, len(podTemplateWithSecret.Spec.ImagePullSecrets))
	assert.Equal(t, "test-pull-secret", podTemplateWithSecret.Spec.ImagePullSecrets[0].Name)

	// Without the extension key, no pull secrets should be set at all.
	podTemplateNoSecret := podTemplate(t, `
version: "3"
services:
  nginx:
    image: nginx
`)

	assert.Nil(t, podTemplateNoSecret.Spec.ImagePullSecrets)
}
|
||||
|
||||
// NOTE(review): the "Test" prefix was deliberately stripped (see the FIXME),
// so `go test` silently skips this case. Restore the name
// TestToPodWithPullPolicy when re-enabling, or remove the dead code.
//
// Table-driven check of image pull policy derivation: specific tags default
// to IfNotPresent, :latest defaults to Always, the x-kubernetes.pull-policy
// extension overrides both, and an unknown policy is rejected with an error.
func /*FIXME Test*/ ToPodWithPullPolicy(t *testing.T) {
	cases := []struct {
		name           string
		stack          string
		expectedPolicy apiv1.PullPolicy
		expectedError  string
	}{
		{
			name: "specific tag",
			stack: `
version: "3"
services:
  nginx:
    image: nginx:specific
`,
			expectedPolicy: apiv1.PullIfNotPresent,
		},
		{
			name: "latest tag",
			stack: `
version: "3"
services:
  nginx:
    image: nginx:latest
`,
			expectedPolicy: apiv1.PullAlways,
		},
		{
			name: "explicit policy",
			stack: `
version: "3"
services:
  nginx:
    image: nginx:specific
    x-kubernetes.pull-policy: Never
`,
			expectedPolicy: apiv1.PullNever,
		},
		{
			name: "invalid policy",
			stack: `
version: "3"
services:
  nginx:
    image: nginx:specific
    x-kubernetes.pull-policy: Invalid
`,
			expectedError: `invalid pull policy "Invalid", must be "Always", "IfNotPresent" or "Never"`,
		},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			pod, err := podTemplateWithError(c.stack)
			if c.expectedError != "" {
				assert.EqualError(t, err, c.expectedError)
			} else {
				assert.NoError(t, err)
				assert.Equal(t, pod.Spec.Containers[0].ImagePullPolicy, c.expectedPolicy)
			}
		})
	}
}
|
|
@ -1,228 +0,0 @@
|
|||
package kube
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
// dockerSock is the conventional path of the Docker daemon socket. A bind
// mount of exactly this path on both ends is special-cased in toVolumeSpecs
// so the socket can be shared into the pod via a /var/run hostPath.
const dockerSock = "/var/run/docker.sock"

// volumeSpec pairs a container VolumeMount with its backing VolumeSource.
// A nil source means no explicit source was derived; toVolumes substitutes
// an in-memory emptyDir in that case.
type volumeSpec struct {
	mount  apiv1.VolumeMount
	source *apiv1.VolumeSource
}
|
||||
|
||||
func hasPersistentVolumes(s types.ServiceConfig) bool {
|
||||
for _, volume := range s.Volumes {
|
||||
if volume.Type == "volume" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// toVolumeSpecs derives one volumeSpec per service volume, tmpfs entry,
// secret, and config. Volume/tmpfs sources are synthesized here; secret and
// config sources come from the top-level model declarations.
func toVolumeSpecs(s types.ServiceConfig, model *types.Config) ([]volumeSpec, error) {
	var specs []volumeSpec
	for i, m := range s.Volumes {
		var source *apiv1.VolumeSource
		name := fmt.Sprintf("mount-%d", i)
		subpath := ""
		if m.Source == dockerSock && m.Target == dockerSock {
			// Share the Docker socket by mounting /var/run with a subpath,
			// rather than the socket file itself.
			subpath = "docker.sock"
			source = hostPathVolume("/var/run")
		} else if strings.HasSuffix(m.Source, ".git") {
			source = gitVolume(m.Source)
		} else if m.Type == "volume" {
			// Named volume: no explicit source here (toVolumes falls back to
			// an emptyDir); underscores are invalid in k8s names, so swap
			// them for hyphens.
			if m.Source != "" {
				name = strings.ReplaceAll(m.Source, "_", "-")
			}
		} else {
			// bind mount
			if !filepath.IsAbs(m.Source) {
				return nil, errors.Errorf("%s: only absolute paths can be specified in mount source", m.Source)
			}
			if m.Source == "/" {
				source = hostPathVolume("/")
			} else {
				// Mount the parent directory and select the entry via subpath.
				parent, file := filepath.Split(m.Source)
				if parent != "/" {
					parent = strings.TrimSuffix(parent, "/")
				}
				source = hostPathVolume(parent)
				subpath = file
			}
		}

		specs = append(specs, volumeSpec{
			source: source,
			mount:  volumeMount(name, m.Target, m.ReadOnly, subpath),
		})
	}

	for i, m := range s.Tmpfs {
		name := fmt.Sprintf("tmp-%d", i)

		specs = append(specs, volumeSpec{
			source: emptyVolumeInMemory(),
			mount:  volumeMount(name, m, false, ""),
		})
	}

	// NOTE(review): `s` here shadows the ServiceConfig parameter — rename the
	// loop variable when next touching this code.
	for i, s := range s.Secrets {
		name := fmt.Sprintf("secret-%d", i)

		target := path.Join("/run/secrets", or(s.Target, s.Source))
		subPath := name
		readOnly := true

		specs = append(specs, volumeSpec{
			// NOTE(review): model.Secrets appears to be keyed by the secret's
			// declared name, so looking it up with the generated "secret-N"
			// name presumably always yields the zero value (making toKey fall
			// back to "file"). The visible tests pin this behavior — confirm
			// intent (external-only secrets?) before changing to
			// model.Secrets[s.Source].
			source: secretVolume(s, model.Secrets[name], subPath),
			mount:  volumeMount(name, target, readOnly, subPath),
		})
	}

	for i, c := range s.Configs {
		name := fmt.Sprintf("config-%d", i)

		target := or(c.Target, "/"+c.Source)
		subPath := name
		readOnly := true

		specs = append(specs, volumeSpec{
			// NOTE(review): same concern as secrets above — "config-N" is
			// presumably never a key of model.Configs; verify against callers
			// before switching to model.Configs[c.Source].
			source: configVolume(c, model.Configs[name], subPath),
			mount:  volumeMount(name, target, readOnly, subPath),
		})
	}

	return specs, nil
}
|
||||
|
||||
// or returns v unless it is empty or ".", in which case defaultValue is
// returned instead.
func or(v string, defaultValue string) string {
	switch v {
	case "", ".":
		return defaultValue
	default:
		return v
	}
}
|
||||
|
||||
func toVolumeMounts(s types.ServiceConfig, model *types.Config) ([]apiv1.VolumeMount, error) {
|
||||
var mounts []apiv1.VolumeMount
|
||||
specs, err := toVolumeSpecs(s, model)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, spec := range specs {
|
||||
mounts = append(mounts, spec.mount)
|
||||
}
|
||||
return mounts, nil
|
||||
}
|
||||
|
||||
func toVolumes(s types.ServiceConfig, model *types.Config) ([]apiv1.Volume, error) {
|
||||
var volumes []apiv1.Volume
|
||||
specs, err := toVolumeSpecs(s, model)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, spec := range specs {
|
||||
if spec.source == nil {
|
||||
spec.source = emptyVolumeInMemory()
|
||||
}
|
||||
volumes = append(volumes, apiv1.Volume{
|
||||
Name: spec.mount.Name,
|
||||
VolumeSource: *spec.source,
|
||||
})
|
||||
}
|
||||
return volumes, nil
|
||||
}
|
||||
|
||||
func gitVolume(path string) *apiv1.VolumeSource {
|
||||
return &apiv1.VolumeSource{
|
||||
GitRepo: &apiv1.GitRepoVolumeSource{
|
||||
Repository: filepath.ToSlash(path),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func hostPathVolume(path string) *apiv1.VolumeSource {
|
||||
return &apiv1.VolumeSource{
|
||||
HostPath: &apiv1.HostPathVolumeSource{
|
||||
Path: path,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// defaultMode converts an optional unsigned file mode to the signed pointer
// form used by the Kubernetes API. A nil input yields a nil output.
func defaultMode(mode *uint32) *int32 {
	if mode == nil {
		return nil
	}
	signed := int32(*mode)
	return &signed
}
|
||||
|
||||
func secretVolume(config types.ServiceSecretConfig, topLevelConfig types.SecretConfig, subPath string) *apiv1.VolumeSource {
|
||||
return &apiv1.VolumeSource{
|
||||
Secret: &apiv1.SecretVolumeSource{
|
||||
SecretName: config.Source,
|
||||
Items: []apiv1.KeyToPath{
|
||||
{
|
||||
Key: toKey(topLevelConfig.File),
|
||||
Path: subPath,
|
||||
Mode: defaultMode(config.Mode),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func volumeMount(name, path string, readOnly bool, subPath string) apiv1.VolumeMount {
|
||||
return apiv1.VolumeMount{
|
||||
Name: name,
|
||||
MountPath: path,
|
||||
ReadOnly: readOnly,
|
||||
SubPath: subPath,
|
||||
}
|
||||
}
|
||||
|
||||
func configVolume(config types.ServiceConfigObjConfig, topLevelConfig types.ConfigObjConfig, subPath string) *apiv1.VolumeSource {
|
||||
return &apiv1.VolumeSource{
|
||||
ConfigMap: &apiv1.ConfigMapVolumeSource{
|
||||
LocalObjectReference: apiv1.LocalObjectReference{
|
||||
Name: config.Source,
|
||||
},
|
||||
Items: []apiv1.KeyToPath{
|
||||
{
|
||||
Key: toKey(topLevelConfig.File),
|
||||
Path: subPath,
|
||||
Mode: defaultMode(config.Mode),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// toKey derives the projection key for a secret/config from its backing
// file's base name, falling back to a fixed key when no file is declared.
func toKey(file string) string {
	if file == "" {
		return "file" // TODO: hard-coded key for external configs
	}
	return path.Base(file)
}
|
||||
|
||||
func emptyVolumeInMemory() *apiv1.VolumeSource {
|
||||
return &apiv1.VolumeSource{
|
||||
EmptyDir: &apiv1.EmptyDirVolumeSource{
|
||||
Medium: apiv1.StorageMediumMemory,
|
||||
},
|
||||
}
|
||||
}
|
|
@ -1,124 +0,0 @@
|
|||
package utils
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/compose-spec/compose-go/loader"
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
// SupportedFilenames lists the default compose file names probed, in order of
// preference, when no explicit config path is given (see getConfigPaths).
var SupportedFilenames = []string{"compose.yaml", "compose.yml", "docker-compose.yml", "docker-compose.yaml"}
|
||||
|
||||
func GetConfigs(name string, configPaths []string) (string, []types.ConfigFile, error) {
|
||||
configPath, err := getConfigPaths(configPaths)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
if configPath == nil {
|
||||
return "", nil, nil
|
||||
}
|
||||
workingDir := filepath.Dir(configPath[0])
|
||||
|
||||
if name == "" {
|
||||
name = os.Getenv("COMPOSE_PROJECT_NAME")
|
||||
}
|
||||
if name == "" {
|
||||
r := regexp.MustCompile(`[^a-z0-9\\-_]+`)
|
||||
name = r.ReplaceAllString(strings.ToLower(filepath.Base(workingDir)), "")
|
||||
}
|
||||
|
||||
configs, err := parseConfigs(configPath)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
return workingDir, configs, nil
|
||||
}
|
||||
|
||||
// getConfigPaths resolves the list of compose file paths to load.
// Precedence: explicit configPaths (made absolute and stat-checked, with "-"
// passed through for stdin), then the COMPOSE_FILE environment variable
// (split on COMPOSE_FILE_SEPARATOR, defaulting to the OS path-list
// separator), then a walk from the current directory up to the filesystem
// root looking for SupportedFilenames. Returns (nil, nil) when nothing is
// found.
func getConfigPaths(configPaths []string) ([]string, error) {
	paths := []string{}
	pwd, err := os.Getwd()
	if err != nil {
		return nil, err
	}

	if len(configPaths) != 0 {
		for _, f := range configPaths {
			if f == "-" {
				// "-" means read from stdin; no filesystem check applies.
				paths = append(paths, f)
				continue
			}
			if !filepath.IsAbs(f) {
				f = filepath.Join(pwd, f)
			}
			if _, err := os.Stat(f); err != nil {
				return nil, err
			}
			paths = append(paths, f)
		}
		return paths, nil
	}

	sep := os.Getenv("COMPOSE_FILE_SEPARATOR")
	if sep == "" {
		sep = string(os.PathListSeparator)
	}
	f := os.Getenv("COMPOSE_FILE")
	if f != "" {
		// NOTE: entries from COMPOSE_FILE are not stat-checked here; missing
		// files surface later when parsed.
		return strings.Split(f, sep), nil
	}

	// Walk up from the current directory until a supported file is found or
	// the root is reached (filepath.Dir of the root returns itself).
	for {
		candidates := []string{}
		for _, n := range SupportedFilenames {
			f := filepath.Join(pwd, n)
			if _, err := os.Stat(f); err == nil {
				candidates = append(candidates, f)
			}
		}
		if len(candidates) > 0 {
			// First match in SupportedFilenames order wins; warn on ambiguity.
			winner := candidates[0]
			if len(candidates) > 1 {
				log.Warnf("Found multiple config files with supported names: %s", strings.Join(candidates, ", "))
				log.Warnf("Using %s\n", winner)
			}
			return []string{winner}, nil
		}
		parent := filepath.Dir(pwd)
		if parent == pwd {
			return nil, nil
		}
		pwd = parent
	}
}
|
||||
|
||||
func parseConfigs(configPaths []string) ([]types.ConfigFile, error) {
|
||||
files := []types.ConfigFile{}
|
||||
for _, f := range configPaths {
|
||||
var (
|
||||
b []byte
|
||||
err error
|
||||
)
|
||||
if f == "-" {
|
||||
b, err = ioutil.ReadAll(os.Stdin)
|
||||
} else {
|
||||
if _, err := os.Stat(f); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b, err = ioutil.ReadFile(f)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
config, err := loader.ParseYAML(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
files = append(files, types.ConfigFile{Filename: f, Config: config})
|
||||
}
|
||||
return files, nil
|
||||
}
|
|
@ -1,38 +0,0 @@
|
|||
package utils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func CombineErrors(errors []error) error {
|
||||
if len(errors) == 0 {
|
||||
return nil
|
||||
}
|
||||
if len(errors) == 1 {
|
||||
return errors[0]
|
||||
}
|
||||
err := combinedError{}
|
||||
for _, e := range errors {
|
||||
if c, ok := e.(combinedError); ok {
|
||||
err.errors = append(err.errors, c.errors...)
|
||||
} else {
|
||||
err.errors = append(err.errors, e)
|
||||
}
|
||||
}
|
||||
return combinedError{errors}
|
||||
}
|
||||
|
||||
type combinedError struct {
|
||||
errors []error
|
||||
}
|
||||
|
||||
func (c combinedError) Error() string {
|
||||
points := make([]string, len(c.errors))
|
||||
for i, err := range c.errors {
|
||||
points[i] = fmt.Sprintf("* %s", err.Error())
|
||||
}
|
||||
return fmt.Sprintf(
|
||||
"%d errors occurred:\n\t%s",
|
||||
len(c.errors), strings.Join(points, "\n\t"))
|
||||
}
|
|
@ -1,17 +0,0 @@
|
|||
package utils
|
||||
|
||||
// Labels in the com.docker.compose namespace, applied to resources created
// by compose so they can be identified and filtered later.
const (
	// LabelDockerComposePrefix is the namespace prefix shared by all labels below.
	LabelDockerComposePrefix = "com.docker.compose"

	LabelService         = LabelDockerComposePrefix + ".service"
	LabelVersion         = LabelDockerComposePrefix + ".version"
	LabelContainerNumber = LabelDockerComposePrefix + ".container-number"
	LabelOneOff          = LabelDockerComposePrefix + ".oneoff"
	LabelNetwork         = LabelDockerComposePrefix + ".network"
	LabelSlug            = LabelDockerComposePrefix + ".slug"
	LabelVolume          = LabelDockerComposePrefix + ".volume"
	LabelConfigHash      = LabelDockerComposePrefix + ".config-hash"
	LabelProject         = LabelDockerComposePrefix + ".project"
	LabelWorkingDir      = LabelDockerComposePrefix + ".working_dir"
	LabelConfigFiles     = LabelDockerComposePrefix + ".config_files"
	LabelEnvironmentFile = LabelDockerComposePrefix + ".environment_file"
)
|
Loading…
Reference in New Issue