mirror of https://github.com/docker/compose.git
Extend compose to kube conversion
(Most of this code is copy/paste from https://github.com/docker/compose-on-kubernetes/tree/master/internal/convert)

Signed-off-by: Nicolas De Loof <nicolas.deloof@gmail.com>
parent ba43317862
commit f7c86a7d30
@@ -0,0 +1,176 @@
package convert

import (
	"fmt"
	"strings"
	"time"

	"github.com/compose-spec/compose-go/types"
	"github.com/docker/helm-prototype/pkg/compose"
	apps "k8s.io/api/apps/v1"
	core "k8s.io/api/core/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func MapToKubernetesObjects(model *compose.Project) (map[string]runtime.Object, error) {
	objects := map[string]runtime.Object{}
	for _, service := range model.Services {
		objects[fmt.Sprintf("%s-service.yaml", service.Name)] = mapToService(model, service)
		if service.Deploy != nil && service.Deploy.Mode == "global" {
			daemonset, err := mapToDaemonset(service, model)
			if err != nil {
				return nil, err
			}
			objects[fmt.Sprintf("%s-daemonset.yaml", service.Name)] = daemonset
		} else {
			deployment, err := mapToDeployment(service, model)
			if err != nil {
				return nil, err
			}
			objects[fmt.Sprintf("%s-deployment.yaml", service.Name)] = deployment
		}
		for _, vol := range service.Volumes {
			if vol.Type == "volume" {
				objects[fmt.Sprintf("%s-persistentvolumeclaim.yaml", service.Name)] = mapToPVC(service, vol)
			}
		}
	}
	return objects, nil
}
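
For orientation, a minimal sketch of how the returned name-to-object map could be consumed, one manifest file per entry. The convert import path and the sigs.k8s.io/yaml dependency are assumptions, not part of this change, and since the constructors here leave TypeMeta empty, apiVersion/kind would still need to be set before the output is applyable:

package manifests // hypothetical consumer, not part of this commit

import (
	"io/ioutil"
	"path/filepath"

	"github.com/docker/helm-prototype/pkg/compose"
	convert "github.com/docker/helm-prototype/pkg/convert" // assumed import path
	"sigs.k8s.io/yaml"                                     // assumed dependency
)

// writeManifests marshals every generated object (via its JSON tags) and
// writes one YAML file per map entry, using the map key as the file name.
func writeManifests(model *compose.Project, dir string) error {
	objects, err := convert.MapToKubernetesObjects(model)
	if err != nil {
		return err
	}
	for name, obj := range objects {
		data, err := yaml.Marshal(obj)
		if err != nil {
			return err
		}
		if err := ioutil.WriteFile(filepath.Join(dir, name), data, 0644); err != nil {
			return err
		}
	}
	return nil
}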

func mapToService(model *compose.Project, service types.ServiceConfig) *core.Service {
	ports := []core.ServicePort{}
	for _, p := range service.Ports {
		ports = append(ports,
			core.ServicePort{
				Name:       fmt.Sprintf("%d-%s", p.Target, strings.ToLower(string(p.Protocol))),
				Port:       int32(p.Target),
				TargetPort: intstr.FromInt(int(p.Target)),
				Protocol:   toProtocol(p.Protocol),
			})
	}

	return &core.Service{
		ObjectMeta: meta.ObjectMeta{
			Name: service.Name,
		},
		Spec: core.ServiceSpec{
			Selector: map[string]string{"com.docker.compose.service": service.Name},
			Ports:    ports,
			Type:     mapServiceToServiceType(service, model),
		},
	}
}

func mapServiceToServiceType(service types.ServiceConfig, model *compose.Project) core.ServiceType {
	serviceType := core.ServiceTypeClusterIP
	if len(service.Networks) == 0 {
		// service is implicitly attached to the "default" network
		serviceType = core.ServiceTypeLoadBalancer
	}
	for name := range service.Networks {
		if !model.Networks[name].Internal {
			serviceType = core.ServiceTypeLoadBalancer
		}
	}
	for _, port := range service.Ports {
		if port.Published != 0 {
			serviceType = core.ServiceTypeNodePort
		}
	}
	return serviceType
}
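
The precedence here: any published port forces NodePort; otherwise a non-internal (or implicit default) network yields LoadBalancer; only internal networks leave the service at ClusterIP. A hedged in-package test sketch — compose.Project's zero value and the Networks field types are assumptions inferred from the lookups above:

package convert

import (
	"testing"

	"github.com/compose-spec/compose-go/types"
	"github.com/docker/helm-prototype/pkg/compose"
	core "k8s.io/api/core/v1"
)

// A sketch only: exercises the three outcomes of mapServiceToServiceType.
func TestMapServiceToServiceType(t *testing.T) {
	model := &compose.Project{} // zero value assumed to be usable here

	// no networks listed: implicitly on "default", hence LoadBalancer
	web := types.ServiceConfig{Name: "web"}
	if got := mapServiceToServiceType(web, model); got != core.ServiceTypeLoadBalancer {
		t.Errorf("want LoadBalancer, got %s", got)
	}

	// a published port takes precedence: NodePort
	web.Ports = []types.ServicePortConfig{{Target: 80, Published: 8080}}
	if got := mapServiceToServiceType(web, model); got != core.ServiceTypeNodePort {
		t.Errorf("want NodePort, got %s", got)
	}

	// only internal networks and no published ports: stays ClusterIP
	// (Networks field types assumed from the lookups above)
	db := types.ServiceConfig{
		Name:     "db",
		Networks: map[string]*types.ServiceNetworkConfig{"backend": nil},
	}
	model.Networks = map[string]types.NetworkConfig{"backend": {Internal: true}}
	if got := mapServiceToServiceType(db, model); got != core.ServiceTypeClusterIP {
		t.Errorf("want ClusterIP, got %s", got)
	}
}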

func mapToDeployment(service types.ServiceConfig, model *compose.Project) (*apps.Deployment, error) {
	labels := map[string]string{
		"com.docker.compose.service": service.Name,
		"com.docker.compose.project": model.Name,
	}
	podTemplate, err := toPodTemplate(service, labels, model)
	if err != nil {
		return nil, err
	}

	return &apps.Deployment{
		ObjectMeta: meta.ObjectMeta{
			Name:   service.Name,
			Labels: labels,
		},
		Spec: apps.DeploymentSpec{
			Replicas: toReplicas(service.Deploy),
			Strategy: toDeploymentStrategy(service.Deploy),
			Template: podTemplate,
		},
	}, nil
}

func mapToDaemonset(service types.ServiceConfig, model *compose.Project) (*apps.DaemonSet, error) {
	labels := map[string]string{
		"com.docker.compose.service": service.Name,
		"com.docker.compose.project": model.Name,
	}
	podTemplate, err := toPodTemplate(service, labels, model)
	if err != nil {
		return nil, err
	}

	return &apps.DaemonSet{
		ObjectMeta: meta.ObjectMeta{
			Name:   service.Name,
			Labels: labels,
		},
		Spec: apps.DaemonSetSpec{
			Template: podTemplate,
		},
	}, nil
}

func toReplicas(deploy *types.DeployConfig) *int32 {
	v := int32(1)
	// Replicas is a pointer in the compose model; default to 1 when unset
	if deploy != nil && deploy.Replicas != nil {
		v = int32(*deploy.Replicas)
	}
	return &v
}

func toDeploymentStrategy(deploy *types.DeployConfig) apps.DeploymentStrategy {
	// without an update_config (or a parallelism value) fall back to Recreate
	if deploy == nil || deploy.UpdateConfig == nil || deploy.UpdateConfig.Parallelism == nil {
		return apps.DeploymentStrategy{
			Type: apps.RecreateDeploymentStrategyType,
		}
	}
	return apps.DeploymentStrategy{
		Type: apps.RollingUpdateDeploymentStrategyType,
		RollingUpdate: &apps.RollingUpdateDeployment{
			MaxUnavailable: &intstr.IntOrString{
				Type:   intstr.Int,
				IntVal: int32(*deploy.UpdateConfig.Parallelism),
			},
			MaxSurge: nil,
		},
	}
}

func mapToPVC(service types.ServiceConfig, vol types.ServiceVolumeConfig) runtime.Object {
	return &core.PersistentVolumeClaim{
		ObjectMeta: meta.ObjectMeta{
			Name:   vol.Source,
			Labels: map[string]string{"com.docker.compose.service": service.Name},
		},
		Spec: core.PersistentVolumeClaimSpec{
			VolumeName: vol.Source,
		},
	}
}

// toSecondsOrDefault converts a duration into a whole number of seconds,
// defaulting to the given value when the duration is nil.
// The supported units are us, ms, s, m and h.
func toSecondsOrDefault(duration *types.Duration, defaultValue int32) int32 { //nolint: unparam
	if duration == nil {
		return defaultValue
	}
	return int32(time.Duration(*duration).Seconds())
}
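
A small in-package sketch of how the deploy block maps onto the strategy, assuming compose-go's UpdateConfig.Parallelism is a *uint64 as the dereference above suggests:

package convert

import (
	"testing"

	"github.com/compose-spec/compose-go/types"
	apps "k8s.io/api/apps/v1"
)

func TestToDeploymentStrategy(t *testing.T) {
	// no deploy section at all: Recreate
	if got := toDeploymentStrategy(nil); got.Type != apps.RecreateDeploymentStrategyType {
		t.Errorf("want Recreate, got %s", got.Type)
	}

	// update_config.parallelism: 2 maps to RollingUpdate with MaxUnavailable=2
	parallelism := uint64(2)
	got := toDeploymentStrategy(&types.DeployConfig{
		UpdateConfig: &types.UpdateConfig{Parallelism: &parallelism},
	})
	if got.Type != apps.RollingUpdateDeploymentStrategyType {
		t.Fatalf("want RollingUpdate, got %s", got.Type)
	}
	if got.RollingUpdate.MaxUnavailable.IntVal != 2 {
		t.Errorf("want MaxUnavailable=2, got %d", got.RollingUpdate.MaxUnavailable.IntVal)
	}
}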

@@ -0,0 +1,342 @@
package convert

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/compose-spec/compose-go/types"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/helm-prototype/pkg/compose"
	"github.com/pkg/errors"
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func toPodTemplate(serviceConfig types.ServiceConfig, labels map[string]string, model *compose.Project) (apiv1.PodTemplateSpec, error) {
	tpl := apiv1.PodTemplateSpec{}
	hostAliases, err := toHostAliases(serviceConfig.ExtraHosts)
	if err != nil {
		return apiv1.PodTemplateSpec{}, err
	}
	env, err := toEnv(serviceConfig.Environment)
	if err != nil {
		return apiv1.PodTemplateSpec{}, err
	}
	restartPolicy, err := toRestartPolicy(serviceConfig)
	if err != nil {
		return apiv1.PodTemplateSpec{}, err
	}
	// limits map from deploy.resources.limits, requests from
	// deploy.resources.reservations
	var limits, requests apiv1.ResourceList
	if serviceConfig.Deploy != nil {
		limits, err = toResource(serviceConfig.Deploy.Resources.Limits)
		if err != nil {
			return apiv1.PodTemplateSpec{}, err
		}
		requests, err = toResource(serviceConfig.Deploy.Resources.Reservations)
		if err != nil {
			return apiv1.PodTemplateSpec{}, err
		}
	}
	volumes, err := toVolumes(serviceConfig, model)
	if err != nil {
		return apiv1.PodTemplateSpec{}, err
	}
	volumeMounts, err := toVolumeMounts(serviceConfig, model)
	if err != nil {
		return apiv1.PodTemplateSpec{}, err
	}
	/* pullPolicy, err := toImagePullPolicy(serviceConfig.Image, x-kubernetes-pull-policy)
	if err != nil {
		return apiv1.PodTemplateSpec{}, err
	} */
	tpl.ObjectMeta = metav1.ObjectMeta{
		Labels:      labels,
		Annotations: serviceConfig.Labels,
	}
	tpl.Spec.RestartPolicy = restartPolicy
	tpl.Spec.Volumes = volumes
	tpl.Spec.HostPID = toHostPID(serviceConfig.Pid)
	tpl.Spec.HostIPC = toHostIPC(serviceConfig.Ipc)
	tpl.Spec.Hostname = serviceConfig.Hostname
	tpl.Spec.TerminationGracePeriodSeconds = toTerminationGracePeriodSeconds(serviceConfig.StopGracePeriod)
	tpl.Spec.HostAliases = hostAliases
	// FIXME tpl.Spec.Affinity = nodeAffinity
	// we don't want to remove all containers and recreate them because:
	// - an admission plugin can add sidecar containers
	// - we want to keep the main container and stay additive
	if len(tpl.Spec.Containers) == 0 {
		tpl.Spec.Containers = []apiv1.Container{{}}
	}

	containerIX := 0
	for ix, c := range tpl.Spec.Containers {
		if c.Name == serviceConfig.Name {
			containerIX = ix
			break
		}
	}
	tpl.Spec.Containers[containerIX].Name = serviceConfig.Name
	tpl.Spec.Containers[containerIX].Image = serviceConfig.Image
	// FIXME tpl.Spec.Containers[containerIX].ImagePullPolicy = pullPolicy
	tpl.Spec.Containers[containerIX].Command = serviceConfig.Entrypoint
	tpl.Spec.Containers[containerIX].Args = serviceConfig.Command
	tpl.Spec.Containers[containerIX].WorkingDir = serviceConfig.WorkingDir
	tpl.Spec.Containers[containerIX].TTY = serviceConfig.Tty
	tpl.Spec.Containers[containerIX].Stdin = serviceConfig.StdinOpen
	tpl.Spec.Containers[containerIX].Ports = toPorts(serviceConfig.Ports)
	tpl.Spec.Containers[containerIX].LivenessProbe = toLivenessProbe(serviceConfig.HealthCheck)
	tpl.Spec.Containers[containerIX].Env = env
	tpl.Spec.Containers[containerIX].VolumeMounts = volumeMounts
	tpl.Spec.Containers[containerIX].SecurityContext = toSecurityContext(serviceConfig)
	tpl.Spec.Containers[containerIX].Resources = apiv1.ResourceRequirements{
		Limits:   limits,
		Requests: requests,
	}

	/* FIXME
	if serviceConfig.PullSecret != "" {
		pullSecrets := map[string]struct{}{}
		for _, ps := range tpl.Spec.ImagePullSecrets {
			pullSecrets[ps.Name] = struct{}{}
		}
		if _, ok := pullSecrets[serviceConfig.PullSecret]; !ok {
			tpl.Spec.ImagePullSecrets = append(tpl.Spec.ImagePullSecrets, apiv1.LocalObjectReference{Name: serviceConfig.PullSecret})
		}
	}
	*/
	return tpl, nil
}

func toImagePullPolicy(image string, specifiedPolicy string) (apiv1.PullPolicy, error) {
	if specifiedPolicy == "" {
		if strings.HasSuffix(image, ":latest") {
			return apiv1.PullAlways, nil
		}
		return apiv1.PullIfNotPresent, nil
	}
	switch apiv1.PullPolicy(specifiedPolicy) {
	case apiv1.PullAlways, apiv1.PullIfNotPresent, apiv1.PullNever:
		return apiv1.PullPolicy(specifiedPolicy), nil
	default:
		return "", errors.Errorf("invalid pull policy %q, must be %q, %q or %q", specifiedPolicy, apiv1.PullAlways, apiv1.PullIfNotPresent, apiv1.PullNever)
	}
}

func toHostAliases(extraHosts []string) ([]apiv1.HostAlias, error) {
	if extraHosts == nil {
		return nil, nil
	}

	byHostnames := map[string]string{}
	for _, host := range extraHosts {
		split := strings.SplitN(host, ":", 2)
		if len(split) != 2 {
			return nil, errors.Errorf("malformed host %s", host)
		}
		byHostnames[split[0]] = split[1]
	}

	byIPs := map[string][]string{}
	for k, v := range byHostnames {
		byIPs[v] = append(byIPs[v], k)
	}

	aliases := make([]apiv1.HostAlias, len(byIPs))
	i := 0
	for key, hosts := range byIPs {
		sort.Strings(hosts)
		aliases[i] = apiv1.HostAlias{
			IP:        key,
			Hostnames: hosts,
		}
		i++
	}
	sort.Slice(aliases, func(i, j int) bool { return aliases[i].IP < aliases[j].IP })
	return aliases, nil
}
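
An in-package sketch of the extra_hosts grouping above: entries that share an IP collapse into a single HostAlias with sorted hostnames:

package convert

import (
	"reflect"
	"testing"
)

func TestToHostAliases(t *testing.T) {
	aliases, err := toHostAliases([]string{"db:10.0.0.2", "cache:10.0.0.2"})
	if err != nil {
		t.Fatal(err)
	}
	if len(aliases) != 1 || aliases[0].IP != "10.0.0.2" {
		t.Fatalf("unexpected aliases: %+v", aliases)
	}
	// hostnames sharing an IP are grouped and sorted for deterministic output
	if want := []string{"cache", "db"}; !reflect.DeepEqual(aliases[0].Hostnames, want) {
		t.Errorf("got %v, want %v", aliases[0].Hostnames, want)
	}
}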

func toHostPID(pid string) bool {
	return pid == "host"
}

func toHostIPC(ipc string) bool {
	return ipc == "host"
}

func toTerminationGracePeriodSeconds(duration *types.Duration) *int64 {
	if duration == nil {
		return nil
	}
	gracePeriod := int64(time.Duration(*duration).Seconds())
	return &gracePeriod
}

func toLivenessProbe(hc *types.HealthCheckConfig) *apiv1.Probe {
	if hc == nil || len(hc.Test) < 1 || hc.Test[0] == "NONE" {
		return nil
	}

	command := hc.Test[1:]
	if hc.Test[0] == "CMD-SHELL" {
		command = append([]string{"sh", "-c"}, command...)
	}

	return &apiv1.Probe{
		TimeoutSeconds:   toSecondsOrDefault(hc.Timeout, 1),
		PeriodSeconds:    toSecondsOrDefault(hc.Interval, 1),
		FailureThreshold: int32(defaultUint64(hc.Retries, 3)),
		Handler: apiv1.Handler{
			Exec: &apiv1.ExecAction{
				Command: command,
			},
		},
	}
}

func toEnv(env map[string]*string) ([]apiv1.EnvVar, error) {
	var envVars []apiv1.EnvVar

	for k, v := range env {
		if v == nil {
			return nil, errors.Errorf("%s has no value, unsetting an environment variable is not supported", k)
		}
		envVars = append(envVars, toEnvVar(k, *v))
	}
	sort.Slice(envVars, func(i, j int) bool { return envVars[i].Name < envVars[j].Name })
	return envVars, nil
}

func toEnvVar(key, value string) apiv1.EnvVar {
	return apiv1.EnvVar{
		Name:  key,
		Value: value,
	}
}

func toPorts(list []types.ServicePortConfig) []apiv1.ContainerPort {
	var ports []apiv1.ContainerPort

	for _, v := range list {
		ports = append(ports, apiv1.ContainerPort{
			ContainerPort: int32(v.Target),
			Protocol:      toProtocol(v.Protocol),
		})
	}

	return ports
}

func toProtocol(value string) apiv1.Protocol {
	if value == "udp" {
		return apiv1.ProtocolUDP
	}
	return apiv1.ProtocolTCP
}

func toRestartPolicy(s types.ServiceConfig) (apiv1.RestartPolicy, error) {
	if s.Deploy == nil || s.Deploy.RestartPolicy == nil {
		return apiv1.RestartPolicyAlways, nil
	}
	policy := s.Deploy.RestartPolicy

	switch policy.Condition {
	case string(swarm.RestartPolicyConditionAny):
		return apiv1.RestartPolicyAlways, nil
	case string(swarm.RestartPolicyConditionNone):
		return apiv1.RestartPolicyNever, nil
	case string(swarm.RestartPolicyConditionOnFailure):
		return apiv1.RestartPolicyOnFailure, nil
	default:
		return "", errors.Errorf("unsupported restart policy %s", policy.Condition)
	}
}

func toResource(res *types.Resource) (apiv1.ResourceList, error) {
	if res == nil {
		return nil, nil
	}

	list := make(apiv1.ResourceList)
	if res.NanoCPUs != "" {
		cpus, err := resource.ParseQuantity(res.NanoCPUs)
		if err != nil {
			return nil, err
		}
		list[apiv1.ResourceCPU] = cpus
	}
	if res.MemoryBytes != 0 {
		memory, err := resource.ParseQuantity(fmt.Sprintf("%v", res.MemoryBytes))
		if err != nil {
			return nil, err
		}
		list[apiv1.ResourceMemory] = memory
	}
	return list, nil
}

func toSecurityContext(s types.ServiceConfig) *apiv1.SecurityContext {
	isPrivileged := toBoolPointer(s.Privileged)
	isReadOnly := toBoolPointer(s.ReadOnly)

	var capabilities *apiv1.Capabilities
	if s.CapAdd != nil || s.CapDrop != nil {
		capabilities = &apiv1.Capabilities{
			Add:  toCapabilities(s.CapAdd),
			Drop: toCapabilities(s.CapDrop),
		}
	}

	var userID *int64
	if s.User != "" {
		numerical, err := strconv.Atoi(s.User)
		if err == nil {
			unixUserID := int64(numerical)
			userID = &unixUserID
		}
	}

	if isPrivileged == nil && isReadOnly == nil && capabilities == nil && userID == nil {
		return nil
	}

	return &apiv1.SecurityContext{
		RunAsUser:              userID,
		Privileged:             isPrivileged,
		ReadOnlyRootFilesystem: isReadOnly,
		Capabilities:           capabilities,
	}
}

func toBoolPointer(value bool) *bool {
	if value {
		return &value
	}

	return nil
}

func defaultUint64(v *uint64, defaultValue uint64) uint64 { //nolint: unparam
	if v == nil {
		return defaultValue
	}

	return *v
}

func toCapabilities(list []string) (capabilities []apiv1.Capability) {
	for _, c := range list {
		capabilities = append(capabilities, apiv1.Capability(c))
	}
	return
}

//nolint: unparam
func forceRestartPolicy(podTemplate apiv1.PodTemplateSpec, forcedRestartPolicy apiv1.RestartPolicy) apiv1.PodTemplateSpec {
	if podTemplate.Spec.RestartPolicy != "" {
		podTemplate.Spec.RestartPolicy = forcedRestartPolicy
	}

	return podTemplate
}
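
An in-package sketch of the healthcheck mapping: a CMD-SHELL test is wrapped in sh -c, and retries become the failure threshold:

package convert

import (
	"reflect"
	"testing"

	"github.com/compose-spec/compose-go/types"
)

func TestToLivenessProbe(t *testing.T) {
	retries := uint64(5)
	probe := toLivenessProbe(&types.HealthCheckConfig{
		Test:    []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
		Retries: &retries,
	})
	if probe == nil {
		t.Fatal("expected a probe")
	}
	// CMD-SHELL tests run through a shell in the container
	want := []string{"sh", "-c", "curl -f http://localhost/ || exit 1"}
	if !reflect.DeepEqual(probe.Handler.Exec.Command, want) {
		t.Errorf("got %v, want %v", probe.Handler.Exec.Command, want)
	}
	if probe.FailureThreshold != 5 {
		t.Errorf("want FailureThreshold=5, got %d", probe.FailureThreshold)
	}
}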

@@ -0,0 +1,228 @@
package convert

import (
	"fmt"
	"path"
	"path/filepath"
	"strings"

	"github.com/compose-spec/compose-go/types"
	"github.com/docker/helm-prototype/pkg/compose"
	"github.com/pkg/errors"
	apiv1 "k8s.io/api/core/v1"
)

const dockerSock = "/var/run/docker.sock"

type volumeSpec struct {
	mount  apiv1.VolumeMount
	source *apiv1.VolumeSource
}

func hasPersistentVolumes(s types.ServiceConfig) bool {
	for _, volume := range s.Volumes {
		if volume.Type == "volume" {
			return true
		}
	}

	return false
}

func toVolumeSpecs(s types.ServiceConfig, model *compose.Project) ([]volumeSpec, error) {
	var specs []volumeSpec
	for i, m := range s.Volumes {
		var source *apiv1.VolumeSource
		name := fmt.Sprintf("mount-%d", i)
		subpath := ""
		if m.Source == dockerSock && m.Target == dockerSock {
			subpath = "docker.sock"
			source = hostPathVolume("/var/run")
		} else if strings.HasSuffix(m.Source, ".git") {
			source = gitVolume(m.Source)
		} else if m.Type == "volume" {
			if m.Source != "" {
				name = m.Source
			}
		} else {
			// bind mount
			if !filepath.IsAbs(m.Source) {
				return nil, errors.Errorf("%s: only absolute paths can be specified in mount source", m.Source)
			}
			if m.Source == "/" {
				source = hostPathVolume("/")
			} else {
				parent, file := filepath.Split(m.Source)
				if parent != "/" {
					parent = strings.TrimSuffix(parent, "/")
				}
				source = hostPathVolume(parent)
				subpath = file
			}
		}

		specs = append(specs, volumeSpec{
			source: source,
			mount:  volumeMount(name, m.Target, m.ReadOnly, subpath),
		})
	}

	for i, m := range s.Tmpfs {
		name := fmt.Sprintf("tmp-%d", i)

		specs = append(specs, volumeSpec{
			source: emptyVolumeInMemory(),
			mount:  volumeMount(name, m, false, ""),
		})
	}

	for i, secret := range s.Secrets {
		name := fmt.Sprintf("secret-%d", i)

		target := path.Join("/run/secrets", or(secret.Target, secret.Source))
		subPath := name
		readOnly := true

		// top-level secrets are keyed by their source name, not by the
		// generated mount name
		specs = append(specs, volumeSpec{
			source: secretVolume(secret, model.Secrets[secret.Source], subPath),
			mount:  volumeMount(name, target, readOnly, subPath),
		})
	}

	for i, c := range s.Configs {
		name := fmt.Sprintf("config-%d", i)

		target := or(c.Target, "/"+c.Source)
		subPath := name
		readOnly := true

		// likewise, top-level configs are keyed by their source name
		specs = append(specs, volumeSpec{
			source: configVolume(c, model.Configs[c.Source], subPath),
			mount:  volumeMount(name, target, readOnly, subPath),
		})
	}

	return specs, nil
}

func or(v string, defaultValue string) string {
	if v != "" && v != "." {
		return v
	}

	return defaultValue
}

func toVolumeMounts(s types.ServiceConfig, model *compose.Project) ([]apiv1.VolumeMount, error) {
	var mounts []apiv1.VolumeMount
	specs, err := toVolumeSpecs(s, model)
	if err != nil {
		return nil, err
	}
	for _, spec := range specs {
		mounts = append(mounts, spec.mount)
	}
	return mounts, nil
}

func toVolumes(s types.ServiceConfig, model *compose.Project) ([]apiv1.Volume, error) {
	var volumes []apiv1.Volume
	specs, err := toVolumeSpecs(s, model)
	if err != nil {
		return nil, err
	}
	for _, spec := range specs {
		if spec.source == nil {
			continue
		}
		volumes = append(volumes, apiv1.Volume{
			Name:         spec.mount.Name,
			VolumeSource: *spec.source,
		})
	}
	return volumes, nil
}

func gitVolume(path string) *apiv1.VolumeSource {
	return &apiv1.VolumeSource{
		GitRepo: &apiv1.GitRepoVolumeSource{
			Repository: filepath.ToSlash(path),
		},
	}
}

func hostPathVolume(path string) *apiv1.VolumeSource {
	return &apiv1.VolumeSource{
		HostPath: &apiv1.HostPathVolumeSource{
			Path: path,
		},
	}
}

func defaultMode(mode *uint32) *int32 {
	var defaultMode *int32

	if mode != nil {
		signedMode := int32(*mode)
		defaultMode = &signedMode
	}

	return defaultMode
}

func secretVolume(config types.ServiceSecretConfig, topLevelSecret types.SecretConfig, subPath string) *apiv1.VolumeSource {
	return &apiv1.VolumeSource{
		Secret: &apiv1.SecretVolumeSource{
			SecretName: config.Source,
			Items: []apiv1.KeyToPath{
				{
					Key:  toKey(topLevelSecret.File),
					Path: subPath,
					Mode: defaultMode(config.Mode),
				},
			},
		},
	}
}

func volumeMount(name, path string, readOnly bool, subPath string) apiv1.VolumeMount {
	return apiv1.VolumeMount{
		Name:      name,
		MountPath: path,
		ReadOnly:  readOnly,
		SubPath:   subPath,
	}
}

func configVolume(config types.ServiceConfigObjConfig, topLevelConfig types.ConfigObjConfig, subPath string) *apiv1.VolumeSource {
	return &apiv1.VolumeSource{
		ConfigMap: &apiv1.ConfigMapVolumeSource{
			LocalObjectReference: apiv1.LocalObjectReference{
				Name: config.Source,
			},
			Items: []apiv1.KeyToPath{
				{
					Key:  toKey(topLevelConfig.File),
					Path: subPath,
					Mode: defaultMode(config.Mode),
				},
			},
		},
	}
}

func toKey(file string) string {
	if file != "" {
		return path.Base(file)
	}

	return "file" // TODO: hard-coded key for external configs
}

func emptyVolumeInMemory() *apiv1.VolumeSource {
	return &apiv1.VolumeSource{
		EmptyDir: &apiv1.EmptyDirVolumeSource{
			Medium: apiv1.StorageMediumMemory,
		},
	}
}
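
To illustrate the bind-mount split above, an in-package sketch (a zero-value compose.Project is assumed to be enough when no secrets or configs are involved, and POSIX paths are assumed): /var/lib/app becomes a hostPath volume on /var/lib plus an app subPath:

package convert

import (
	"testing"

	"github.com/compose-spec/compose-go/types"
	"github.com/docker/helm-prototype/pkg/compose"
)

func TestBindMountSplit(t *testing.T) {
	svc := types.ServiceConfig{
		Name: "web",
		Volumes: []types.ServiceVolumeConfig{
			{Type: "bind", Source: "/var/lib/app", Target: "/data"},
		},
	}
	specs, err := toVolumeSpecs(svc, &compose.Project{})
	if err != nil {
		t.Fatal(err)
	}
	if len(specs) != 1 {
		t.Fatalf("want 1 spec, got %d", len(specs))
	}
	// the parent directory is mounted, the leaf becomes a subPath
	if specs[0].source.HostPath.Path != "/var/lib" {
		t.Errorf("want hostPath /var/lib, got %s", specs[0].source.HostPath.Path)
	}
	if specs[0].mount.SubPath != "app" {
		t.Errorf("want subPath app, got %s", specs[0].mount.SubPath)
	}
}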

@@ -1,72 +0,0 @@
package transform

import (
	"fmt"
	"github.com/compose-spec/compose-go/types"
	"github.com/docker/helm-prototype/pkg/compose"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	core "k8s.io/api/core/v1"
	apps "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func MapToKubernetesObjects(model *compose.Project) (map[string]runtime.Object, error) {
	objects := map[string]runtime.Object{}
	for _, service := range model.Services {
		objects[fmt.Sprintf("%s-service.yaml", service.Name)] = mapToService(service)
		objects[fmt.Sprintf("%s-deployment.yaml", service.Name)] = mapToDeployment(service)
		for _, vol := range service.Volumes {
			if vol.Type == "volume" {
				objects[fmt.Sprintf("%s-persistentvolumeclain.yaml", service.Name)] = mapToPVC(service, vol)
			}
		}
	}
	return objects, nil
}

func mapToService(service types.ServiceConfig) *core.Service {
	return &core.Service{
		ObjectMeta: meta.ObjectMeta{
			Name: service.Name,
		},
		Spec: core.ServiceSpec{
			Selector: map[string]string{"com.docker.compose.service": service.Name},
		},
	}
}

func mapToDeployment(service types.ServiceConfig) *apps.Deployment {
	return &apps.Deployment{
		ObjectMeta: meta.ObjectMeta{
			Name:   service.Name,
			Labels: map[string]string{"com.docker.compose.service": service.Name},
		},
		Spec: apps.DeploymentSpec{
			Template: core.PodTemplateSpec{
				ObjectMeta: meta.ObjectMeta{
					Labels: map[string]string{"com.docker.compose.service": service.Name},
				},
				Spec: core.PodSpec{
					Containers: []core.Container{
						{
							Name:  service.Name,
							Image: service.Image,
						},
					},
				},
			},
		},
	}
}

func mapToPVC(service types.ServiceConfig, vol types.ServiceVolumeConfig) runtime.Object {
	return &core.PersistentVolumeClaim{
		ObjectMeta: meta.ObjectMeta{
			Name:   vol.Source,
			Labels: map[string]string{"com.docker.compose.service": service.Name},
		},
		Spec: core.PersistentVolumeClaimSpec{
			VolumeName: vol.Source,
		},
	}
}