mirror of https://github.com/docker/compose.git
Use compose resource request to set ACI resource request, and single container resource requests.
Limits should be used to allow one container in a container group to use more than the container requested resources, up to the total resources requested for the container group. Signed-off-by: Guillaume Tardif <guillaume.tardif@docker.com>
This commit is contained in:
parent
cda66b5e90
commit
c09f771359
|
@ -54,7 +54,7 @@ func ContainerToComposeProject(r containers.ContainerConfig) (types.Project, err
|
|||
Environment: toComposeEnvs(r.Environment),
|
||||
Deploy: &types.DeployConfig{
|
||||
Resources: types.Resources{
|
||||
Limits: &types.Resource{
|
||||
Reservations: &types.Resource{
|
||||
NanoCPUs: fmt.Sprintf("%f", r.CPULimit),
|
||||
MemoryBytes: types.UnitBytes(r.MemLimit.Value()),
|
||||
},
|
||||
|
|
|
@ -175,10 +175,6 @@ func getDNSSidecar(containers []containerinstance.Container) containerinstance.C
|
|||
Image: to.StringPtr(dnsSidecarImage),
|
||||
Command: &alpineCmd,
|
||||
Resources: &containerinstance.ResourceRequirements{
|
||||
Limits: &containerinstance.ResourceLimits{
|
||||
MemoryInGB: to.Float64Ptr(0.1), // "The memory requirement should be in incrememts of 0.1 GB."
|
||||
CPU: to.Float64Ptr(0.01), // "The CPU requirement should be in incrememts of 0.01."
|
||||
},
|
||||
Requests: &containerinstance.ResourceRequests{
|
||||
MemoryInGB: to.Float64Ptr(0.1),
|
||||
CPU: to.Float64Ptr(0.01),
|
||||
|
@ -357,14 +353,14 @@ func (s serviceConfigAciHelper) getAciContainer(volumesCache map[string]bool) (c
|
|||
volumes = &allVolumes
|
||||
}
|
||||
|
||||
memLimit := 1. // Default 1 Gb
|
||||
var cpuLimit float64 = 1
|
||||
if s.Deploy != nil && s.Deploy.Resources.Limits != nil {
|
||||
if s.Deploy.Resources.Limits.MemoryBytes != 0 {
|
||||
memLimit = bytesToGb(s.Deploy.Resources.Limits.MemoryBytes)
|
||||
memRequest := 1. // Default 1 Gb
|
||||
var cpuRequest float64 = 1
|
||||
if s.Deploy != nil && s.Deploy.Resources.Reservations != nil {
|
||||
if s.Deploy.Resources.Reservations.MemoryBytes != 0 {
|
||||
memRequest = bytesToGb(s.Deploy.Resources.Reservations.MemoryBytes)
|
||||
}
|
||||
if s.Deploy.Resources.Limits.NanoCPUs != "" {
|
||||
cpuLimit, err = strconv.ParseFloat(s.Deploy.Resources.Limits.NanoCPUs, 0)
|
||||
if s.Deploy.Resources.Reservations.NanoCPUs != "" {
|
||||
cpuRequest, err = strconv.ParseFloat(s.Deploy.Resources.Reservations.NanoCPUs, 0)
|
||||
if err != nil {
|
||||
return containerinstance.Container{}, err
|
||||
}
|
||||
|
@ -377,13 +373,9 @@ func (s serviceConfigAciHelper) getAciContainer(volumesCache map[string]bool) (c
|
|||
Command: to.StringSlicePtr(s.Command),
|
||||
EnvironmentVariables: getEnvVariables(s.Environment),
|
||||
Resources: &containerinstance.ResourceRequirements{
|
||||
Limits: &containerinstance.ResourceLimits{
|
||||
MemoryInGB: to.Float64Ptr(memLimit),
|
||||
CPU: to.Float64Ptr(cpuLimit),
|
||||
},
|
||||
Requests: &containerinstance.ResourceRequests{
|
||||
MemoryInGB: to.Float64Ptr(memLimit), // TODO: use the memory requests here and not limits
|
||||
CPU: to.Float64Ptr(cpuLimit), // TODO: use the cpu requests here and not limits
|
||||
MemoryInGB: to.Float64Ptr(memRequest),
|
||||
CPU: to.Float64Ptr(cpuRequest),
|
||||
},
|
||||
},
|
||||
VolumeMounts: volumes,
|
||||
|
|
|
@ -536,7 +536,7 @@ func TestComposeContainerGroupToContainerIgnoreDomainNameWithoutPorts(t *testing
|
|||
assert.Assert(t, group.IPAddress == nil)
|
||||
}
|
||||
|
||||
func TestComposeContainerGroupToContainerResourceLimits(t *testing.T) {
|
||||
func TestComposeContainerGroupToContainerResourceRequests(t *testing.T) {
|
||||
_0_1Gb := 0.1 * 1024 * 1024 * 1024
|
||||
project := types.Project{
|
||||
Services: []types.ServiceConfig{
|
||||
|
@ -545,7 +545,7 @@ func TestComposeContainerGroupToContainerResourceLimits(t *testing.T) {
|
|||
Image: "image1",
|
||||
Deploy: &types.DeployConfig{
|
||||
Resources: types.Resources{
|
||||
Limits: &types.Resource{
|
||||
Reservations: &types.Resource{
|
||||
NanoCPUs: "0.1",
|
||||
MemoryBytes: types.UnitBytes(_0_1Gb),
|
||||
},
|
||||
|
@ -558,12 +558,12 @@ func TestComposeContainerGroupToContainerResourceLimits(t *testing.T) {
|
|||
group, err := ToContainerGroup(context.TODO(), convertCtx, project, mockStorageHelper)
|
||||
assert.NilError(t, err)
|
||||
|
||||
limits := *((*group.Containers)[0]).Resources.Limits
|
||||
assert.Equal(t, *limits.CPU, float64(0.1))
|
||||
assert.Equal(t, *limits.MemoryInGB, float64(0.1))
|
||||
request := *((*group.Containers)[0]).Resources.Requests
|
||||
assert.Equal(t, *request.CPU, float64(0.1))
|
||||
assert.Equal(t, *request.MemoryInGB, float64(0.1))
|
||||
}
|
||||
|
||||
func TestComposeContainerGroupToContainerResourceLimitsDefaults(t *testing.T) {
|
||||
func TestComposeContainerGroupToContainerResourceRequestsDefaults(t *testing.T) {
|
||||
project := types.Project{
|
||||
Services: []types.ServiceConfig{
|
||||
{
|
||||
|
@ -571,7 +571,7 @@ func TestComposeContainerGroupToContainerResourceLimitsDefaults(t *testing.T) {
|
|||
Image: "image1",
|
||||
Deploy: &types.DeployConfig{
|
||||
Resources: types.Resources{
|
||||
Limits: &types.Resource{
|
||||
Reservations: &types.Resource{
|
||||
NanoCPUs: "",
|
||||
MemoryBytes: 0,
|
||||
},
|
||||
|
@ -584,9 +584,9 @@ func TestComposeContainerGroupToContainerResourceLimitsDefaults(t *testing.T) {
|
|||
group, err := ToContainerGroup(context.TODO(), convertCtx, project, mockStorageHelper)
|
||||
assert.NilError(t, err)
|
||||
|
||||
limits := *((*group.Containers)[0]).Resources.Limits
|
||||
assert.Equal(t, *limits.CPU, float64(1))
|
||||
assert.Equal(t, *limits.MemoryInGB, float64(1))
|
||||
request := *((*group.Containers)[0]).Resources.Requests
|
||||
assert.Equal(t, *request.CPU, float64(1))
|
||||
assert.Equal(t, *request.MemoryInGB, float64(1))
|
||||
}
|
||||
|
||||
func TestComposeContainerGroupToContainerenvVar(t *testing.T) {
|
||||
|
|
Loading…
Reference in New Issue