mirror of https://gitea.elkins.co/Networking/ccl.git (synced 2025-03-10 05:01:38 -05:00)
487 lines · 14 KiB · Go
/*
|
|
Package container encapsulates both the metadata structure and main
|
|
operations to be presented to the user in the `cmd` module.
|
|
|
|
Copyright © 2022 Joel D. Elkins <joel@elkins.co>
|
|
|
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
of this software and associated documentation files (the "Software"), to deal
|
|
in the Software without restriction, including without limitation the rights
|
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
copies of the Software, and to permit persons to whom the Software is
|
|
furnished to do so, subject to the following conditions:
|
|
|
|
The above copyright notice and this permission notice shall be included in
|
|
all copies or substantial portions of the Software.
|
|
|
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
THE SOFTWARE.
|
|
*/
|
|
package container
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
"net"
|
|
"os/exec"
|
|
"regexp"
|
|
|
|
cmd "gitea.elkins.co/Networking/ccl/internal/pkg/command"
|
|
"gitea.elkins.co/Networking/ccl/internal/pkg/network"
|
|
"github.com/containers/common/libnetwork/types"
|
|
"github.com/containers/podman/v4/libpod/define"
|
|
"github.com/containers/podman/v4/pkg/bindings/containers"
|
|
"github.com/containers/podman/v4/pkg/bindings/images"
|
|
"github.com/containers/podman/v4/pkg/specgen"
|
|
spec "github.com/opencontainers/runtime-spec/specs-go"
|
|
log "github.com/sirupsen/logrus"
|
|
"gopkg.in/guregu/null.v4"
|
|
)
|
|
|
|
// Container houses the metadata that may be specified by this utility when
// creating a container. A couple of fields (Name and Image) are mandatory to
// specify, but the rest will use the libpod or otherwise sensible defaults.
//
// The exported fields are populated from TOML configuration; the unexported
// fields are runtime state wired up by Init.
type Container struct {
	Category  string            `toml:"category"` // free-form grouping label
	Name      string            `toml:"name"`     // container name (mandatory)
	Image     string            `toml:"image"`    // image reference (mandatory)
	Hostname  string            `toml:"hostname,omitempty"`
	Command   []string          `toml:"cmd,omitempty"` // overrides the image entry command
	Arguments string            `toml:"arguments,omitempty"`
	Networks  []network.Network `toml:"networks,omitempty"` // per-network config; merged with global nets in Init
	Env       map[string]string `toml:"env,omitempty"`
	Mounts    []spec.Mount      `toml:"mounts,omitempty"` // Type defaults to "bind" (see Init)
	Restart   string            `toml:"restart,omitempty"`
	Umask     null.Int          `toml:"umask,omitempty"` // defaults to 0o022 when unset
	User      string            `toml:"user,omitempty"`
	ExposeTCP []uint16          `toml:"expose_tcp,omitempty"`
	ExposeUDP []uint16          `toml:"expose_udp,omitempty"`
	PortsTCP  map[uint16]uint16 `toml:"ports,omitempty"` // host port -> container port (TCP only)
	NetNS     string            `toml:"netns,omitempty"` // defaults to specgen.Bridge when empty
	StartGroup int              `toml:"group,omitempty"` // ordering hint for callers starting many containers

	conn       context.Context                          // libpod bindings connection, set by Init
	cdataChan  chan chan *define.InspectContainerData   // do not use directly -- use getCData()
	cdataInval chan bool                                // send a bool when container state changes
	wasRunning bool                                     // running state observed at Init time
}
|
|
|
|
// Init will initialize a new container structure by filling in network details
|
|
// and by querying other metadata from libpod, if possible.
|
|
func (c *Container) Init(conn context.Context, nets []*network.Network) error {
|
|
// initialize user-provided definitions
|
|
for i := range c.Networks {
|
|
var n *network.Network
|
|
for j := range nets {
|
|
if nets[j].Name == c.Networks[i].Name {
|
|
n = nets[j]
|
|
}
|
|
}
|
|
if n == nil {
|
|
continue
|
|
}
|
|
if len(c.Networks[i].DNS) == 0 {
|
|
c.Networks[i].DNS = n.DNS
|
|
}
|
|
if !c.Networks[i].IPv6.Valid {
|
|
if n.IPv6.Valid {
|
|
c.Networks[i].IPv6 = n.IPv6
|
|
} else {
|
|
c.Networks[i].IPv6.SetValid(true)
|
|
}
|
|
}
|
|
}
|
|
for i := range c.Mounts {
|
|
if c.Mounts[i].Type == "" {
|
|
c.Mounts[i].Type = "bind"
|
|
}
|
|
}
|
|
|
|
if !c.Umask.Valid {
|
|
c.Umask.SetValid(0o022)
|
|
}
|
|
|
|
if c.NetNS == "" {
|
|
c.NetNS = string(specgen.Bridge)
|
|
}
|
|
|
|
if conn == nil {
|
|
return fmt.Errorf("conn is nil: %s", c.Name)
|
|
}
|
|
c.conn = conn
|
|
|
|
c.watchCData()
|
|
c.wasRunning = c.IsRunning()
|
|
return nil
|
|
}
|
|
|
|
// LogEntry will return a *logrus.LogEntry, with some basic fields populated
|
|
// for this container. The idea is that the calling code would add other fields
|
|
// (optionally) and do something with the error.
|
|
func (c *Container) LogEntry() *log.Entry {
|
|
f := log.Fields{
|
|
"container": c.Name,
|
|
"wasRunning": c.wasRunning,
|
|
}
|
|
cdata := c.getCData()
|
|
if cdata != nil && cdata.ID != "" {
|
|
f["id"] = cdata.ID[:12] + "…"
|
|
}
|
|
if cdata != nil && cdata.State != nil {
|
|
f["state"] = cdata.State.Status
|
|
}
|
|
return log.WithFields(f)
|
|
}
|
|
|
|
func (c *Container) pull() error {
|
|
_, err := images.Pull(c.conn, c.Image, &images.PullOptions{})
|
|
return err
|
|
}
|
|
|
|
func (c *Container) newCommandSet(op string, cmds cmd.Commands) cmd.Set {
|
|
return cmd.Set{
|
|
ID: fmt.Sprintf("%s-%s", op, c.Name),
|
|
Commands: cmds,
|
|
}
|
|
}
|
|
|
|
// PullCommands will return a cmd.Set to pull the specified image using the
|
|
// libpod API
|
|
func (c *Container) PullCommands() cmd.Set {
|
|
return c.newCommandSet("PULL", cmd.Commands{
|
|
cmd.NewFunc("do_pull", func() error {
|
|
c.cdataInval <- true
|
|
return c.pull()
|
|
}),
|
|
})
|
|
}
|
|
|
|
// CreateCommands returns a cmd.Set that will create a container from the
// configured metadata. The container should not exist.
//
// The returned set: verifies the container does not already exist, pulls the
// image if it is missing locally, validates the generated spec, and finally
// creates the container. The specgen.SpecGenerator is built eagerly here,
// before the commands run.
func (c *Container) CreateCommands() cmd.Set {
	// Without an image there is nothing to create; return a set whose single
	// command reports the configuration error when executed.
	if c.Image == "" {
		return c.newCommandSet("CREATE", cmd.Commands{
			cmd.NewFunc("image_error", func() error {
				return fmt.Errorf("Image not configured")
			}),
		})
	}
	sysctl := map[string]string{}
	nets := map[string]types.PerNetworkOptions{}
	dns := []net.IP{}
	for i := range c.Networks {
		// When IPv6 is disabled for a network, suppress router advertisements
		// on that interface inside the container.
		if !c.Networks[i].IPv6.Bool {
			sysctl["net.ipv6.conf."+c.Networks[i].Name+".accept_ra"] = "0"
		}
		// Collect whichever static addresses (v4/v6) were configured.
		ips := []net.IP{}
		if c.Networks[i].IPv4Address != nil {
			ips = append(ips, c.Networks[i].IPv4Address)
		}
		if c.Networks[i].IPv6Address != nil {
			ips = append(ips, c.Networks[i].IPv6Address)
		}
		// The in-container interface is named after the network itself.
		nets[c.Networks[i].Name] = types.PerNetworkOptions{
			StaticIPs:     ips,
			InterfaceName: c.Networks[i].Name,
		}
		dns = append(dns, c.Networks[i].DNS...)
	}

	// Build the exposed-port map; UDP entries overwrite TCP on a port number
	// collision (map keyed by port only).
	expose := map[uint16]string{}
	for _, p := range c.ExposeTCP {
		expose[p] = "tcp"
	}
	for _, p := range c.ExposeUDP {
		expose[p] = "udp"
	}

	// Host-to-container port mappings; TCP only (see PortsTCP).
	portMappings := []types.PortMapping{}
	for ph, pc := range c.PortsTCP {
		portMappings = append(portMappings, types.PortMapping{
			HostPort:      ph,
			ContainerPort: pc,
			Protocol:      "tcp",
		})
	}

	spec := specgen.SpecGenerator{
		ContainerBasicConfig: specgen.ContainerBasicConfig{
			Name:          c.Name,
			UtsNS:         specgen.Namespace{NSMode: specgen.Private},
			Hostname:      c.Hostname,
			RawImageName:  c.Image,
			RestartPolicy: c.Restart,
			Sysctl:        sysctl,
			Env:           c.Env,
			Command:       c.Command,
		},
		ContainerStorageConfig: specgen.ContainerStorageConfig{
			Image:  c.Image,
			Mounts: c.Mounts,
		},
		ContainerNetworkConfig: specgen.ContainerNetworkConfig{
			Networks:            nets,
			DNSServers:          dns,
			Expose:              expose,
			PortMappings:        portMappings,
			PublishExposedPorts: len(expose) > 0,
			NetNS:               specgen.Namespace{NSMode: specgen.NamespaceMode(c.NetNS)},
		},
		ContainerSecurityConfig: specgen.ContainerSecurityConfig{
			User: c.User,
			// Umask is rendered in octal (e.g. "0o22" -> "022" form via %#o).
			Umask: fmt.Sprintf("%#o", c.Umask.Int64),
		},
	}

	return c.newCommandSet("CREATE", cmd.Commands{
		// Creating over an existing container is an error; bail early.
		cmd.NewFunc("bail_if_exists", func() error {
			if ex, err := containers.Exists(c.conn, c.Name, &containers.ExistsOptions{}); err != nil || ex {
				if err != nil {
					return err
				}
				return fmt.Errorf("container %s exists already", c.Name)
			}
			return nil
		}),
		// Pull only when the image is not already present locally.
		cmd.NewFunc("pull_if_necessary", func() error {
			if ex, err := images.Exists(c.conn, c.Image, &images.ExistsOptions{}); err != nil || !ex {
				if err != nil {
					return err
				}
				c.cdataInval <- true
				return c.pull()
			}
			return nil
		}),
		cmd.NewFunc("validate_spec", spec.Validate),
		cmd.NewFunc("do_create", func() error {
			if _, err := containers.CreateWithSpec(c.conn, &spec, nil); err != nil {
				return err
			}
			// The container now exists; drop the cached inspect data.
			c.cdataInval <- true
			return nil
		}),
	})
}
|
|
|
|
// RecreateCommands will stop (if running), remove (if exists), (re)create, and restart (if
|
|
// it was initially running) a container. The image is not pulled.
|
|
func (c *Container) RecreateCommands() cmd.Set {
|
|
return c.newCommandSet("RECREATE", cmd.Commands{
|
|
cmd.NewSet(c.RemoveCommands()),
|
|
cmd.NewSet(c.CreateCommands()),
|
|
})
|
|
}
|
|
|
|
// RemoveCommands removes a container (as if by `podman rm -f`)
|
|
func (c *Container) RemoveCommands() cmd.Set {
|
|
return c.newCommandSet("REMOVE", cmd.Commands{
|
|
cmd.NewFunc("remove_if_exists", func() error {
|
|
cdata := c.getCData()
|
|
if cdata == nil || cdata.ID == "" {
|
|
return nil
|
|
}
|
|
yes := true
|
|
_, err := containers.Remove(c.conn, cdata.ID, &containers.RemoveOptions{Force: &yes})
|
|
c.cdataInval <- true
|
|
return err
|
|
}),
|
|
})
|
|
}
|
|
|
|
// StartCommands will start a container if it's not already running.
|
|
func (c *Container) StartCommands() cmd.Set {
|
|
return c.newCommandSet("START", cmd.Commands{
|
|
cmd.NewFunc("start_container", func() error {
|
|
if c.IsRunning() {
|
|
c.LogEntry().Debugln("Container start was commanded but it is already running. Not a problem.")
|
|
return nil
|
|
}
|
|
cdata := c.getCData()
|
|
if cdata == nil {
|
|
return fmt.Errorf("container %s is not created; cannot start it", c.Name)
|
|
}
|
|
err := containers.Start(c.conn, cdata.ID, nil)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
_, err = containers.Wait(
|
|
c.conn,
|
|
cdata.ID,
|
|
&containers.WaitOptions{Condition: []define.ContainerStatus{define.ContainerStateRunning}},
|
|
)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
c.cdataInval <- true
|
|
cdata = c.getCData()
|
|
if cdata != nil && cdata.HostConfig != nil && cdata.HostConfig.NetworkMode == "bridge" {
|
|
err = c.assureNetNS()
|
|
if err != nil {
|
|
c.LogEntry().WithField("error", err).Warnln("Failed to create network namespace")
|
|
}
|
|
}
|
|
return nil
|
|
}),
|
|
})
|
|
}
|
|
|
|
// IsRunning returns true if libpod reports the container status is running, or
|
|
// false otherwise. If an error happens (e.g. the container is not created),
|
|
// the default value is false.
|
|
func (c *Container) IsRunning() bool {
|
|
cdata := c.getCData()
|
|
if cdata != nil && cdata.State != nil {
|
|
return cdata.State.Running
|
|
}
|
|
return false
|
|
}
|
|
|
|
// IsCreated tests whether libpod sees the container as being created (running or not)
|
|
func (c *Container) IsCreated() bool {
|
|
cdata := c.getCData()
|
|
if cdata == nil || cdata.ID == "" {
|
|
return false
|
|
}
|
|
return true
|
|
}
|
|
|
|
// UpdateCommands will pull the image (to force updates) and then recreate the
|
|
// container. It will be stopped first.
|
|
func (c *Container) UpdateCommands() cmd.Set {
|
|
var startImg string
|
|
return c.newCommandSet("UPDATE", cmd.Commands{
|
|
cmd.NewFunc("get_starting_image", func() error {
|
|
cdata := c.getCData()
|
|
if cdata != nil {
|
|
startImg = cdata.Image
|
|
}
|
|
return nil
|
|
}),
|
|
cmd.NewSet(c.PullCommands()),
|
|
cmd.NewConditional("rebuild_if_updated",
|
|
func() bool {
|
|
tagged, _ := regexp.MatchString(":[-_a-zA-Z0-9]+$", c.Image)
|
|
cImage := c.Image
|
|
if !tagged {
|
|
cImage += ":latest"
|
|
}
|
|
opts := images.ListOptions{
|
|
Filters: map[string][]string{
|
|
"reference": {cImage},
|
|
},
|
|
}
|
|
imgs, err := images.List(c.conn, &opts)
|
|
if len(imgs) < 1 || err != nil {
|
|
return false
|
|
}
|
|
if startImg != imgs[0].Id() {
|
|
return true
|
|
}
|
|
return false
|
|
},
|
|
cmd.NewSet(c.newCommandSet("REBUILD", cmd.Commands{
|
|
cmd.NewSet(c.StopCommands()),
|
|
cmd.NewSet(c.RemoveCommands()),
|
|
cmd.NewSet(c.CreateCommands()),
|
|
})),
|
|
cmd.NewNop(),
|
|
),
|
|
})
|
|
}
|
|
|
|
// ConditionalStartCommands - several of the other command sets would leave the
|
|
// container in the stopped state. This set will restart a container if it was
|
|
// running when this container was first initialized.
|
|
func (c *Container) ConditionalStartCommands() cmd.Set {
|
|
if c.wasRunning {
|
|
return c.StartCommands()
|
|
}
|
|
return cmd.Set{}
|
|
}
|
|
|
|
// StopCommands will stop a container if it is running, defining a 10 second
|
|
// timeout before the processes are killed by lippod
|
|
func (c *Container) StopCommands() cmd.Set {
|
|
return c.newCommandSet("STOP", cmd.Commands{
|
|
cmd.NewFunc("stop_if_running", func() error {
|
|
if !c.IsRunning() {
|
|
c.LogEntry().Debugln("Container stop was commanded but it wasn't running. Not a problem.")
|
|
return nil
|
|
}
|
|
cdata := c.getCData()
|
|
var timeout uint = 10
|
|
err := containers.Stop(c.conn, cdata.ID, &containers.StopOptions{Timeout: &timeout})
|
|
if err != nil {
|
|
return err
|
|
}
|
|
_, err = containers.Wait(
|
|
c.conn,
|
|
cdata.ID,
|
|
&containers.WaitOptions{Condition: []define.ContainerStatus{define.ContainerStateExited}},
|
|
)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
c.cdataInval <- true
|
|
return nil
|
|
}),
|
|
})
|
|
}
|
|
|
|
// watchCData creates the cache channels and starts a goroutine that serves
// container-inspect data on demand, caching the result until a value arrives
// on cdataInval. Callers must use getCData(), never the channels directly.
//
// NOTE(review): the goroutine has no stop condition and runs for the life of
// the process; c.conn is a context but its cancellation is not observed here
// — confirm whether shutdown is handled elsewhere.
func (c *Container) watchCData() {
	c.cdataChan = make(chan chan *define.InspectContainerData)
	c.cdataInval = make(chan bool)
	go func() {
		// cache holds the last Inspect result; nil means "must re-inspect".
		var cache *define.InspectContainerData
		for {
			select {
			case dchan := <-c.cdataChan:
				if cache == nil {
					no := false
					// Inspect errors are deliberately dropped: the caller
					// receives nil and treats it as "container not created".
					cache, _ = containers.Inspect(c.conn, c.Name, &containers.InspectOptions{Size: &no})
				}
				dchan <- cache
			case <-c.cdataInval:
				// Container state changed; force a fresh Inspect next time.
				cache = nil
			}
		}
	}()
}
|
|
|
|
func (c *Container) getCData() *define.InspectContainerData {
|
|
cc := make(chan *define.InspectContainerData)
|
|
c.cdataChan <- cc
|
|
return <-cc
|
|
}
|
|
|
|
// Pid will return the host process id of the main container process (pid
|
|
// 1 inside the container)
|
|
func (c *Container) Pid() int {
|
|
cdata := c.getCData()
|
|
if cdata != nil && cdata.State != nil {
|
|
return cdata.State.Pid
|
|
}
|
|
return 0
|
|
}
|
|
|
|
func (c *Container) assureNetNS() error {
|
|
cdata := c.getCData()
|
|
if nil == cdata || nil == cdata.NetworkSettings {
|
|
return fmt.Errorf("network namespace not available")
|
|
}
|
|
netns := cdata.NetworkSettings.SandboxKey
|
|
if err := exec.Command("rm", "-f", "/var/run/netns/"+c.Name).Run(); err != nil {
|
|
return err
|
|
}
|
|
if err := exec.Command("ln", "-sf", netns, "/var/run/netns/"+c.Name).Run(); err != nil {
|
|
return err
|
|
}
|
|
return nil
|
|
}
|