@@ -7,6 +7,7 @@
 	"io"
 	"net/url"
 	"os"
+	"os/exec"
 	"path"
 	"path/filepath"
 	"sort"
@@ -145,7 +146,7 @@ type flags struct {
 	ethlink string
 }

-func dockerCmd() *cobra.Command {
+func dockerCmd(ch chan func() error) *cobra.Command {
 	var flags flags

 	cmd := &cobra.Command{
@@ -284,7 +285,7 @@ func dockerCmd() *cobra.Command {
 				return xerrors.Errorf("wait for dockerd: %w", err)
 			}

-			err = runDockerCVM(ctx, log, client, blog, flags)
+			err = runDockerCVM(ctx, log, client, blog, ch, flags)
 			if err != nil {
 				// It's possible we failed because we ran out of disk while
 				// pulling the image. We should restart the daemon and use
@@ -313,7 +314,7 @@ func dockerCmd() *cobra.Command {
 				}()

 				log.Debug(ctx, "reattempting container creation")
-				err = runDockerCVM(ctx, log, client, blog, flags)
+				err = runDockerCVM(ctx, log, client, blog, ch, flags)
 			}
 			if err != nil {
 				blog.Errorf("Failed to run envbox: %v", err)
@@ -356,7 +357,7 @@ func dockerCmd() *cobra.Command {
 	return cmd
 }

-func runDockerCVM(ctx context.Context, log slog.Logger, client dockerutil.DockerClient, blog buildlog.Logger, flags flags) error {
+func runDockerCVM(ctx context.Context, log slog.Logger, client dockerutil.DockerClient, blog buildlog.Logger, shutdownCh chan func() error, flags flags) error {
 	fs := xunix.GetFS(ctx)

 	// Set our OOM score to something really unfavorable to avoid getting killed
@@ -676,31 +677,71 @@ func runDockerCVM(ctx context.Context, log slog.Logger, client dockerutil.Docker
 	}

 	blog.Info("Envbox startup complete!")
-
-	// The bootstrap script doesn't return since it execs the agent
-	// meaning that it can get pretty noisy if we were to log by default.
-	// In order to allow users to discern issues getting the bootstrap script
-	// to complete successfully we pipe the output to stdout if
-	// CODER_DEBUG=true.
-	debugWriter := io.Discard
-	if flags.debug {
-		debugWriter = os.Stdout
-	}
-	// Bootstrap the container if a script has been provided.
-	blog.Infof("Bootstrapping workspace...")
-	err = dockerutil.BootstrapContainer(ctx, client, dockerutil.BootstrapConfig{
-		ContainerID: containerID,
-		User:        imgMeta.UID,
-		Script:      flags.boostrapScript,
-		// We set this because the default behavior is to download the agent
-		// to /tmp/coder.XXXX. This causes a race to happen where we finish
-		// downloading the binary but before we can execute systemd remounts
-		// /tmp.
-		Env:       []string{fmt.Sprintf("BINARY_DIR=%s", bootDir)},
-		StdOutErr: debugWriter,
+	if flags.boostrapScript == "" {
+		return nil
+	}
+
+	bootstrapExec, err := client.ContainerExecCreate(ctx, containerID, dockertypes.ExecConfig{
+		User:         imgMeta.UID,
+		Cmd:          []string{"/bin/sh", "-s"},
+		Env:          []string{fmt.Sprintf("BINARY_DIR=%s", bootDir)},
+		AttachStdin:  true,
+		AttachStdout: true,
+		AttachStderr: true,
+		Detach:       true,
 	})
 	if err != nil {
-		return xerrors.Errorf("boostrap container: %w", err)
+		return xerrors.Errorf("create exec: %w", err)
+	}
+
+	resp, err := client.ContainerExecAttach(ctx, bootstrapExec.ID, dockertypes.ExecStartCheck{})
+	if err != nil {
+		return xerrors.Errorf("attach exec: %w", err)
+	}
+
+	_, err = io.Copy(resp.Conn, strings.NewReader(flags.boostrapScript))
+	if err != nil {
+		return xerrors.Errorf("copy stdin: %w", err)
+	}
+	err = resp.CloseWrite()
+	if err != nil {
+		return xerrors.Errorf("close write: %w", err)
+	}
+
+	go func() {
+		defer resp.Close()
+		rd := io.LimitReader(resp.Reader, 1<<10)
+		_, err := io.Copy(blog, rd)
+		if err != nil {
+			log.Error(ctx, "copy bootstrap output", slog.Error(err))
+		}
+	}()
+
+	// We can't just call ExecInspect because there's a race where the cmd
+	// hasn't been assigned a PID yet.
+	bootstrapPID, err := dockerutil.GetExecPID(ctx, client, bootstrapExec.ID)
+	if err != nil {
+		return xerrors.Errorf("exec inspect: %w", err)
+	}
+
+	shutdownCh <- func() error {
+		log.Debug(ctx, "killing container", slog.F("bootstrap_pid", bootstrapPID))
+
+		// The PID returned is the PID _outside_ the container...
+		//nolint:gosec
+		out, err := exec.Command("kill", "-TERM", strconv.Itoa(bootstrapPID)).CombinedOutput()
+		if err != nil {
+			return xerrors.Errorf("kill bootstrap process (%s): %w", out, err)
+		}
+
+		log.Debug(ctx, "sent kill signal waiting for process to exit")
+		err = dockerutil.WaitForExit(ctx, client, bootstrapExec.ID)
+		if err != nil {
+			return xerrors.Errorf("wait for exit: %w", err)
+		}
+
+		log.Debug(ctx, "bootstrap process successfully exited")
+		return nil
 	}

 	return nil
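The comment in the diff notes that inspecting the exec immediately can race the exec start, because Docker may not have assigned the bootstrap command a PID yet, which is why the change goes through `dockerutil.GetExecPID`. That helper's implementation is not part of this diff; the snippet below is only a rough sketch of how such a helper could poll the exec-inspect endpoint until a PID appears, written against the upstream `github.com/docker/docker/client` API rather than envbox's own `dockerutil.DockerClient` wrapper.

```go
package sketch

import (
	"context"
	"time"

	dockerclient "github.com/docker/docker/client"
)

// getExecPID is a hypothetical stand-in for dockerutil.GetExecPID: it polls
// ContainerExecInspect until Docker reports a non-zero PID for the exec,
// which avoids the race where the command has not started yet.
func getExecPID(ctx context.Context, client dockerclient.APIClient, execID string) (int, error) {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		inspect, err := client.ContainerExecInspect(ctx, execID)
		if err != nil {
			return 0, err
		}
		if inspect.Pid != 0 {
			return inspect.Pid, nil
		}
		select {
		case <-ctx.Done():
			return 0, ctx.Err()
		case <-ticker.C:
		}
	}
}
```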
|
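The new `chan func() error` parameter lets `runDockerCVM` hand a cleanup hook (terminate the bootstrap exec and wait for it) back to whoever constructed the command. The consuming side is not shown in this diff, so the following is a minimal, hypothetical sketch of how a caller might collect such hooks and run them when a termination signal arrives; the function name and wiring are assumptions, not envbox's actual code.

```go
package sketch

import (
	"log"
	"os"
	"os/signal"
	"syscall"
)

// runShutdownHooks is a hypothetical caller-side helper. It receives cleanup
// hooks as runDockerCVM registers them (the send there blocks until someone
// receives), then runs every collected hook once SIGTERM/SIGINT arrives.
func runShutdownHooks(ch <-chan func() error) {
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGTERM, syscall.SIGINT)

	var hooks []func() error
	for {
		select {
		case hook := <-ch:
			// Collect hooks as they are registered.
			hooks = append(hooks, hook)
		case <-sigs:
			// Signal received: run every registered hook, then return.
			for _, hook := range hooks {
				if err := hook(); err != nil {
					// Keep going so later hooks still get a chance to run.
					log.Printf("shutdown hook failed: %v", err)
				}
			}
			return
		}
	}
}
```

In this sketch the same channel would be passed to the command constructor, e.g. `ch := make(chan func() error)`, `go runShutdownHooks(ch)`, `cmd := dockerCmd(ch)`; again, this is illustrative only and not taken from the repository.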