Factor the service + capability wiring out of Daemon.Open() into
wireServices(d), an idempotent helper that constructs HostNetwork,
ImageService, WorkspaceService, and VMService from whatever
infrastructure (runner, store, config, layout, logger, closing) is
already set on d. Open() calls it once after filling the composition
root; tests that build &Daemon{...} literals call it to get a working
service graph, preinstalling stubs on the fields they want to fake.
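For orientation, the helper is roughly shaped like this (a sketch only: the constructor arguments and the services' internal field names are assumptions, not copied from the diff):

```go
// wireServices fills in every service that is still nil, reusing
// whatever infrastructure the caller already placed on d. Running it
// again, or after a test preinstalled a stub, leaves non-nil fields
// untouched. Internal field names here are illustrative.
func wireServices(d *Daemon) {
	if d.net == nil {
		d.net = &HostNetwork{runner: d.runner, logger: d.logger}
	}
	if d.img == nil {
		d.img = &ImageService{store: d.store, layout: d.layout, logger: d.logger}
	}
	if d.ws == nil {
		d.ws = &WorkspaceService{store: d.store, layout: d.layout, logger: d.logger}
	}
	if d.vm == nil {
		d.vm = &VMService{
			store:   d.store,
			config:  d.config,
			net:     d.net, // peer pointers pick up any stub installed above
			img:     d.img,
			ws:      d.ws,
			closing: d.closing,
			logger:  d.logger,
		}
	}
}
```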
Drops the four lazy-init getters on *Daemon — d.hostNet(),
d.imageSvc(), d.workspaceSvc(), d.vmSvc() — whose sole purpose was
keeping test literals working. Every production call site now reads
d.net / d.img / d.ws / d.vm directly; the services are guaranteed
non-nil once Open returns. No behavior change.
Mechanical: all existing `d.xxxSvc()` calls (production + tests)
rewritten to field access; each `d := &Daemon{...}` in tests gets a
trailing wireServices(d) so the literal + wiring are side-by-side.
Tests that override a pre-built service (e.g. d.img = &ImageService{
bundleFetch: stub}) now set the override before wireServices so the
replacement propagates into VMService's peer pointer.
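The resulting test pattern, sketched with a placeholder stub (stubFetch and the literal's exact fields are illustrative, not lifted from the diff):

```go
// Hypothetical setup: real infrastructure in the literal, the stub
// installed before wiring, then wireServices builds the remaining
// services around the preinstalled d.img.
d := &Daemon{
	store:   openDaemonStore(t),
	closing: make(chan struct{}),
	logger:  slog.New(slog.NewTextHandler(io.Discard, nil)),
}
d.img = &ImageService{bundleFetch: stubFetch} // override first
wireServices(d)                               // then wire everything else
```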
Also nil-guards HostNetwork.stopVMDNS and d.store in Close() so
partially-initialised daemons (pre-reconcile open failure) still
tear down cleanly — same contract the old lazy getters provided.
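In spirit, the guarded teardown looks like the following (a sketch under assumed names such as closeOnce and a stopVMDNS method; the real Close presumably does more than this):

```go
func (d *Daemon) Close() error {
	var err error
	d.closeOnce.Do(func() {
		close(d.closing)
		// Subsystems a failed Open never created are simply skipped.
		if d.net != nil {
			d.net.stopVMDNS() // itself a no-op when vmDNS was never started
		}
		if d.store != nil {
			err = d.store.Close()
		}
	})
	return err
}
```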
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
package daemon

import (
	"errors"
	"io"
	"log/slog"
	"sync/atomic"
	"testing"

	"banger/internal/model"
	"banger/internal/vmdns"
)

// TestCloseOnPartiallyInitialisedDaemon pins the contract that Open's
// error-path defer relies on: Close must be safe to call when a
// startup step failed before every subsystem was set up. If this
// breaks, the deferred `if err != nil { d.Close() }` rollback in
// Open() starts panicking on zero-valued fields.
func TestCloseOnPartiallyInitialisedDaemon(t *testing.T) {
	cases := []struct {
		name   string
		build  func(t *testing.T) *Daemon
		verify func(t *testing.T, d *Daemon)
	}{
		{
			name: "only store + closing channel (early failure)",
			build: func(t *testing.T) *Daemon {
				return &Daemon{
					store:   openDaemonStore(t),
					closing: make(chan struct{}),
					logger:  slog.New(slog.NewTextHandler(io.Discard, nil)),
				}
			},
			verify: func(t *testing.T, d *Daemon) {
				// The closing channel should have been closed.
				select {
				case <-d.closing:
				default:
					t.Error("closing channel not closed by Close")
				}
			},
		},
		{
			name: "with vmDNS listener (fail after startVMDNS)",
			build: func(t *testing.T) *Daemon {
				server, err := vmdns.New("127.0.0.1:0", nil)
				if err != nil {
					t.Fatalf("vmdns.New: %v", err)
				}
				return &Daemon{
					store:   openDaemonStore(t),
					closing: make(chan struct{}),
					net:     &HostNetwork{vmDNS: server},
					logger:  slog.New(slog.NewTextHandler(io.Discard, nil)),
				}
			},
			verify: func(t *testing.T, d *Daemon) {
				if d.net.vmDNS != nil {
					t.Error("vmDNS not cleared by Close")
				}
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			d := tc.build(t)
			if err := d.Close(); err != nil {
				t.Fatalf("Close returned error: %v", err)
			}
			tc.verify(t, d)

			// Second Close must be a no-op (sync.Once) — it must not
			// panic on a double channel close or repeated teardown.
			if err := d.Close(); err != nil {
				t.Fatalf("second Close error: %v", err)
			}
		})
	}
}

// TestCloseIdempotentUnderConcurrency catches regressions of the
// sync.Once guard that makes repeated Close calls safe. The open-
// failure defer relies on this: if the user cancels before Open
// returns and also calls Close afterwards, both paths must survive.
func TestCloseIdempotentUnderConcurrency(t *testing.T) {
	d := &Daemon{
		store:   openDaemonStore(t),
		closing: make(chan struct{}),
		logger:  slog.New(slog.NewTextHandler(io.Discard, nil)),
		config:  model.DaemonConfig{BridgeName: ""},
	}
	wireServices(d)

	var count atomic.Int32
	done := make(chan struct{})
	for i := 0; i < 5; i++ {
		go func() {
			if err := d.Close(); err != nil {
				t.Errorf("Close error: %v", err)
			}
			// Use Add's return value: exactly one goroutine observes 5
			// and closes done. A separate Load after Add could let two
			// goroutines both see 5 and double-close the channel.
			if count.Add(1) == 5 {
				close(done)
			}
		}()
	}
	<-done

	// The channel must be closed exactly once (sync.Once covers the
	// inner close(d.closing)). Receiving from a closed channel never
	// blocks, so hitting the default branch means the channel was
	// never closed; a double close would already have panicked inside
	// one of the Close calls above.
	select {
	case <-d.closing:
	default:
		t.Fatal("closing channel not closed after concurrent Close calls")
	}
}

// TestOpenFailureRunsCloseCleanup is a structural check: confirms
// the deferred rollback in Open actually fires. Can't easily run
// Open() end-to-end (hits paths.Resolve + sudo), but we can simulate
// the pattern by threading a named-return err through the same
// defer and asserting Close runs.
func TestOpenFailureRunsCloseCleanup(t *testing.T) {
	closed := false
	fakeClose := func() { closed = true }

	runOpen := func() (err error) {
		defer func() {
			if err != nil {
				fakeClose()
			}
		}()
		err = errors.New("simulated late-stage startup failure")
		return err
	}

	if err := runOpen(); err == nil {
		t.Fatal("expected simulated error")
	}
	if !closed {
		t.Fatal("deferred cleanup did not fire on err != nil")
	}
}