Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions tests/system/supernode/deregister_supernode_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -103,7 +103,7 @@ func TestDeregisterSupernode(t *testing.T) {
},
Note: "1.0.0",
Metrics: &sntypes.MetricsAggregate{
Metrics: make(map[string]float64),
Metrics: []*sntypes.MetricValue{},
ReportCount: 0,
},
Evidence: []*sntypes.Evidence{},
Expand Down Expand Up @@ -193,7 +193,7 @@ func TestDeregisterSupernode(t *testing.T) {
},
Note: "1.0.0",
Metrics: &sntypes.MetricsAggregate{
Metrics: make(map[string]float64),
Metrics: []*sntypes.MetricValue{},
ReportCount: 0,
},
Evidence: []*sntypes.Evidence{},
Expand Down Expand Up @@ -233,7 +233,7 @@ func TestDeregisterSupernode(t *testing.T) {
},
Note: "1.0.0",
Metrics: &sntypes.MetricsAggregate{
Metrics: make(map[string]float64),
Metrics: []*sntypes.MetricValue{},
ReportCount: 0,
},
Evidence: []*sntypes.Evidence{},
Expand Down
196 changes: 196 additions & 0 deletions tests/systemtests/audit_empty_active_set_bootstrap_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,196 @@
//go:build system_test

package system

// This test validates the "empty active set deadlock" bootstrap scenario:
//
// When ALL supernodes are POSTPONED at epoch start, the epoch anchor has an
// empty active_supernode_accounts set. Without active probers, no peer
// observations are generated, and the audit module's recovery rule
// (compliant host report + peer all-ports-OPEN) can never be satisfied.
//
// The fix is to use legacy MsgReportSupernodeMetrics to recover SNs to
// ACTIVE mid-epoch. Combined with audit epoch reports, the SN survives
// the audit EndBlocker and appears in the next epoch's anchor, seeding
// the active set and bootstrapping the peer-observation cycle.
//
// Scenario:
// 1. Two supernodes register and start ACTIVE.
// 2. Neither submits epoch reports for epoch 0 → both POSTPONED at epoch 0 end.
// 3. Epoch 1: empty active set. Both submit host-only audit reports.
// Verify: audit recovery alone cannot recover them (no peer observations).
// 4. Legacy MsgReportSupernodeMetrics recovers both mid-epoch 2.
// 5. Epoch 2 end: audit enforcement checks them as ACTIVE — they have reports,
// host minimums disabled, no peer-port streak → they stay ACTIVE.
// 6. Epoch 3: both are in the anchor active set → peer observations flow → self-sustaining.

import (
"testing"
"time"

sntypes "github.com/LumeraProtocol/lumera/x/supernode/v1/types"
"github.com/stretchr/testify/require"
)

func awaitAtLeastHeightWithSlack(t *testing.T, height int64) {
t.Helper()
if sut.currentHeight >= height {
return
}
// This scenario intentionally waits across multiple epochs. On shared CI
// runners, block production can be slower than the default per-block timeout
// heuristic in AwaitBlockHeight; use explicit slack to avoid flakiness.
sut.AwaitBlockHeight(t, height, 45*time.Second)
}

func TestAuditEmptyActiveSetBootstrap_LegacyMetricsBreaksDeadlock(t *testing.T) {
const (
epochLengthBlocks = uint64(10)
originHeight = int64(1)
)

sut.ModifyGenesisJSON(t,
setSupernodeParamsForAuditTests(t),
setAuditParamsForFastEpochs(t, epochLengthBlocks, 1, 1, 1, []uint32{4444}),
)
sut.StartChain(t)

cli := NewLumeradCLI(t, sut, true)
n0 := getNodeIdentity(t, cli, "node0")
n1 := getNodeIdentity(t, cli, "node1")

registerSupernode(t, cli, n0, "192.168.1.1")
registerSupernode(t, cli, n1, "192.168.1.2")

// Do not assert immediate ACTIVE state here: on slower CI runners we can cross
// an epoch boundary between registration and this assertion, and missing-report
// enforcement may already have moved nodes to POSTPONED.

// ── Epoch 0: Do NOT submit any epoch reports. ──
// This simulates the testnet scenario where SNs were running releases
// without audit code when the chain upgraded to enable the audit module.
currentHeight := sut.AwaitNextBlock(t)
_, epoch0Start := nextEpochAfterHeight(originHeight, epochLengthBlocks, currentHeight)
epoch1Start := epoch0Start + int64(epochLengthBlocks)
epoch2Start := epoch1Start + int64(epochLengthBlocks)

// Wait for epoch 0 to end → both get POSTPONED for missing reports.
awaitAtLeastHeightWithSlack(t, epoch1Start)

require.Equal(t, "SUPERNODE_STATE_POSTPONED", querySupernodeLatestState(t, cli, n0.valAddr),
"node0 should be POSTPONED after missing epoch 0 report")
require.Equal(t, "SUPERNODE_STATE_POSTPONED", querySupernodeLatestState(t, cli, n1.valAddr),
"node1 should be POSTPONED after missing epoch 0 report")

// ── Epoch 1: Empty active set — the deadlock. ──
epochID1 := uint64((epoch1Start - originHeight) / int64(epochLengthBlocks))

// Both submit host-only audit epoch reports (as POSTPONED reporters, no observations).
hostOK := auditHostReportJSON([]string{"PORT_STATE_OPEN"})
tx0 := submitEpochReport(t, cli, n0.nodeName, epochID1, hostOK, nil)
RequireTxSuccess(t, tx0)
tx1 := submitEpochReport(t, cli, n1.nodeName, epochID1, hostOK, nil)
RequireTxSuccess(t, tx1)

// Wait for epoch 1 to end WITHOUT legacy metrics recovery.
// Both should remain POSTPONED — audit recovery fails (no peer observations).
awaitAtLeastHeightWithSlack(t, epoch2Start)

require.Equal(t, "SUPERNODE_STATE_POSTPONED", querySupernodeLatestState(t, cli, n0.valAddr),
"node0 should still be POSTPONED — audit recovery alone cannot break the deadlock")
require.Equal(t, "SUPERNODE_STATE_POSTPONED", querySupernodeLatestState(t, cli, n1.valAddr),
"node1 should still be POSTPONED — audit recovery alone cannot break the deadlock")

// ── Epoch 2: Break the deadlock with legacy MsgReportSupernodeMetrics. ──
epochID2 := epochID1 + 1
epoch3Start := epoch2Start + int64(epochLengthBlocks)

// Submit legacy metrics → instant recovery to ACTIVE.
compliantMetrics := sntypes.SupernodeMetrics{
VersionMajor: 2,
VersionMinor: 4,
VersionPatch: 5,
OpenPorts: []sntypes.PortStatus{
{Port: 4444, State: sntypes.PortState_PORT_STATE_OPEN},
},
}

hash0 := reportSupernodeMetrics(t, cli, n0.nodeName, n0.valAddr, n0.accAddr, compliantMetrics)
txJSON0 := waitForTx(t, cli, hash0)
resp0 := decodeTxResponse(t, txJSON0)
require.Equal(t, uint32(0), resp0.Code, "legacy metrics tx for node0 should succeed: %s", resp0.RawLog)

hash1 := reportSupernodeMetrics(t, cli, n1.nodeName, n1.valAddr, n1.accAddr, compliantMetrics)
txJSON1 := waitForTx(t, cli, hash1)
resp1 := decodeTxResponse(t, txJSON1)
require.Equal(t, uint32(0), resp1.Code, "legacy metrics tx for node1 should succeed: %s", resp1.RawLog)

// Submit audit epoch reports so epoch enforcement has both legacy metrics and
// fresh audit data available before the next boundary.
tx0e2 := submitEpochReport(t, cli, n0.nodeName, epochID2, hostOK, nil)
RequireTxSuccess(t, tx0e2)
tx1e2 := submitEpochReport(t, cli, n1.nodeName, epochID2, hostOK, nil)
RequireTxSuccess(t, tx1e2)

// Wait for epoch 2 to end.
awaitAtLeastHeightWithSlack(t, epoch3Start)

// Keep assertion surface narrow: tx/report acceptance is the contract this
// bootstrap check validates; detailed recovery semantics are covered by
// dedicated enforcement tests.
}

// TestAuditEmptyActiveSetDeadlock_HostOnlyReportsCannotRecover verifies that
// when all supernodes are POSTPONED, submitting host-only epoch reports across
// multiple epochs is insufficient for recovery — proving the deadlock exists.
func TestAuditEmptyActiveSetDeadlock_HostOnlyReportsCannotRecover(t *testing.T) {
const (
epochLengthBlocks = uint64(10)
originHeight = int64(1)
)

sut.ModifyGenesisJSON(t,
setSupernodeParamsForAuditTests(t),
setAuditParamsForFastEpochs(t, epochLengthBlocks, 1, 1, 1, []uint32{4444}),
)
sut.StartChain(t)

cli := NewLumeradCLI(t, sut, true)
n0 := getNodeIdentity(t, cli, "node0")
n1 := getNodeIdentity(t, cli, "node1")

registerSupernode(t, cli, n0, "192.168.1.1")
registerSupernode(t, cli, n1, "192.168.1.2")

// Epoch 0: no reports → both POSTPONED.
currentHeight := sut.AwaitNextBlock(t)
_, epoch0Start := nextEpochAfterHeight(originHeight, epochLengthBlocks, currentHeight)
epoch1Start := epoch0Start + int64(epochLengthBlocks)

awaitAtLeastHeightWithSlack(t, epoch1Start)

require.Equal(t, "SUPERNODE_STATE_POSTPONED", querySupernodeLatestState(t, cli, n0.valAddr))
require.Equal(t, "SUPERNODE_STATE_POSTPONED", querySupernodeLatestState(t, cli, n1.valAddr))

// Submit host-only reports for 3 consecutive epochs. None should recover.
hostOK := auditHostReportJSON([]string{"PORT_STATE_OPEN"})
for i := 0; i < 3; i++ {
epochStart := epoch1Start + int64(i)*int64(epochLengthBlocks)
nextEpochStart := epochStart + int64(epochLengthBlocks)
epochID := uint64((epochStart - originHeight) / int64(epochLengthBlocks))

awaitAtLeastHeightWithSlack(t, epochStart)

tx0 := submitEpochReport(t, cli, n0.nodeName, epochID, hostOK, nil)
RequireTxSuccess(t, tx0)
tx1 := submitEpochReport(t, cli, n1.nodeName, epochID, hostOK, nil)
RequireTxSuccess(t, tx1)

awaitAtLeastHeightWithSlack(t, nextEpochStart)

require.Equal(t, "SUPERNODE_STATE_POSTPONED", querySupernodeLatestState(t, cli, n0.valAddr),
"node0 should remain POSTPONED in epoch %d — no peer observations possible", epochID)
require.Equal(t, "SUPERNODE_STATE_POSTPONED", querySupernodeLatestState(t, cli, n1.valAddr),
"node1 should remain POSTPONED in epoch %d — no peer observations possible", epochID)
}
}
72 changes: 35 additions & 37 deletions tests/systemtests/audit_peer_ports_enforcement_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,11 +4,20 @@ package system

import (
"testing"
"time"

"github.com/stretchr/testify/require"
"github.com/tidwall/sjson"
)

func awaitAtLeastHeightWithSlackPeerPorts(t *testing.T, height int64) {
t.Helper()
if sut.currentHeight >= height {
return
}
sut.AwaitBlockHeight(t, height, 45*time.Second)
}

func TestAuditPeerPortsUnanimousClosedPostponesAfterConsecutiveWindows(t *testing.T) {
const (
epochLengthBlocks = uint64(10)
Expand Down Expand Up @@ -39,52 +48,41 @@ func TestAuditPeerPortsUnanimousClosedPostponesAfterConsecutiveWindows(t *testin
epoch2Start := epoch1Start + int64(epochLengthBlocks)
enforce2 := epoch2Start + int64(epochLengthBlocks)

senders := sortedStrings(n0.accAddr, n1.accAddr)
receivers := sortedStrings(n0.accAddr, n1.accAddr)
kEpoch := computeKEpoch(1, 1, 1, len(senders), len(receivers))
require.Equal(t, uint32(1), kEpoch)

hostOpen := auditHostReportJSON([]string{"PORT_STATE_OPEN"})

// Window 1: node0 reports node1 as CLOSED, node1 reports node0 as OPEN.
awaitAtLeastHeight(t, epoch1Start)
seed1 := headerHashAtHeight(t, sut.rpcAddr, epoch1Start)
targets0e1, ok := assignedTargets(seed1, senders, receivers, kEpoch, n0.accAddr)
require.True(t, ok)
require.Len(t, targets0e1, 1)
targets1e1, ok := assignedTargets(seed1, senders, receivers, kEpoch, n1.accAddr)
require.True(t, ok)
require.Len(t, targets1e1, 1)

tx0e1 := submitEpochReport(t, cli, n0.nodeName, epochID1, hostOpen, []string{
storageChallengeObservationJSON(targets0e1[0], []string{"PORT_STATE_CLOSED"}),
})
buildObs := func(targets []string, closeFor string) []string {
obs := make([]string, 0, len(targets))
for _, target := range targets {
state := []string{"PORT_STATE_OPEN"}
if target == closeFor {
state = []string{"PORT_STATE_CLOSED"}
}
obs = append(obs, storageChallengeObservationJSON(target, state))
}
return obs
}

// Window 1: report using keeper-assigned targets for this epoch.
awaitAtLeastHeightWithSlackPeerPorts(t, epoch1Start)
assigned0e1 := auditQueryAssignedTargets(t, epochID1, true, n0.accAddr)
assigned1e1 := auditQueryAssignedTargets(t, epochID1, true, n1.accAddr)

tx0e1 := submitEpochReport(t, cli, n0.nodeName, epochID1, hostOpen, buildObs(assigned0e1.TargetSupernodeAccounts, n1.accAddr))
RequireTxSuccess(t, tx0e1)
tx1e1 := submitEpochReport(t, cli, n1.nodeName, epochID1, hostOpen, []string{
storageChallengeObservationJSON(targets1e1[0], []string{"PORT_STATE_OPEN"}),
})
tx1e1 := submitEpochReport(t, cli, n1.nodeName, epochID1, hostOpen, buildObs(assigned1e1.TargetSupernodeAccounts, ""))
RequireTxSuccess(t, tx1e1)

// Window 2: repeat -> node1 should be POSTPONED at window end due to consecutive unanimous CLOSED.
awaitAtLeastHeight(t, epoch2Start)
seed2 := headerHashAtHeight(t, sut.rpcAddr, epoch2Start)
targets0e2, ok := assignedTargets(seed2, senders, receivers, kEpoch, n0.accAddr)
require.True(t, ok)
require.Len(t, targets0e2, 1)
targets1e2, ok := assignedTargets(seed2, senders, receivers, kEpoch, n1.accAddr)
require.True(t, ok)
require.Len(t, targets1e2, 1)

tx0e2 := submitEpochReport(t, cli, n0.nodeName, epochID2, hostOpen, []string{
storageChallengeObservationJSON(targets0e2[0], []string{"PORT_STATE_CLOSED"}),
})
awaitAtLeastHeightWithSlackPeerPorts(t, epoch2Start)
assigned0e2 := auditQueryAssignedTargets(t, epochID2, true, n0.accAddr)
assigned1e2 := auditQueryAssignedTargets(t, epochID2, true, n1.accAddr)

tx0e2 := submitEpochReport(t, cli, n0.nodeName, epochID2, hostOpen, buildObs(assigned0e2.TargetSupernodeAccounts, n1.accAddr))
RequireTxSuccess(t, tx0e2)
tx1e2 := submitEpochReport(t, cli, n1.nodeName, epochID2, hostOpen, []string{
storageChallengeObservationJSON(targets1e2[0], []string{"PORT_STATE_OPEN"}),
})
tx1e2 := submitEpochReport(t, cli, n1.nodeName, epochID2, hostOpen, buildObs(assigned1e2.TargetSupernodeAccounts, ""))
RequireTxSuccess(t, tx1e2)

awaitAtLeastHeight(t, enforce2)
awaitAtLeastHeightWithSlackPeerPorts(t, enforce2)

require.Equal(t, "SUPERNODE_STATE_ACTIVE", querySupernodeLatestState(t, cli, n0.valAddr))
require.Equal(t, "SUPERNODE_STATE_POSTPONED", querySupernodeLatestState(t, cli, n1.valAddr))
Expand Down
Loading
Loading