diff --git a/.github/workflows/server-ci-nightly-race.yml b/.github/workflows/server-ci-nightly-race.yml new file mode 100644 index 00000000000..d206cf90efa --- /dev/null +++ b/.github/workflows/server-ci-nightly-race.yml @@ -0,0 +1,52 @@ +# Nightly race detector run. +# +# Runs all server tests with Go's -race detector enabled to catch +# data races. Runs sequentially (fullyparallel: false) on a 2-core +# runner to minimize resource usage — race detection adds significant +# overhead that makes parallel execution impractical. +# +# Schedule: Nightly at ~2am ET (6am UTC) +# +# Previously, -race was implicitly bundled with the binary params job +# via the fullyparallel=false → RACE_MODE coupling. This decouples +# the two concerns: binary params tests the Postgres driver encoding; +# this workflow tests for data races. +name: Server CI Nightly Race +on: + schedule: + - cron: "0 6 * * *" # Daily 6am UTC (~2am ET) + workflow_dispatch: # Manual trigger for on-demand race detection + +permissions: + contents: read + +jobs: + go: + name: Compute Go Version + runs-on: ubuntu-22.04 + outputs: + version: ${{ steps.calculate.outputs.GO_VERSION }} + steps: + - name: Checkout mattermost project + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + - name: Calculate version + id: calculate + working-directory: server/ + run: echo GO_VERSION=$(cat .go-version) >> "${GITHUB_OUTPUT}" + + test-race: + name: Race Detector + needs: go + uses: ./.github/workflows/server-test-template.yml + secrets: inherit + with: + name: Race Detector + datasource: postgres://mmuser:mostest@postgres:5432/mattermost_test?sslmode=disable&connect_timeout=10 + drivername: postgres + logsartifact: race-detector-server-test-logs + go-version: ${{ needs.go.outputs.version }} + fips-enabled: false + fullyparallel: false + race-enabled: true + runner: ubuntu-22.04 + allow-failure: true diff --git a/.github/workflows/server-ci-weekly.yml b/.github/workflows/server-ci-weekly.yml new file 
mode 100644 index 00000000000..e3283c7ee55 --- /dev/null +++ b/.github/workflows/server-ci-weekly.yml @@ -0,0 +1,75 @@ +# Weekly Server CI jobs that don't need to run on every push/PR. +# These are important for compliance and compatibility but have low +# regression rates, so running them weekly saves significant 8-core +# runner capacity (9 fewer concurrent 8-core jobs per master push). +# +# Schedule: Monday ~1am ET (5am UTC) +# +# Jobs moved here from server-ci.yml: +# - Postgres with binary parameters (1x 8-core) +# - Postgres FIPS unsharded (1x 8-core) — sharded runs stay in server-ci.yml for PR iteration +# - mmctl FIPS tests (1x 8-core) +name: Server CI Weekly +on: + schedule: + - cron: "0 5 * * 1" # Monday 5am UTC (~1am ET) + push: + branches: + - 'release-*' + workflow_dispatch: # Allow manual trigger for urgent FIPS/binary verification + +permissions: + contents: read + +jobs: + go: + name: Compute Go Version + runs-on: ubuntu-22.04 + outputs: + version: ${{ steps.calculate.outputs.GO_VERSION }} + steps: + - name: Checkout mattermost project + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + - name: Calculate version + id: calculate + working-directory: server/ + run: echo GO_VERSION=$(cat .go-version) >> "${GITHUB_OUTPUT}" + + test-postgres-binary: + name: Postgres with binary parameters + needs: go + uses: ./.github/workflows/server-test-template.yml + secrets: inherit + with: + name: Postgres with binary parameters + datasource: postgres://mmuser:mostest@postgres:5432/mattermost_test?sslmode=disable&connect_timeout=10&binary_parameters=yes + drivername: postgres + logsartifact: postgres-binary-server-test-logs + go-version: ${{ needs.go.outputs.version }} + fips-enabled: false + + test-postgres-normal-fips: + name: Postgres FIPS + needs: go + uses: ./.github/workflows/server-test-template.yml + secrets: inherit + with: + name: Postgres FIPS + datasource: 
postgres://mmuser:mostest-fips-test@postgres:5432/mattermost_test?sslmode=disable&connect_timeout=10 + drivername: postgres + logsartifact: postgres-server-fips-test-logs + go-version: ${{ needs.go.outputs.version }} + fips-enabled: true + + test-mmctl-fips: + name: Run mmctl tests (FIPS) + needs: go + uses: ./.github/workflows/mmctl-test-template.yml + secrets: inherit + with: + name: mmctl + datasource: postgres://mmuser:mostest-fips-test@postgres:5432/mattermost_test?sslmode=disable&connect_timeout=10 + drivername: postgres + logsartifact: mmctl-fips-test-logs + go-version: ${{ needs.go.outputs.version }} + fips-enabled: true diff --git a/.github/workflows/server-ci.yml b/.github/workflows/server-ci.yml index e325e7ddd4e..baaa6c6101f 100644 --- a/.github/workflows/server-ci.yml +++ b/.github/workflows/server-ci.yml @@ -195,20 +195,10 @@ jobs: echo "Making sure docs are updated" make mmctl-docs if [[ -n $(git status --porcelain) ]]; then echo "Please update the mmctl docs using make mmctl-docs"; exit 1; fi - test-postgres-binary: - if: github.event_name == 'push' # Only run postgres binary tests on master/release pushes: odds are low this regresses, so save the cycles for pull requests. - name: Postgres with binary parameters - needs: go - uses: ./.github/workflows/server-test-template.yml - secrets: inherit - with: - name: Postgres with binary parameters - datasource: postgres://mmuser:mostest@postgres:5432/mattermost_test?sslmode=disable&connect_timeout=10&binary_parameters=yes - drivername: postgres - logsartifact: postgres-binary-server-test-logs - go-version: ${{ needs.go.outputs.version }} - fips-enabled: false - fullyparallel: false + # NOTE: Postgres with binary parameters has been moved to server-ci-weekly.yml + # (runs Monday 1am EST / 5am UTC). Low regression risk doesn't justify + # consuming 8-core runners on every push. 
+ # -- Sharded into 4 parallel runners for ~88% wall-time improvement -- test-postgres-normal: name: Postgres (shard ${{ matrix.shard }}) @@ -255,11 +245,11 @@ jobs: elasticsearch-version: "8.9.0" test-target: "test-server-elasticsearch" + # FIPS tests: run on PRs when go.mod changed or branch name contains "fips". + # Sharded for fast iteration. Weekly workflow provides regular full coverage. test-postgres-normal-fips: - # Always run on pushes to master/release branches. - # For PRs, run when the branch name contains "fips" or any go.mod was changed. - if: github.event_name == 'push' || contains(github.head_ref, 'fips') || needs.go.outputs.gomod-changed == 'true' - name: Postgres FIPS (shard ${{ matrix.shard }}) + if: contains(github.head_ref, 'fips') || needs.go.outputs.gomod-changed == 'true' + name: "Postgres FIPS (shard ${{ matrix.shard }})" needs: go strategy: fail-fast: false @@ -305,6 +295,7 @@ jobs: enablecoverage: true go-version: ${{ needs.go.outputs.version }} fips-enabled: false + # runner: ubuntu-22.04 # Coverage is non-blocking (allow-failure), no need for 8-core shard-index: ${{ matrix.shard }} shard-total: 4 test-mmctl: @@ -320,9 +311,8 @@ jobs: go-version: ${{ needs.go.outputs.version }} fips-enabled: false test-mmctl-fips: + if: contains(github.head_ref, 'fips') || needs.go.outputs.gomod-changed == 'true' name: Run mmctl tests (FIPS) - # Skip FIPS testing for forks, which won't have docker login credentials. 
- if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository needs: go uses: ./.github/workflows/mmctl-test-template.yml secrets: inherit @@ -330,9 +320,10 @@ jobs: name: mmctl datasource: postgres://mmuser:mostest-fips-test@postgres:5432/mattermost_test?sslmode=disable&connect_timeout=10 drivername: postgres - logsartifact: mmctl-test-logs + logsartifact: mmctl-fips-test-logs go-version: ${{ needs.go.outputs.version }} fips-enabled: true + build-mattermost-server: name: Build mattermost server app needs: go diff --git a/.github/workflows/server-test-template.yml b/.github/workflows/server-test-template.yml index 46dfe422533..1a16bb8472a 100644 --- a/.github/workflows/server-test-template.yml +++ b/.github/workflows/server-test-template.yml @@ -50,6 +50,16 @@ on: required: false type: number default: 1 + runner: + description: "GitHub-hosted runner label (default: ubuntu-latest-8-cores)" + required: false + type: string + default: "ubuntu-latest-8-cores" + race-enabled: + description: "Run tests with Go race detector (-race)" + required: false + type: boolean + default: false permissions: id-token: write @@ -58,7 +68,7 @@ permissions: jobs: test: name: ${{ inputs.name }} - runs-on: ubuntu-latest-8-cores + runs-on: ${{ inputs.runner }} continue-on-error: ${{ inputs.allow-failure }} # Used to avoid blocking PRs in case of flakiness env: COMPOSE_PROJECT_NAME: ghactions @@ -175,7 +185,7 @@ jobs: env: BUILD_IMAGE: ${{ steps.build.outputs.BUILD_IMAGE }} run: | - if [[ ${{ github.ref_name }} == 'master' && ${{ inputs.fullyparallel }} != true && "${{ inputs.test-target }}" == "test-server" ]]; then + if [[ "${{ inputs.race-enabled }}" == "true" ]]; then export RACE_MODE="-race" fi diff --git a/.github/workflows/yamllint.yml b/.github/workflows/yamllint.yml new file mode 100644 index 00000000000..2d4e7ec4bbb --- /dev/null +++ b/.github/workflows/yamllint.yml @@ -0,0 +1,28 @@ +name: YAML Lint + +on: + push: + branches: + - master + 
- release-* + pull_request: + paths: + - ".github/workflows/**" + - ".yamllint" + +concurrency: + group: ${{ github.event_name == 'pull_request' && format('{0}-{1}', github.workflow, github.ref) || github.run_id }} + cancel-in-progress: ${{ github.event_name == 'pull_request' }} + +permissions: + contents: read + +jobs: + yamllint: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + persist-credentials: false + - name: Lint workflow YAML files + run: yamllint .github/workflows/ diff --git a/.yamllint b/.yamllint new file mode 100644 index 00000000000..894f11a78ed --- /dev/null +++ b/.yamllint @@ -0,0 +1,7 @@ +extends: default + +rules: + key-duplicates: enable + empty-lines: disable + line-length: disable + trailing-spaces: disable diff --git a/server/channels/api4/channel_test.go b/server/channels/api4/channel_test.go index af40f46d7f6..f81ddac25d4 100644 --- a/server/channels/api4/channel_test.go +++ b/server/channels/api4/channel_test.go @@ -5141,7 +5141,6 @@ func TestAddChannelMembers(t *testing.T) { func TestAddChannelMemberFromThread(t *testing.T) { mainHelper.Parallel(t) - t.Skip("MM-41285") th := Setup(t).InitBasic(t) team := th.BasicTeam user := th.BasicUser @@ -5191,12 +5190,11 @@ func TestAddChannelMemberFromThread(t *testing.T) { _, _, err = th.SystemAdminClient.AddChannelMemberWithRootId(context.Background(), publicChannel.Id, user.Id, rpost.Id) require.NoError(t, err) - // Threadmembership should exist for added user - ut, _, err := th.Client.GetUserThread(context.Background(), user.Id, team.Id, rpost.Id, false) - require.NoError(t, err) - // Should have two mentions. There might be a race condition - // here between the "added user to the channel" message and the GetUserThread call - require.LessOrEqual(t, int64(2), ut.UnreadMentions) + // Thread membership and mention counts may take a moment to propagate (MM-41285). 
+ require.Eventually(t, func() bool { + ut, _, getErr := th.Client.GetUserThread(context.Background(), user.Id, team.Id, rpost.Id, false) + return getErr == nil && ut.UnreadMentions >= 2 + }, 10*time.Second, 200*time.Millisecond, "expected at least 2 unread mentions in thread") var caught bool func() { @@ -5215,7 +5213,7 @@ func TestAddChannelMemberFromThread(t *testing.T) { require.EqualValues(t, float64(0), data["previous_unread_replies"]) require.EqualValues(t, float64(0), data["previous_unread_mentions"]) } - case <-time.After(2 * time.Second): + case <-time.After(15 * time.Second): return } } diff --git a/server/channels/api4/oauth_test.go b/server/channels/api4/oauth_test.go index feb3e7e06fe..9080d862629 100644 --- a/server/channels/api4/oauth_test.go +++ b/server/channels/api4/oauth_test.go @@ -85,7 +85,7 @@ func TestCreateOAuthApp(t *testing.T) { } func TestUpdateOAuthApp(t *testing.T) { - t.Skip("https://mattermost.atlassian.net/browse/MM-62895") + // MM-62895: re-enabled to collect failure data (14mo, empty Jira description). 
mainHelper.Parallel(t) diff --git a/server/channels/api4/post_test.go b/server/channels/api4/post_test.go index 4dba37c82f1..31522e02d75 100644 --- a/server/channels/api4/post_test.go +++ b/server/channels/api4/post_test.go @@ -4614,7 +4614,6 @@ func TestSearchPostsWithDateFlags(t *testing.T) { } func TestGetFileInfosForPost(t *testing.T) { - t.Skip("MM-46902") th := Setup(t).InitBasic(t) client := th.Client @@ -4622,17 +4621,22 @@ func TestGetFileInfosForPost(t *testing.T) { data, err := testutils.ReadTestFile("test.png") require.NoError(t, err) for i := range 3 { - fileResp, _, _ := client.UploadFile(context.Background(), data, th.BasicChannel.Id, "test.png") + fileResp, _, uploadErr := client.UploadFile(context.Background(), data, th.BasicChannel.Id, "test.png") + require.NoError(t, uploadErr, "failed to upload file %d", i) fileIds[i] = fileResp.FileInfos[0].Id } post := &model.Post{ChannelId: th.BasicChannel.Id, Message: "zz" + model.NewId() + "a", FileIds: fileIds} - post, _, _ = client.CreatePost(context.Background(), post) - - infos, resp, err := client.GetFileInfosForPost(context.Background(), post.Id, "") + post, _, err = client.CreatePost(context.Background(), post) require.NoError(t, err) - require.Len(t, infos, 3, "missing file infos") + // File info association can be async; poll until all 3 are visible (MM-46902). 
+ var infos []*model.FileInfo + var resp *model.Response + require.Eventually(t, func() bool { + infos, resp, err = client.GetFileInfosForPost(context.Background(), post.Id, "") + return err == nil && len(infos) == 3 + }, 10*time.Second, 200*time.Millisecond, "expected 3 file infos") found := false for _, info := range infos { @@ -5326,14 +5330,14 @@ func TestGetPostStripActionIntegrations(t *testing.T) { } func TestPostReminder(t *testing.T) { - t.Skip("MM-60329") - th := Setup(t).InitBasic(t) client := th.Client userWSClient := th.CreateConnectedWebSocketClient(t) - targetTime := time.Now().UTC().Unix() + // Set target time 1s in the future so the reminder processor picks it up + // on its next tick rather than relying on it already being in the past (MM-60329). + targetTime := time.Now().UTC().Unix() + 1 resp, err := client.SetPostReminder(context.Background(), &model.PostReminder{ TargetTime: targetTime, PostId: th.BasicPost.Id, @@ -5374,7 +5378,7 @@ func TestPostReminder(t *testing.T) { require.Equal(t, th.BasicTeam.Name, parsedPost.GetProp("team_name").(string)) return } - case <-time.After(5 * time.Second): + case <-time.After(15 * time.Second): return } } diff --git a/server/channels/api4/shared_channel_metadata_test.go b/server/channels/api4/shared_channel_metadata_test.go index f2052431482..2d45a821ecc 100644 --- a/server/channels/api4/shared_channel_metadata_test.go +++ b/server/channels/api4/shared_channel_metadata_test.go @@ -6,6 +6,7 @@ package api4 import ( "net/http" "net/http/httptest" + "sync" "testing" "time" @@ -114,8 +115,8 @@ func TestSharedChannelPostMetadataSync(t *testing.T) { th, service := setupTestEnvironment(t) t.Run("Post Priority Metadata Self-Referential Sync", func(t *testing.T) { - t.Skip("MM-64687") var syncedPosts []*model.Post + var mu sync.Mutex // Create test HTTP server using self-referential approach testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -135,7 +136,9 @@ func 
TestSharedChannelPostMetadataSync(t *testing.T) { post.Metadata.Priority.RequestedAck, post.Metadata.Priority.PersistentNotifications) } + mu.Lock() syncedPosts = append(syncedPosts, post) + mu.Unlock() } // Update test server to use the sync handler @@ -165,12 +168,16 @@ func TestSharedChannelPostMetadataSync(t *testing.T) { // Wait for sync completion using Eventually pattern require.Eventually(t, func() bool { + mu.Lock() + defer mu.Unlock() return len(syncedPosts) >= 2 }, 5*time.Second, 100*time.Millisecond, "Should receive synced posts via self-referential handler") // Verify priority metadata is preserved through the complete sync flow + mu.Lock() t.Logf("Found %d synced posts", len(syncedPosts)) syncedPost := syncedPosts[len(syncedPosts)-1] + mu.Unlock() require.NotNil(t, syncedPost.Metadata, "Post metadata should be preserved") require.NotNil(t, syncedPost.Metadata.Priority, "Priority metadata should be preserved") assert.Equal(t, model.PostPriorityUrgent, *syncedPost.Metadata.Priority.Priority, "Priority should be preserved") @@ -181,6 +188,7 @@ func TestSharedChannelPostMetadataSync(t *testing.T) { t.Run("Post Acknowledgement Metadata Self-Referential Sync", func(t *testing.T) { EnsureCleanState(t, th, th.App.Srv().Store()) var syncedPosts []*model.Post + var mu sync.Mutex testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { writeOKResponse(w) @@ -191,7 +199,9 @@ func TestSharedChannelPostMetadataSync(t *testing.T) { syncHandler := NewSelfReferentialSyncHandler(t, service, selfCluster) syncHandler.OnPostSync = func(post *model.Post) { + mu.Lock() syncedPosts = append(syncedPosts, post) + mu.Unlock() } testServer.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -227,11 +237,15 @@ func TestSharedChannelPostMetadataSync(t *testing.T) { // Wait for sync completion require.Eventually(t, func() bool { + mu.Lock() + defer mu.Unlock() return len(syncedPosts) >= 2 }, 5*time.Second, 
100*time.Millisecond, "Should receive synced posts via self-referential handler") // Verify acknowledgement metadata is preserved + mu.Lock() syncedPost := syncedPosts[len(syncedPosts)-1] + mu.Unlock() require.NotNil(t, syncedPost.Metadata, "Post metadata should be preserved") require.NotNil(t, syncedPost.Metadata.Priority, "Priority metadata should be preserved") assert.Equal(t, model.PostPriorityUrgent, *syncedPost.Metadata.Priority.Priority, "Priority should be preserved") @@ -240,10 +254,10 @@ func TestSharedChannelPostMetadataSync(t *testing.T) { }) t.Run("Acknowledgement Count Sync Back to Sender", func(t *testing.T) { - t.Skip("MM-64687") EnsureCleanState(t, th, th.App.Srv().Store()) var syncedPosts []*model.Post var postIdToSync string + var mu sync.Mutex testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { writeOKResponse(w) @@ -254,6 +268,8 @@ func TestSharedChannelPostMetadataSync(t *testing.T) { syncHandler := NewSelfReferentialSyncHandler(t, service, selfCluster) syncHandler.OnPostSync = func(post *model.Post) { + mu.Lock() + defer mu.Unlock() if post.Id == postIdToSync { t.Logf("Received sync for target post: ID=%s, HasAcks=%v", post.Id, post.Metadata != nil && post.Metadata.Acknowledgements != nil) @@ -294,6 +310,8 @@ func TestSharedChannelPostMetadataSync(t *testing.T) { // Wait for initial sync require.Eventually(t, func() bool { + mu.Lock() + defer mu.Unlock() return len(syncedPosts) >= 2 }, 5*time.Second, 100*time.Millisecond, "Should complete initial sync") @@ -308,11 +326,15 @@ func TestSharedChannelPostMetadataSync(t *testing.T) { require.Nil(t, appErr) // Clear previous synced posts and trigger sync + mu.Lock() syncedPosts = syncedPosts[:0] + mu.Unlock() service.NotifyChannelChanged(testChannel.Id) // Wait for acknowledgement sync require.Eventually(t, func() bool { + mu.Lock() + defer mu.Unlock() for _, post := range syncedPosts { if post.Id == postIdToSync && post.Metadata != nil && @@ -325,6 
+347,7 @@ func TestSharedChannelPostMetadataSync(t *testing.T) { }, 5*time.Second, 100*time.Millisecond, "Should sync acknowledgements back") // Verify acknowledgement was synced + mu.Lock() var syncedPostWithAcks *model.Post for _, post := range syncedPosts { if post.Id == postIdToSync && post.Metadata != nil && post.Metadata.Acknowledgements != nil { @@ -332,6 +355,7 @@ func TestSharedChannelPostMetadataSync(t *testing.T) { break } } + mu.Unlock() require.NotNil(t, syncedPostWithAcks, "Should find synced post with acknowledgements") require.NotNil(t, syncedPostWithAcks.Metadata.Acknowledgements, "Acknowledgements should exist") @@ -344,9 +368,9 @@ func TestSharedChannelPostMetadataSync(t *testing.T) { }) t.Run("Persistent Notifications Self-Referential Sync", func(t *testing.T) { - t.Skip("MM-64687") EnsureCleanState(t, th, th.App.Srv().Store()) var syncedPosts []*model.Post + var mu sync.Mutex testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { writeOKResponse(w) @@ -357,7 +381,9 @@ func TestSharedChannelPostMetadataSync(t *testing.T) { syncHandler := NewSelfReferentialSyncHandler(t, service, selfCluster) syncHandler.OnPostSync = func(post *model.Post) { + mu.Lock() syncedPosts = append(syncedPosts, post) + mu.Unlock() } testServer.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -384,11 +410,15 @@ func TestSharedChannelPostMetadataSync(t *testing.T) { // Wait for sync completion require.Eventually(t, func() bool { + mu.Lock() + defer mu.Unlock() return len(syncedPosts) >= 2 }, 5*time.Second, 100*time.Millisecond, "Should receive synced posts via self-referential handler") // Verify persistent notifications setting is preserved + mu.Lock() syncedPost := syncedPosts[len(syncedPosts)-1] + mu.Unlock() require.NotNil(t, syncedPost.Metadata, "Post metadata should be preserved") require.NotNil(t, syncedPost.Metadata.Priority, "Priority metadata should be preserved") assert.Equal(t, 
model.PostPriorityUrgent, *syncedPost.Metadata.Priority.Priority, "Priority should be preserved") @@ -397,11 +427,11 @@ func TestSharedChannelPostMetadataSync(t *testing.T) { }) t.Run("Cross-Cluster Acknowledgement End-to-End Flow", func(t *testing.T) { - t.Skip("MM-64687") EnsureCleanState(t, th, th.App.Srv().Store()) var syncedPostsServerA []*model.Post var syncedPostsServerB []*model.Post var postIdToTrack string + var muA, muB sync.Mutex // Create test HTTP servers for both "clusters" testServerA := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -502,6 +532,8 @@ func TestSharedChannelPostMetadataSync(t *testing.T) { // Initialize sync handlers for both clusters syncHandlerA := NewSelfReferentialSyncHandler(t, service, clusterA) syncHandlerA.OnPostSync = func(post *model.Post) { + muA.Lock() + defer muA.Unlock() t.Logf("Cluster A received sync: ID=%s, Message=%s, HasAcks=%v", post.Id, post.Message, post.Metadata != nil && post.Metadata.Acknowledgements != nil) @@ -513,6 +545,8 @@ func TestSharedChannelPostMetadataSync(t *testing.T) { syncHandlerB := NewSelfReferentialSyncHandler(t, service, clusterB) syncHandlerB.OnPostSync = func(post *model.Post) { + muB.Lock() + defer muB.Unlock() t.Logf("Cluster B received sync: ID=%s, Message=%s, RequestedAck=%v", post.Id, post.Message, post.Metadata != nil && post.Metadata.Priority != nil && post.Metadata.Priority.RequestedAck != nil && *post.Metadata.Priority.RequestedAck) @@ -557,6 +591,8 @@ func TestSharedChannelPostMetadataSync(t *testing.T) { // Wait for Server B to receive the post var syncedPostIdOnServerB string require.Eventually(t, func() bool { + muB.Lock() + defer muB.Unlock() for _, post := range syncedPostsServerB { if post.Message == originalPost.Message && post.Metadata != nil && post.Metadata.Priority != nil && post.Metadata.Priority.RequestedAck != nil && *post.Metadata.Priority.RequestedAck { @@ -588,14 +624,20 @@ func TestSharedChannelPostMetadataSync(t *testing.T) 
{ // STEP 4: Acknowledgement syncs back from Server B to Server A t.Log("=== STEP 4: Acknowledgement syncs back from Server B to Server A ===") // Clear previous sync data to focus on acknowledgement sync + muA.Lock() syncedPostsServerA = syncedPostsServerA[:0] + muA.Unlock() + muB.Lock() syncedPostsServerB = syncedPostsServerB[:0] + muB.Unlock() // Trigger sync to send acknowledgement back to Server A service.NotifyChannelChanged(testChannel.Id) // Wait for Server A to receive the acknowledgement sync require.Eventually(t, func() bool { + muA.Lock() + defer muA.Unlock() for _, post := range syncedPostsServerA { if post.Id == postIdToTrack && post.Metadata != nil && post.Metadata.Acknowledgements != nil { t.Logf("Server A received post %s with %d acknowledgements", post.Id, len(post.Metadata.Acknowledgements)) @@ -607,6 +649,7 @@ func TestSharedChannelPostMetadataSync(t *testing.T) { // STEP 5: Verify the complete acknowledgement flow t.Log("=== STEP 5: Verify complete acknowledgement flow ===") + muA.Lock() var serverAPostWithAcks *model.Post for _, post := range syncedPostsServerA { if post.Id == postIdToTrack && post.Metadata != nil && post.Metadata.Acknowledgements != nil { @@ -614,6 +657,7 @@ func TestSharedChannelPostMetadataSync(t *testing.T) { break } } + muA.Unlock() require.NotNil(t, serverAPostWithAcks, "Server A should receive post with acknowledgements") require.NotNil(t, serverAPostWithAcks.Metadata.Acknowledgements, "Acknowledgements should exist") @@ -632,13 +676,17 @@ func TestSharedChannelPostMetadataSync(t *testing.T) { // STEP 6: Test echo prevention - verify no duplicate acknowledgements t.Log("=== STEP 6: Test echo prevention ===") + muA.Lock() syncedPostsServerA = syncedPostsServerA[:0] + muA.Unlock() // Trigger another sync to ensure no duplicates are created service.NotifyChannelChanged(testChannel.Id) // Verify acknowledgement count remains 1 (no duplicates) require.Eventually(t, func() bool { + muA.Lock() + defer muA.Unlock() for _, post 
:= range syncedPostsServerA { if post.Id == postIdToTrack && post.Metadata != nil && post.Metadata.Acknowledgements != nil { return len(post.Metadata.Acknowledgements) == 1 diff --git a/server/channels/api4/status_test.go b/server/channels/api4/status_test.go index 6ea5e593ef5..58a01a92846 100644 --- a/server/channels/api4/status_test.go +++ b/server/channels/api4/status_test.go @@ -54,7 +54,6 @@ func TestGetUserStatus(t *testing.T) { }) t.Run("dnd status timed restore after time interval", func(t *testing.T) { - t.Skip("https://mattermost.atlassian.net/browse/MM-63533") task := model.CreateRecurringTaskFromNextIntervalTime("Unset DND Statuses From Test", th.App.UpdateDNDStatusOfUsers, 1*time.Second) defer task.Cancel() th.App.SetStatusOnline(th.BasicUser.Id, true) @@ -65,10 +64,12 @@ func TestGetUserStatus(t *testing.T) { userStatus, _, err = client.GetUserStatus(context.Background(), th.BasicUser.Id, "") require.NoError(t, err) assert.Equal(t, "dnd", userStatus.Status) - time.Sleep(3 * time.Second) - userStatus, _, err = client.GetUserStatus(context.Background(), th.BasicUser.Id, "") - require.NoError(t, err) - assert.Equal(t, "online", userStatus.Status) + // Poll for status restore instead of sleeping a fixed duration (MM-63533). + // The recurring task runs every 1s but can lag under CI load. 
+ require.Eventually(t, func() bool { + userStatus, _, err = client.GetUserStatus(context.Background(), th.BasicUser.Id, "") + return err == nil && userStatus.Status == "online" + }, 15*time.Second, 500*time.Millisecond, "DND status was not restored to online within timeout") }) t.Run("back to offline status", func(t *testing.T) { diff --git a/server/channels/api4/user_test.go b/server/channels/api4/user_test.go index fbf844e6330..b899dbdb072 100644 --- a/server/channels/api4/user_test.go +++ b/server/channels/api4/user_test.go @@ -1855,7 +1855,10 @@ func TestAutocompleteUsersInChannel(t *testing.T) { }) t.Run("Check OutOfChannel results with/without VIEW_MEMBERS permissions", func(t *testing.T) { - t.Skip("https://mattermost.atlassian.net/browse/MM-61041") + // MM-61041: Re-enabled to collect failure data (17mo, "Broken Test"). + // This subtest is fragile by design — shares th.Client with the parent, + // mutates global permissions/license/config. If it still fails, rewrite + // as focused app-layer unit tests rather than fixing shared state. 
th.App.UpdateConfig(func(cfg *model.Config) { *cfg.GuestAccountsSettings.Enable = true }) th.App.Srv().SetLicense(model.NewTestLicense()) @@ -3910,8 +3913,6 @@ func TestUpdateUserHashedPassword(t *testing.T) { } func TestResetPassword(t *testing.T) { - t.Skip("test disabled during old build server changes, should be investigated") - th := Setup(t).InitBasic(t) _, err := th.Client.Logout(context.Background()) require.NoError(t, err) diff --git a/server/channels/app/channel_test.go b/server/channels/app/channel_test.go index 776c2d2ab07..37dbf17df76 100644 --- a/server/channels/app/channel_test.go +++ b/server/channels/app/channel_test.go @@ -711,7 +711,6 @@ func TestGetDirectChannelCreatesChannelMemberHistoryRecord(t *testing.T) { } func TestAddUserToChannelCreatesChannelMemberHistoryRecord(t *testing.T) { - t.Skip("MM-67041") mainHelper.Parallel(t) th := Setup(t).InitBasic(t).DeleteBots(t) @@ -726,11 +725,13 @@ func TestAddUserToChannelCreatesChannelMemberHistoryRecord(t *testing.T) { channel := th.createChannel(t, th.BasicTeam, model.ChannelTypeOpen) + startTime := model.GetMillis() _, appErr = th.App.AddUserToChannel(th.Context, user, channel, false) require.Nil(t, appErr, "Failed to add user to channel.") // there should be a ChannelMemberHistory record for the user - histories, nErr := th.App.Srv().Store().ChannelMemberHistory().GetUsersInChannelDuring(model.GetMillis()-100, model.GetMillis()+100, []string{channel.Id}) + // Use a wide time window (±10s) to avoid flaky failures under CI load (MM-67041) + histories, nErr := th.App.Srv().Store().ChannelMemberHistory().GetUsersInChannelDuring(startTime-10000, model.GetMillis()+10000, []string{channel.Id}) require.NoError(t, nErr) assert.Len(t, histories, 2) channelMemberHistoryUserIds := make([]string, 0) @@ -971,7 +972,6 @@ func TestLeaveLastChannel(t *testing.T) { } func TestAddChannelMemberNoUserRequestor(t *testing.T) { - t.Skip("MM-67037") mainHelper.Parallel(t) th := Setup(t).InitBasic(t) @@ -986,11 +986,13 
@@ func TestAddChannelMemberNoUserRequestor(t *testing.T) { channel := th.createChannel(t, th.BasicTeam, model.ChannelTypeOpen) + startTime := model.GetMillis() _, appErr = th.App.AddChannelMember(th.Context, user.Id, channel, ChannelMemberOpts{}) require.Nil(t, appErr, "Failed to add user to channel.") // there should be a ChannelMemberHistory record for the user - histories, nErr := th.App.Srv().Store().ChannelMemberHistory().GetUsersInChannelDuring(model.GetMillis()-100, model.GetMillis()+100, []string{channel.Id}) + // Use a wide time window (±10s) to avoid flaky failures under CI load (MM-67037) + histories, nErr := th.App.Srv().Store().ChannelMemberHistory().GetUsersInChannelDuring(startTime-10000, model.GetMillis()+10000, []string{channel.Id}) require.NoError(t, nErr) assert.Len(t, histories, 2) channelMemberHistoryUserIds := make([]string, 0) diff --git a/server/channels/app/shared_channel_membership_sync_self_referential_test.go b/server/channels/app/shared_channel_membership_sync_self_referential_test.go index 991acc86725..80e892f53fc 100644 --- a/server/channels/app/shared_channel_membership_sync_self_referential_test.go +++ b/server/channels/app/shared_channel_membership_sync_self_referential_test.go @@ -518,7 +518,6 @@ func TestSharedChannelMembershipSyncSelfReferential(t *testing.T) { assert.Contains(t, syncedInSecondCall, user3.Id, "Second sync must include the new user") }) t.Run("Test 4: Sync failure and recovery", func(t *testing.T) { - t.Skip("MM-64687") // This test verifies that membership sync handles remote server failures gracefully // and successfully syncs members once the remote server recovers. 
// We test that: @@ -530,6 +529,7 @@ func TestSharedChannelMembershipSyncSelfReferential(t *testing.T) { var failureMode atomic.Bool failureMode.Store(true) var successfulSyncs []string + var syncsMu sync.Mutex var selfCluster *model.RemoteCluster var syncHandler *SelfReferentialSyncHandler @@ -597,10 +597,14 @@ func TestSharedChannelMembershipSyncSelfReferential(t *testing.T) { // Initialize sync handler with callbacks syncHandler = NewSelfReferentialSyncHandler(t, service, selfCluster) syncHandler.OnBatchSync = func(userIds []string, messageNumber int32) { + syncsMu.Lock() successfulSyncs = append(successfulSyncs, userIds...) + syncsMu.Unlock() } syncHandler.OnIndividualSync = func(userId string, messageNumber int32) { + syncsMu.Lock() successfulSyncs = append(successfulSyncs, userId) + syncsMu.Unlock() } // Add a user to sync @@ -621,7 +625,9 @@ func TestSharedChannelMembershipSyncSelfReferential(t *testing.T) { initialAttempts := syncAttempts.Load() assert.Greater(t, initialAttempts, int32(0), "Should have attempted sync") + syncsMu.Lock() assert.Empty(t, successfulSyncs, "No successful syncs during failure mode") + syncsMu.Unlock() // Recover from failure mode failureMode.Store(false) @@ -631,6 +637,8 @@ func TestSharedChannelMembershipSyncSelfReferential(t *testing.T) { // Wait for successful sync with more robust checking require.Eventually(t, func() bool { + syncsMu.Lock() + defer syncsMu.Unlock() return slices.Contains(successfulSyncs, testUser.Id) }, 15*time.Second, 100*time.Millisecond, "Should have successful sync after recovery") diff --git a/server/channels/store/storetest/file_info_store.go b/server/channels/store/storetest/file_info_store.go index 72c315b3947..b7c581a8986 100644 --- a/server/channels/store/storetest/file_info_store.go +++ b/server/channels/store/storetest/file_info_store.go @@ -871,18 +871,20 @@ func testFileInfoGetStorageUsage(t *testing.T, rctx request.CTX, ss store.Store) } func testGetUptoNSizeFileTime(t *testing.T, rctx 
request.CTX, ss store.Store, s SqlStore) { - t.Skip("MM-53905") - _, err := ss.FileInfo().GetUptoNSizeFileTime(0) assert.Error(t, err) _, err = ss.FileInfo().GetUptoNSizeFileTime(-1) assert.Error(t, err) - _, err = ss.FileInfo().PermanentDeleteBatch(rctx, model.GetMillis(), 100000) + // Delete all existing file infos so parallel tests don't interfere with + // the cumulative size calculation (MM-53905). + _, err = ss.FileInfo().PermanentDeleteBatch(rctx, model.GetMillis()+3600000, 100000) require.NoError(t, err) + // Use far-future timestamps so these are always "most recent" even if + // parallel tests create file infos concurrently. diff := int64(10000) - now := utils.MillisFromTime(time.Now()) + diff + now := utils.MillisFromTime(time.Now()) + 3600000 // 1 hour in the future f1, err := ss.FileInfo().Save(rctx, &model.FileInfo{ PostId: model.NewId(), diff --git a/server/channels/store/storetest/post_store.go b/server/channels/store/storetest/post_store.go index 300fb56ba85..fd1b356ffee 100644 --- a/server/channels/store/storetest/post_store.go +++ b/server/channels/store/storetest/post_store.go @@ -5474,27 +5474,30 @@ func getPostIds(posts []*model.Post, morePosts ...*model.Post) []string { } func testGetNthRecentPostTime(t *testing.T, rctx request.CTX, ss store.Store) { - t.Skip("https://mattermost.atlassian.net/browse/MM-64438") - _, err := ss.Post().GetNthRecentPostTime(0) assert.Error(t, err) _, err = ss.Post().GetNthRecentPostTime(-1) assert.Error(t, err) + // Use timestamps 1 hour in the future so these posts are always "most recent" + // regardless of what parallel tests create concurrently (MM-64438). 
diff := int64(10000) - now := utils.MillisFromTime(time.Now()) + diff + now := utils.MillisFromTime(time.Now()) + 3600000 + + userId := model.NewId() + channelId := model.NewId() p1 := &model.Post{} - p1.ChannelId = model.NewId() - p1.UserId = model.NewId() + p1.ChannelId = channelId + p1.UserId = userId p1.Message = "test" p1.CreateAt = now p1, err = ss.Post().Save(rctx, p1) require.NoError(t, err) p2 := &model.Post{} - p2.ChannelId = p1.ChannelId - p2.UserId = p1.UserId + p2.ChannelId = channelId + p2.UserId = userId p2.Message = p1.Message now = now + diff p2.CreateAt = now @@ -5512,16 +5515,16 @@ func testGetNthRecentPostTime(t *testing.T, rctx request.CTX, ss store.Store) { b1 := &model.Post{} b1.Message = "bot test" - b1.ChannelId = p1.ChannelId + b1.ChannelId = channelId b1.UserId = bot1.UserId now = now + diff b1.CreateAt = now - _, err = ss.Post().Save(rctx, b1) + b1, err = ss.Post().Save(rctx, b1) require.NoError(t, err) p3 := &model.Post{} - p3.ChannelId = p1.ChannelId - p3.UserId = p1.UserId + p3.ChannelId = channelId + p3.UserId = userId p3.Message = p1.Message now = now + diff p3.CreateAt = now @@ -5530,23 +5533,32 @@ func testGetNthRecentPostTime(t *testing.T, rctx request.CTX, ss store.Store) { s1 := &model.Post{} s1.Type = model.PostTypeJoinChannel - s1.ChannelId = p1.ChannelId + s1.ChannelId = channelId s1.UserId = model.NewId() s1.Message = "system_join_channel message" now = now + diff s1.CreateAt = now - _, err = ss.Post().Save(rctx, s1) + s1, err = ss.Post().Save(rctx, s1) require.NoError(t, err) p4 := &model.Post{} - p4.ChannelId = p1.ChannelId - p4.UserId = p1.UserId + p4.ChannelId = channelId + p4.UserId = userId p4.Message = p1.Message now = now + diff p4.CreateAt = now p4, err = ss.Post().Save(rctx, p4) require.NoError(t, err) + // Clean up far-future posts so they don't leak into other tests.
+ defer func() { + for _, p := range []*model.Post{p1, p2, b1, p3, s1, p4} { + if delErr := ss.Post().PermanentDelete(rctx, p.Id); delErr != nil { + t.Logf("failed to delete post %s: %v", p.Id, delErr) + } + } + }() + r, err := ss.Post().GetNthRecentPostTime(1) assert.NoError(t, err) assert.Equal(t, p4.CreateAt, r) diff --git a/server/channels/store/storetest/thread_store.go b/server/channels/store/storetest/thread_store.go index e77bb26ccd6..7baf2ae1a7c 100644 --- a/server/channels/store/storetest/thread_store.go +++ b/server/channels/store/storetest/thread_store.go @@ -418,8 +418,24 @@ func testThreadStorePopulation(t *testing.T, rctx request.CTX, ss store.Store) { } }) t.Run("Get unread reply counts for thread", func(t *testing.T) { - t.Skip("MM-41797") + // Create posts with explicit timestamp spacing to avoid timestamp + // collisions that make MarkAsRead boundaries ambiguous (MM-41797). + // makeSomePosts doesn't set CreateAt, so posts can share the same + // millisecond, causing MarkAsRead(rootPost.CreateAt) to also mark + // the replies as read. newPosts := makeSomePosts(false) + + // Ensure replies have later timestamps than the root post. + // The root post is newPosts[0], replies are newPosts[1] and newPosts[2]. + baseTime := newPosts[0].CreateAt + for i := 1; i < len(newPosts); i++ { + if newPosts[i].RootId != "" && newPosts[i].CreateAt <= baseTime { + newPosts[i].CreateAt = baseTime + int64(i)*1000 + _, sErr := ss.Post().Overwrite(rctx, newPosts[i]) + require.NoError(t, sErr, "failed to update post timestamp") + } + } + opts := store.ThreadMembershipOpts{ Following: true, IncrementMentions: false,