Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .vscode/settings.json
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
{
"eslint.workingDirectories": ["."],
"typescript.tsdk": "./node_modules/typescript/lib",
"cSpell.words": [
"autotranslate",
Expand Down
93 changes: 57 additions & 36 deletions apps/meteor/app/authorization/server/functions/upsertPermissions.ts
Original file line number Diff line number Diff line change
Expand Up @@ -43,68 +43,89 @@ export const upsertPermissions = async (): Promise<void> => {

const createSettingPermission = async function (
setting: ISetting,
previousSettingPermissions: {
[key: string]: IPermission;
},
previousSettingPermissions: Record<string, IPermission>,
): Promise<void> {
const { _id: permissionId, doc } = buildSettingPermissionDoc(setting, previousSettingPermissions);
try {
await Permissions.updateOne({ _id: permissionId }, { $set: doc }, { upsert: true });
} catch (e) {
if (!(e as Error).message.includes('E11000')) {
await Permissions.updateOne({ _id: permissionId }, { $set: doc }, { upsert: true });
}
}
Comment on lines +49 to +55
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Fix inverted duplicate-key retry logic in createSettingPermission.

At Line 52, the condition is reversed. E11000 is currently ignored, while non-E11000 is retried. This can leave the permission doc stale after duplicate-key races.

🐛 Proposed fix
 		try {
 			await Permissions.updateOne({ _id: permissionId }, { $set: doc }, { upsert: true });
 		} catch (e) {
-			if (!(e as Error).message.includes('E11000')) {
-				await Permissions.updateOne({ _id: permissionId }, { $set: doc }, { upsert: true });
-			}
+			if ((e as Error).message.includes('E11000')) {
+				await Permissions.updateOne({ _id: permissionId }, { $set: doc });
+			} else {
+				throw e;
+			}
 		}
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@apps/meteor/app/authorization/server/functions/upsertPermissions.ts` around
lines 49 - 55, In createSettingPermission, the duplicate-key retry logic is
inverted: the catch block currently retries only when the error is NOT E11000;
instead, detect E11000 (duplicate key) and perform the retry of
Permissions.updateOne({_id: permissionId}, {$set: doc}, {upsert: true}) only
when the caught error message includes 'E11000'; leave other errors to
propagate. Update the catch to check (e as Error).message.includes('E11000') and
call Permissions.updateOne in that branch, otherwise rethrow the error.

delete previousSettingPermissions[permissionId];
};

const buildSettingPermissionDoc = function (
setting: ISetting,
previousSettingPermissions: Record<string, IPermission>,
): { _id: string; doc: Omit<IPermission, '_id'> } {
const permissionId = getSettingPermissionId(setting._id);
const permission: Omit<IPermission, '_id' | '_updatedAt'> = {
level: CONSTANTS.SETTINGS_LEVEL as 'settings' | undefined,
// copy those setting-properties which are needed to properly publish the setting-based permissions
settingId: setting._id,
group: setting.group,
section: setting.section ?? undefined,
sorter: setting.sorter,
roles: [],
roles: previousSettingPermissions[permissionId]?.roles ?? [],
};
// copy previously assigned roles if available
if (previousSettingPermissions[permissionId]?.roles) {
permission.roles = previousSettingPermissions[permissionId].roles;
}
if (setting.group) {
permission.groupPermissionId = getSettingPermissionId(setting.group);
}
if (setting.section) {
permission.sectionPermissionId = getSettingPermissionId(setting.section);
}
return { _id: permissionId, doc: { ...permission, _updatedAt: new Date() } };
};

const existent = await Permissions.findOne(
{
_id: permissionId,
...permission,
},
{ projection: { _id: 1 } },
);
const BULK_WRITE_BATCH_SIZE = 500;

if (!existent) {
try {
await Permissions.updateOne({ _id: permissionId }, { $set: permission }, { upsert: true });
} catch (e) {
if (!(e as Error).message.includes('E11000')) {
// E11000 refers to a MongoDB error that can occur when using unique indexes for upserts
// https://docs.mongodb.com/manual/reference/method/db.collection.update/#use-unique-indexes
await Permissions.updateOne({ _id: permissionId }, { $set: permission }, { upsert: true });
}
}
}

delete previousSettingPermissions[permissionId];
type SettingPermissionUpdateOp = {
updateOne: {
filter: { _id: string };
update: { $set: Omit<IPermission, '_id'> };
upsert: true;
};
};

const createPermissionsForExistingSettings = async function (): Promise<void> {
const previousSettingPermissions = await getPreviousPermissions();
const settingsList = await Settings.findNotHidden().toArray();

const settings = await Settings.findNotHidden().toArray();
for await (const setting of settings) {
await createSettingPermission(setting, previousSettingPermissions);
const updateOps: SettingPermissionUpdateOp[] = [];
for (const setting of settingsList) {
const { _id: permissionId, doc } = buildSettingPermissionDoc(setting, previousSettingPermissions);
updateOps.push({
updateOne: {
filter: { _id: permissionId },
update: { $set: doc },
upsert: true,
},
});
delete previousSettingPermissions[permissionId];
}

// remove permissions for non-existent settings
for await (const obsoletePermission of Object.keys(previousSettingPermissions)) {
if (previousSettingPermissions.hasOwnProperty(obsoletePermission)) {
await Permissions.deleteOne({ _id: obsoletePermission });
// Batches run sequentially so E11000 retry applies per batch
/* eslint-disable no-await-in-loop */
for (let i = 0; i < updateOps.length; i += BULK_WRITE_BATCH_SIZE) {
const batch = updateOps.slice(i, i + BULK_WRITE_BATCH_SIZE);
try {
await Permissions.col.bulkWrite(batch, { ordered: false });
} catch (e) {
if ((e as Error).message.includes('E11000')) {
// E11000 duplicate key: retry without upsert for this batch (doc already exists)
await Promise.all(batch.map((op) => Permissions.updateOne(op.updateOne.filter, op.updateOne.update)));
Comment on lines +108 to +117
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

Remove new inline implementation comments in the batching path.

Line 108 and Line 116 add implementation comments; this repo's guidelines ask that TS/JS implementations be kept comment-free.

As per coding guidelines, "Avoid code comments in the implementation".

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@apps/meteor/app/authorization/server/functions/upsertPermissions.ts` around
lines 108 - 117, The batching loop contains inline implementation comments that
violate the repo guideline to avoid implementation comments; remove the comments
inside the for-loop (the lines commenting batching behavior and the E11000 retry
explanation) so the code around updateOps, BULK_WRITE_BATCH_SIZE,
Permissions.col.bulkWrite(...) and the E11000 catch path that calls
Permissions.updateOne(...) remains functionally identical but without those
inline explanatory comments.

} else {
throw e;
}
Comment on lines +112 to +120
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

🧩 Analysis chain

🌐 Web query:

MongoDB Node.js driver bulkWrite writeErrors code 11000 duplicate key error detection

💡 Result:

In the MongoDB Node.js driver, collection.bulkWrite() throws a MongoBulkWriteError when one or more operations fail (especially with ordered: true). That error includes a writeErrors array you can inspect. [1]

To detect duplicate key failures from the bulk, check for error code 11000 in the writeErrors entries (each entry corresponds to an operation index in your operations array). MongoDB reports duplicate key as code: 11000 (E11000). [2]

import { MongoBulkWriteError } from "mongodb";

try {
  const res = await collection.bulkWrite(ops, { ordered: false });
} catch (err) {
  if (err instanceof MongoBulkWriteError) {
    const dupKeyErrors = (err.writeErrors ?? []).filter(e => e.code === 11000);

    // each error typically includes: e.index (op index), e.code, e.errmsg, e.op
    for (const e of dupKeyErrors) {
      console.log("dup key at op index:", e.index, "msg:", e.errmsg);
    }
  } else {
    throw err;
  }
}

Note: If you’re using the driver’s newer “client bulk write” API, writeErrors is a Map (operation index → error) rather than an array, but the same code === 11000 check applies. [1]

Sources:
[1] MongoDB Node.js Driver Docs — Bulk write exceptions / MongoBulkWriteError.writeErrors (array) and client bulk write writeErrors (Map) (mongodb.com)
[2] MongoDB Server Manual — bulkWrite() duplicate key example showing code: 11000 in writeErrors (mongodb.com)

Citations:


🏁 Script executed:

# First, let's examine the actual code in the file
find . -type f -name "upsertPermissions.ts" | head -5

Repository: RocketChat/Rocket.Chat

Length of output: 135


🏁 Script executed:

# Check the actual code around lines 112-120
sed -n '105,125p' ./apps/meteor/app/authorization/server/functions/upsertPermissions.ts

Repository: RocketChat/Rocket.Chat

Length of output: 841


Avoid relying on error message string matching to detect duplicate-key-only failures in unordered bulkWrite.

On unordered bulks, multiple distinct errors can occur. If any error message happens to contain 'E11000', the current code will retry the entire batch, potentially masking non-duplicate errors. Use the writeErrors array instead, where each error has a code field (11000 for duplicate key). Only retry if all errors in the batch are code 11000.

Correct pattern
 			try {
 				await Permissions.col.bulkWrite(batch, { ordered: false });
 			} catch (e) {
-				if ((e as Error).message.includes('E11000')) {
-					// E11000 duplicate key: retry without upsert for this batch (doc already exists)
-					await Promise.all(batch.map((op) => Permissions.updateOne(op.updateOne.filter, op.updateOne.update)));
-				} else {
-					throw e;
-				}
+				const writeErrors = (e as { writeErrors?: Array<{ code?: number }> }).writeErrors ?? [];
+				const duplicateOnly = writeErrors.length > 0 && writeErrors.every((we) => we.code === 11000);
+				if (!duplicateOnly) {
+					throw e;
+				}
+				await Promise.all(batch.map((op) => Permissions.updateOne(op.updateOne.filter, op.updateOne.update)));
 			}
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
try {
await Permissions.col.bulkWrite(batch, { ordered: false });
} catch (e) {
if ((e as Error).message.includes('E11000')) {
// E11000 duplicate key: retry without upsert for this batch (doc already exists)
await Promise.all(batch.map((op) => Permissions.updateOne(op.updateOne.filter, op.updateOne.update)));
} else {
throw e;
}
try {
await Permissions.col.bulkWrite(batch, { ordered: false });
} catch (e) {
const writeErrors = (e as { writeErrors?: Array<{ code?: number }> }).writeErrors ?? [];
const duplicateOnly = writeErrors.length > 0 && writeErrors.every((we) => we.code === 11000);
if (!duplicateOnly) {
throw e;
}
await Promise.all(batch.map((op) => Permissions.updateOne(op.updateOne.filter, op.updateOne.update)));
}
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@apps/meteor/app/authorization/server/functions/upsertPermissions.ts` around
lines 112 - 120, The bulkWrite error handling in upsertPermissions currently
checks the error message string for 'E11000' which is unsafe; change the catch
to inspect the thrown BulkWriteError's writeErrors array and ensure every item
has code === 11000 before retrying with non-upsert updates. Locate the
Permissions.col.bulkWrite call and in its catch, cast the error to the
BulkWriteError shape, check Array.isArray(err.writeErrors) and verify
err.writeErrors.every(e => e.code === 11000); only then run the Promise.all
fallback using Permissions.updateOne for each op, otherwise rethrow the original
error.

}
}
/* eslint-enable no-await-in-loop */

const obsoleteIds = Object.keys(previousSettingPermissions);
if (obsoleteIds.length > 0) {
await Permissions.deleteMany({ _id: { $in: obsoleteIds } });
}
};

// for each setting which already exists, create a permission to allow changing just this one setting
Expand Down
8 changes: 3 additions & 5 deletions apps/meteor/app/settings/server/startup.ts
Original file line number Diff line number Diff line change
@@ -1,13 +1,11 @@
import type { ISetting } from '@rocket.chat/core-typings';
import type { Settings } from '@rocket.chat/models';

import type { ICachedSettings } from './CachedSettings';

// eslint-disable-next-line @typescript-eslint/naming-convention
export async function initializeSettings({ model, settings }: { model: typeof Settings; settings: ICachedSettings }): Promise<void> {
await model.find().forEach((record: ISetting) => {
const records = await model.find().toArray();
for (const record of records) {
settings.set(record);
});

}
settings.initialized();
}
33 changes: 20 additions & 13 deletions apps/meteor/ee/server/apps/orchestrator.js
Original file line number Diff line number Diff line change
Expand Up @@ -191,24 +191,30 @@ export class AppServerOrchestrator {
return;
}

const loadStart = Date.now();
await this.getManager().load();

// Before enabling each app we verify if there is still room for it
const apps = await this.getManager().get();

// This needs to happen sequentially to keep track of app limits
for await (const app of apps) {
try {
await canEnableApp(app.getStorageItem());

await this.getManager().loadOne(app.getID(), true);
} catch (error) {
this._rocketchatLogger.warn({
msg: 'App could not be enabled',
appName: app.getInfo().name,
err: error,
});
}
const CONCURRENCY_LIMIT = 4;
Copy link
Contributor

@cubic-dev-ai cubic-dev-ai bot Mar 3, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P1: Race condition: parallelizing app enablement can exceed license app limits. The removed comment explicitly stated "This needs to happen sequentially to keep track of app limits." canEnableApp checks shouldPreventAction against a count that won't reflect the other apps being concurrently enabled in the same chunk, so multiple apps can pass the limit check simultaneously and exceed the allowed count.

(Based on your team's feedback about questioning behavioral changes that affect concurrency.)

View Feedback

Prompt for AI agents
Check if this issue is valid — if so, understand the root cause and fix it. At apps/meteor/ee/server/apps/orchestrator.js, line 200:

<comment>Race condition: parallelizing app enablement can exceed license app limits. The removed comment explicitly stated "This needs to happen sequentially to keep track of app limits." `canEnableApp` checks `shouldPreventAction` against a count that won't reflect the other apps being concurrently enabled in the same chunk, so multiple apps can pass the limit check simultaneously and exceed the allowed count.

(Based on your team's feedback about questioning behavioral changes that affect concurrency.) </comment>

<file context>
@@ -191,24 +191,30 @@ export class AppServerOrchestrator {
-					err: error,
-				});
-			}
+		const CONCURRENCY_LIMIT = 4;
+		for (let i = 0; i < apps.length; i += CONCURRENCY_LIMIT) {
+			const chunk = apps.slice(i, i + CONCURRENCY_LIMIT);
</file context>
Fix with Cubic

for (let i = 0; i < apps.length; i += CONCURRENCY_LIMIT) {
const chunk = apps.slice(i, i + CONCURRENCY_LIMIT);
// eslint-disable-next-line no-await-in-loop
await Promise.all(
chunk.map(async (app) => {
try {
await canEnableApp(app.getStorageItem());
await this.getManager().loadOne(app.getID(), true);
} catch (error) {
this._rocketchatLogger.warn({
msg: 'App could not be enabled',
appName: app.getInfo().name,
err: error,
});
}
}),
);
}

await this.getBridges().getSchedulerBridge().startScheduler();
Expand All @@ -218,6 +224,7 @@ export class AppServerOrchestrator {
this._rocketchatLogger.info({
msg: 'Loaded the Apps Framework and apps',
appCount,
durationMs: Date.now() - loadStart,
});
}

Expand Down
4 changes: 3 additions & 1 deletion apps/meteor/server/database/trash.ts
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
import { TrashRaw } from '@rocket.chat/models';
import { registerModel, TrashRaw } from '@rocket.chat/models';

import { db } from './utils';

const Trash = new TrashRaw(db);
export const trashCollection = Trash.col;

registerModel('ITrashModel', Trash);
19 changes: 14 additions & 5 deletions apps/meteor/server/lib/migrations.ts
Original file line number Diff line number Diff line change
Expand Up @@ -311,7 +311,13 @@ export async function migrateDatabase(targetVersion: 'latest' | number, subcomma
return true;
}

export async function onServerVersionChange(cb: () => Promise<void>): Promise<void> {
let hashVersion: string | undefined;

const getHashVersion = async () => {
if (hashVersion) {
return hashVersion;
}

const result = await Migrations.findOneAndUpdate(
{
_id: 'upgrade',
Expand All @@ -326,9 +332,12 @@ export async function onServerVersionChange(cb: () => Promise<void>): Promise<vo
},
);

if (result?.hash === Info.commit.hash) {
return;
}
hashVersion = result?.hash;
return hashVersion;
};

export async function shouldRunServerVersionChange(): Promise<boolean> {
const hash = await getHashVersion();

await cb();
return hash !== Info.commit.hash;
}
21 changes: 13 additions & 8 deletions apps/meteor/server/startup/migrations/xrun.ts
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
import { Settings } from '@rocket.chat/models';
import { Settings, indexes } from '@rocket.chat/models';
import type { UpdateResult } from 'mongodb';

import { upsertPermissions } from '../../../app/authorization/server/functions/upsertPermissions';
import { settings } from '../../../app/settings/server';
import { migrateDatabase, onServerVersionChange } from '../../lib/migrations';
import { migrateDatabase, shouldRunServerVersionChange } from '../../lib/migrations';
import { ensureCloudWorkspaceRegistered } from '../cloudRegistration';

const { MIGRATION_VERSION = 'latest' } = process.env;
Expand Down Expand Up @@ -57,10 +57,15 @@ const moveRetentionSetting = async () => {

export const performMigrationProcedure = async (): Promise<void> => {
await migrateDatabase(version === 'latest' ? version : parseInt(version), subcommands);
// perform operations when the server is starting with a different version
await onServerVersionChange(async () => {
await upsertPermissions();
await ensureCloudWorkspaceRegistered();
await moveRetentionSetting();
});

if (!(await shouldRunServerVersionChange())) {
indexes.cancel();
return;
}

indexes.ensureIndexes();

await upsertPermissions();
await ensureCloudWorkspaceRegistered();
await moveRetentionSetting();
};
1 change: 1 addition & 0 deletions packages/models/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
"unit": "jest"
},
"dependencies": {
"@rocket.chat/emitter": "^0.32.0",
"@rocket.chat/model-typings": "workspace:~",
"@rocket.chat/random": "workspace:^",
"@rocket.chat/rest-typings": "workspace:^",
Expand Down
31 changes: 28 additions & 3 deletions packages/models/src/models/BaseRaw.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import type { RocketChatRecordDeleted } from '@rocket.chat/core-typings';
import { Emitter } from '@rocket.chat/emitter';
import type { IBaseModel, DefaultFields, ResultFields, FindPaginated, InsertionModel } from '@rocket.chat/model-typings';
import { traceInstanceMethods } from '@rocket.chat/tracing';
import { ObjectId } from 'mongodb';
Expand Down Expand Up @@ -46,6 +47,30 @@ type ModelOptions = {
collection?: CollectionOptions;
};

export type IndexRegisterFn = () => Promise<void>;
const ee = new Emitter<{
added: IndexRegisterFn;
}>();
// The idea is to accumulate the indexes that should be created in a set, and then create them all at once.
// In the case of a lazy model, we need to create the indexes when the model is instantiated.

const indexesThatShouldBeCreated = new Set<IndexRegisterFn>();
const onAdded = (fn: IndexRegisterFn) => indexesThatShouldBeCreated.add(fn);
const onAddedExecute = (fn: IndexRegisterFn) => fn();
ee.on('added', onAdded);
export const indexes = {
ensureIndexes: () => {
indexesThatShouldBeCreated.forEach((fn) => fn());
indexesThatShouldBeCreated.clear();
ee.off('added', onAdded);
ee.on('added', onAddedExecute);
},
cancel: () => {
ee.off('added', onAdded);
indexesThatShouldBeCreated.clear();
},
Comment on lines +57 to +71
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Index registration currently accumulates duplicates and can multiply executions.

At Line 109, every instantiation emits a new closure, so the Set at Line 57 keeps growing with non-deduplicated callbacks. Also, Line 66 re-attaches onAddedExecute on repeated ensureIndexes() calls, and cancel() (Lines 68-70) does not detach it. This can cause duplicate createIndexes() runs and lingering execution mode.

💡 Proposed fix (dedupe by collection key + idempotent listener lifecycle)
 export type IndexRegisterFn = () => Promise<void>;
+type IndexRegistration = { key: string; fn: IndexRegisterFn };
 const ee = new Emitter<{
-	added: IndexRegisterFn;
+	added: IndexRegistration;
 }>();

-const indexesThatShouldBeCreated = new Set<IndexRegisterFn>();
-const onAdded = (fn: IndexRegisterFn) => indexesThatShouldBeCreated.add(fn);
-const onAddedExecute = (fn: IndexRegisterFn) => fn();
+const indexesThatShouldBeCreated = new Map<string, IndexRegisterFn>();
+const onAdded = ({ key, fn }: IndexRegistration) => indexesThatShouldBeCreated.set(key, fn);
+const onAddedExecute = ({ fn }: IndexRegistration) => void fn();
+let isExecutingMode = false;

 ee.on('added', onAdded);
 export const indexes = {
 	ensureIndexes: () => {
-		indexesThatShouldBeCreated.forEach((fn) => fn());
+		if (isExecutingMode) {
+			return;
+		}
+		for (const fn of indexesThatShouldBeCreated.values()) {
+			void fn();
+		}
 		indexesThatShouldBeCreated.clear();
 		ee.off('added', onAdded);
+		ee.off('added', onAddedExecute);
 		ee.on('added', onAddedExecute);
+		isExecutingMode = true;
 	},
 	cancel: () => {
 		ee.off('added', onAdded);
+		ee.off('added', onAddedExecute);
 		indexesThatShouldBeCreated.clear();
+		isExecutingMode = false;
 	},
 } as const;
-		void ee.emit('added', () => this.createIndexes());
+		void ee.emit('added', { key: this.collectionName, fn: () => this.createIndexes() });

Also applies to: 109-109

🧰 Tools
🪛 Biome (2.4.4)

[error] 63-63: This callback passed to forEach() iterable method should not return a value.

(lint/suspicious/useIterableCallbackReturn)

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@packages/models/src/models/BaseRaw.ts` around lines 57 - 71, The Set
indexesThatShouldBeCreated and the listener lifecycle are causing duplicate
callbacks and repeated onAddedExecute attachments; change registration to dedupe
by a unique key (e.g., collectionKey or id) instead of storing raw functions in
indexesThatShouldBeCreated, have onAdded push a keyed entry (or store fn keyed)
and ensure ensureIndexes() calls are idempotent by (a) only attaching
onAddedExecute once (check a boolean flag or remove before attach) and (b)
removing onAddedExecute in cancel(); update functions referenced
(indexesThatShouldBeCreated, onAdded, onAddedExecute, indexes.ensureIndexes,
cancel, ee.on/off) so ensureIndexes() iterates unique keyed entries and clears
both the Set/map and any attached onAddedExecute listener to prevent duplicated
createIndexes executions and lingering listeners.

} as const;

export abstract class BaseRaw<
T extends { _id: string },
C extends DefaultFields<T> = undefined,
Expand Down Expand Up @@ -79,10 +104,10 @@ export abstract class BaseRaw<

this.col = this.db.collection(this.collectionName, options?.collection || {});

void this.createIndexes();

this.preventSetUpdatedAt = options?.preventSetUpdatedAt ?? false;

void ee.emit('added', () => this.createIndexes());

return traceInstanceMethods(this);
}

Expand Down Expand Up @@ -363,7 +388,7 @@ export abstract class BaseRaw<
throw e;
}

return doc as WithId<T>;
return doc;
}

async deleteMany(filter: Filter<T>, options?: DeleteOptions & { onTrash?: (record: ResultFields<T, C>) => void }): Promise<DeleteResult> {
Expand Down
1 change: 1 addition & 0 deletions yarn.lock
Original file line number Diff line number Diff line change
Expand Up @@ -9491,6 +9491,7 @@ __metadata:
version: 0.0.0-use.local
resolution: "@rocket.chat/models@workspace:packages/models"
dependencies:
"@rocket.chat/emitter": "npm:^0.32.0"
"@rocket.chat/jest-presets": "workspace:~"
"@rocket.chat/model-typings": "workspace:~"
"@rocket.chat/random": "workspace:^"
Expand Down
Loading