Skip to content

Commit

Permalink
fix: ensure hash set on logs (this needs a good rewrite)
Browse files Browse the repository at this point in the history
  • Loading branch information
titanism committed Feb 27, 2025
1 parent 7403577 commit bc1ba56
Show file tree
Hide file tree
Showing 2 changed files with 18 additions and 9 deletions.
22 changes: 14 additions & 8 deletions app/models/logs.js
Original file line number Diff line number Diff line change
Expand Up @@ -332,8 +332,8 @@ for (const index of PARTIAL_INDICES) {
// (this helps with server-side log saving, and for client-side we parseErr in advance)
//
//
// Normalize raw Error instances into plain serializable objects before
// validation so they can be persisted. Server-side log saving may pass
// unparsed Error objects here; client-side callers run parseErr in advance.
//
// NOTE: the diff rendering duplicated the old (non-optional-chained) guards
// alongside the new ones; only the committed optional-chaining version is kept,
// otherwise parseErr would be applied twice to the same error.
//
Logs.pre('validate', function (next) {
  if (_.isError(this?.err)) this.err = parseErr(this.err);
  if (_.isError(this?.meta?.err)) this.meta.err = parseErr(this.meta.err);
  next();
});

Expand Down Expand Up @@ -555,13 +555,18 @@ Logs.pre('validate', function (next) {
//
// eslint-disable-next-line complexity
function getQueryHash(log) {
if (log.hash) return log.hash;

//
// if log.meta.ignore_hook is explicitly false
// then that means we want to definitely log the error
// (in future we could use a different field to denote unique hash)
//
if (log?.meta?.ignore_hook === false) return revHash(safeStringify(log));

if (log?.meta?.session?.isAllowlisted && log?.meta?.session?.fingerprint)
return log.meta.session.fingerprint;

const set = new Set();
//
// prepare db query for uniqueness
Expand Down Expand Up @@ -647,7 +652,9 @@ function getQueryHash(log) {
// log.meta.level (log level)
//
const $gte =
log?.meta?.level && ['error', 'fatal'].includes(log.meta.level)
log.created_at &&
log?.meta?.level &&
['error', 'fatal'].includes(log.meta.level)
? dayjs(new Date(log.created_at)).startOf('hour').toDate()
: dayjs(new Date(log.created_at)).startOf('day').toDate();

Expand Down Expand Up @@ -699,7 +706,7 @@ function getQueryHash(log) {

// if it was not an HTTP request then include date query
// (we don't want the server itself to pollute the db on its own)
set.add($gte);
if ($gte) set.add($gte);
}

// if it was a code bug
Expand Down Expand Up @@ -756,6 +763,8 @@ function getQueryHash(log) {
return revHash(safeStringify([...set]));
}

// expose the hash computation as a model static so callers outside this
// module (e.g. helpers/logger.js via conn.models.Logs.getQueryHash) can
// pre-compute a log's uniqueness hash before attempting to create it
Logs.statics.getQueryHash = getQueryHash;

Logs.pre('validate', function (next) {
try {
// get query hash
Expand All @@ -767,10 +776,7 @@ Logs.pre('validate', function (next) {
//
// TODO: logs should live for 30d not 7d
//
this.hash =
this?.meta?.session?.isAllowlisted && this?.meta?.session?.fingerprint
? this.meta.session.fingerprint
: getQueryHash(this);
this.hash = getQueryHash(this);
next();
} catch (err) {
err.is_duplicate_log = true;
Expand Down
5 changes: 4 additions & 1 deletion helpers/logger.js
Original file line number Diff line number Diff line change
Expand Up @@ -228,6 +228,8 @@ async function hook(err, message, meta) {
// this should never happen but it's a conditional safeguard
if (_.isError(log.err)) log.err = JSON.parse(safeStringify(log.err));

log.hash = conn.models.Logs.getQueryHash(log);

return conn.models.Logs.create(log)
.then()
.catch((err) => {
Expand All @@ -241,7 +243,8 @@ async function hook(err, message, meta) {
)
return;
// unique hash (already exists)
if (err.code === 11000) return;
if (err.code === 11000 || err.message === 'Hash is not unique.')
return;
//
// NOTE: this allows us to log mongodb timeout issues (e.g. due to slow queries)
//
Expand Down

0 comments on commit bc1ba56

Please sign in to comment.