Fix uni-watcher to handle mainnet data (#293)

* Fix watchers to handle mainnet data

* Tweak jobs fetched per interval to reduce event processing time
This commit is contained in:
nikugogoi 2021-12-02 15:28:03 +05:30 committed by GitHub
parent 08c712d766
commit 32fea1f2cb
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 12 additions and 15 deletions

View File

@@ -61,6 +61,7 @@ export class JobRunner {
await this._indexer.processEvent(event);
}
await this._indexer.updateBlockProgress(event.block.blockHash, event.index);
await this._jobQueue.markComplete(job);
});
}

View File

@@ -61,6 +61,7 @@ export class JobRunner {
await this._indexer.processEvent(event);
}
await this._indexer.updateBlockProgress(event.block.blockHash, event.index);
await this._jobQueue.markComplete(job);
});
}

View File

@@ -63,6 +63,7 @@ export class JobRunner {
await this._indexer.processEvent(event);
}
await this._indexer.updateBlockProgress(event.block.blockHash, event.index);
await this._jobQueue.markComplete(job);
});
}

View File

@@ -78,6 +78,7 @@ export class JobRunner {
await this._indexer.processEvent(dbEvent);
}
await this._indexer.updateBlockProgress(event.block.blockHash, event.index);
await this._jobQueue.markComplete(job);
});
}

View File

@@ -105,7 +105,6 @@ export class EventWatcher {
const dbEvent = await this._indexer.getEvent(request.data.id);
assert(dbEvent);
await this._indexer.updateBlockProgress(dbEvent.block.blockHash, dbEvent.index);
const blockProgress = await this._indexer.getBlockProgress(dbEvent.block.blockHash);
if (blockProgress) {

View File

@@ -253,7 +253,7 @@ export class Indexer {
let res;
try {
res = this._db.saveEventEntity(dbTx, dbEvent);
res = await this._db.saveEventEntity(dbTx, dbEvent);
await dbTx.commitTransaction();
} catch (error) {
await dbTx.rollbackTransaction();

View File

@@ -13,6 +13,8 @@ interface Config {
type JobCallback = (job: any) => Promise<void>;
const JOBS_PER_INTERVAL = 5;
const log = debug('vulcanize:job-queue');
export class JobQueue {
@@ -36,7 +38,7 @@ export class JobQueue {
retentionDays: 30, // 30 days
newJobCheckIntervalSeconds: 1
newJobCheckInterval: 100
});
this._boss.on('error', error => log(error));
@@ -51,12 +53,12 @@ export class JobQueue {
}
async subscribe (queue: string, callback: JobCallback): Promise<string> {
return await this._boss.subscribe(queue, { teamSize: 1, teamConcurrency: 1 }, async (job: any) => {
return await this._boss.subscribe(queue, { teamSize: JOBS_PER_INTERVAL, teamConcurrency: 1 }, async (job: any) => {
try {
log(`Processing queue ${queue} job ${job.id}...`);
await callback(job);
} catch (error) {
log(`Error in queue ${queue}`);
log(`Error in queue ${queue} job ${job.id}`);
log(error);
throw error;
}
@@ -64,7 +66,7 @@ export class JobQueue {
}
async onComplete (queue: string, callback: JobCallback): Promise<string> {
return await this._boss.onComplete(queue, async (job: any) => {
return await this._boss.onComplete(queue, { teamSize: JOBS_PER_INTERVAL, teamConcurrency: 1 }, async (job: any) => {
const { id, data: { failed, createdOn } } = job;
log(`Job onComplete for queue ${queue} job ${id} created ${createdOn} success ${!failed}`);
await callback(job);

View File

@@ -56,14 +56,6 @@ export class JobRunner {
const event = dbEvent;
// Confirm that the parent block has been completely processed.
// We don't have to worry about aborting as this job will get retried later.
const parent = await this._indexer.getBlockProgress(event.block.parentHash);
if (!parent || !parent.isComplete) {
const message = `Abort processing of event ${id} as parent block not processed yet`;
throw new Error(message);
}
const blockProgress = await this._indexer.getBlockProgress(event.block.blockHash);
assert(blockProgress);
@@ -159,7 +151,7 @@ export class JobRunner {
throw new Error(message);
}
if (parentHash !== syncStatus.latestCanonicalBlockHash && !parent.isComplete) {
if (!parent.isComplete) {
// Parent block indexing needs to finish before this block can be indexed.
const message = `Indexing incomplete for parent block number ${parent.blockNumber} hash ${parentHash} of block number ${blockNumber} hash ${blockHash}, aborting`;
log(message);