Hi.
Here is how we implemented auto-retry for sending instances to a peer:
import orthanc
import json
import os
import sqlite3
import contextlib
# Location of the plugin's private SQLite database, stored one level above
# this script in ../orthanc-python-data/ so it survives plugin reloads.
APP_DB_FILE = os.path.join(
    os.path.dirname(os.path.realpath(__file__)),
    '../orthanc-python-data/app-db.db')

# sqlite3.connect() does not create intermediate directories and raises
# OperationalError ("unable to open database file") if the data directory
# is missing, so make sure it exists first.
os.makedirs(os.path.dirname(APP_DB_FILE), exist_ok=True)

with contextlib.closing(sqlite3.connect(APP_DB_FILE)) as connection:
    with contextlib.closing(connection.cursor()) as cursor:
        # Maps each asynchronous store job to the instance it is sending,
        # so the instance can be re-submitted on JOB_FAILURE.
        cursor.execute(
            "CREATE TABLE IF NOT EXISTS job_to_instance (job_id TEXT, instance_id TEXT)")
        # job_id is looked up and deleted on every JOB_FAILURE event;
        # index it so those queries stay cheap as the table grows.
        cursor.execute(
            "CREATE INDEX IF NOT EXISTS idx_job_to_instance_job_id "
            "ON job_to_instance (job_id)")
        connection.commit()
def on_change(changeType, level, resource):
    """Orthanc change callback implementing auto-retry of peer transfers.

    changeType -- orthanc.ChangeType value describing the event
    level      -- resource level of the event (unused here)
    resource   -- resource ID: an instance ID for NEW_INSTANCE, a job ID
                  for JOB_FAILURE

    On NEW_INSTANCE the instance is sent asynchronously to the 'server'
    peer and the resulting job ID is remembered; on JOB_FAILURE the
    matching instance is looked up and re-submitted.
    """
    try:
        with contextlib.closing(sqlite3.connect(APP_DB_FILE)) as connection:
            with contextlib.closing(connection.cursor()) as cursor:
                if changeType == orthanc.ChangeType.NEW_INSTANCE:
                    # Asynchronous store so the callback returns quickly;
                    # remember job -> instance for a possible retry.
                    job_info = json.loads(orthanc.RestApiPost(
                        '/peers/server/store',
                        json.dumps({"Resources": [resource], "Synchronous": False})))
                    cursor.execute(
                        'INSERT INTO job_to_instance (job_id, instance_id) VALUES (?, ?)',
                        (job_info['ID'], resource))
                    connection.commit()
                elif changeType == orthanc.ChangeType.JOB_FAILURE:
                    # The failed job may not be one we created (or was
                    # already retried), so the result can be empty.
                    rows = cursor.execute(
                        "SELECT instance_id FROM job_to_instance where job_id = ?",
                        (resource,)).fetchall()
                    cursor.execute(
                        "DELETE FROM job_to_instance WHERE job_id = ?", (resource,))
                    connection.commit()
                    if rows:
                        # fetchall() yields plain tuples, so index by
                        # position: rows[0]['instance_id'] would raise
                        # TypeError and silently abort the retry.
                        instance_id = rows[0][0]
                        job_info = json.loads(orthanc.RestApiPost(
                            '/peers/server/store',
                            json.dumps({"Resources": [instance_id], "Synchronous": False})))
                        cursor.execute(
                            'INSERT INTO job_to_instance (job_id, instance_id) VALUES (?, ?)',
                            (job_info['ID'], instance_id))
                        connection.commit()
                elif changeType == orthanc.ChangeType.STABLE_STUDY:
                    print('important logic here...')
                    print('TODO: clear job_to_instance table after some time')
    except Exception as e:
        # Top-level boundary: never let an exception propagate into
        # Orthanc's callback machinery; log and carry on.
        print(e)
# Register on_change with Orthanc so it is invoked for every change event
# (new instance, job success/failure, stable study, ...).
orthanc.RegisterOnChangeCallback(on_change)
But if thousands of instances are retried every time, and Orthanc accumulates a lot of pending jobs, will newly uploaded studies have to wait for those jobs to complete?