199 lines
6.8 KiB
Python
199 lines
6.8 KiB
Python
import urllib.parse
|
|
|
|
import requests
|
|
from django.db import models
|
|
from django.utils.text import slugify
|
|
from django.utils.translation import gettext_lazy as _
|
|
|
|
from passerelle.base.models import BaseResource, HTTPResource
|
|
from passerelle.utils.api import endpoint
|
|
from passerelle.utils.conversion import exception_to_text
|
|
from passerelle.utils.jsonresponse import APIError
|
|
|
|
# JSON schema validating the POST body of the do-search endpoint.
# 'unflatten' is not a JSON-schema keyword; presumably consumed by the
# passerelle request machinery to rebuild nested dicts -- TODO confirm.
MULT_SCHEMA = {
    '$schema': 'http://json-schema.org/draft-04/schema#',
    'title': 'Multi-criterion search',
    'unflatten': True,
    'description': '',
    'type': 'object',
    'required': ['search_name', 'criterions'],
    'properties': {
        'search_name': {
            'description': _('Search name, as specified in the Esabora webservice'),
            'type': 'string',
            'examples': ['WS_ETAT_DOSSIER_SAS'],
        },
        # Free-form mapping: each key/value pair becomes one criterion in
        # the Esabora 'criterionList' payload.
        'criterions': {
            'description': _('A mapping of criterions'),
            'type': 'object',
            'examples': [{'SAS_Référence': 'HISTO0001'}],
        },
    },
}
|
|
|
|
# JSON schema validating the POST body of the do-treatment endpoint.
# Additional (undeclared) properties are allowed and are forwarded to the
# Esabora service as treatment fields.
DO_TREATMENT_SCHEMA = {
    '$schema': 'http://json-schema.org/draft-04/schema#',
    'unflatten': True,
    'title': 'Treatment creation',
    'description': 'Additional fields in the payload will be transmitted to the Esabora service.',
    'type': 'object',
    'required': ['treatment_name'],
    'properties': {
        # Optional: selects the Esabora webservice path ('modbdd' when absent).
        'endpoint': {
            'description': _('Endpoint name, such as modbdd or addevt. Defaults to modbdd'),
            'type': 'string',
            'examples': ['modbdd'],
        },
        'treatment_name': {
            'description': _('Treatment name, as specified in the Esabora service'),
            'type': 'string',
            'examples': ['IMPORT HISTOLOGE'],
        },
    },
}
|
|
|
|
|
|
class Esabora(BaseResource, HTTPResource):
    """Passerelle connector to the Esabora REST platform.

    Exposes Esabora's multi-criterion search ('mult') and treatment
    creation webservices as two passerelle endpoints, translating between
    the endpoint payloads and Esabora's searchName/criterionList and
    treatmentName/fieldList wire formats.
    """

    # Base URL of the Esabora REST webservice; endpoint paths such as
    # 'mult/' or 'modbdd/' are joined onto it.
    service_url = models.URLField(
        blank=False,
        verbose_name=_('Service URL'),
        help_text=_('Base Web Service URL, such as https://example.domain/ws/rest/'),
    )

    # Sent as a Bearer token in the Authorization header of every request.
    api_key = models.CharField(max_length=256, default='', blank=True, verbose_name=_('API key'))

    category = _('Business Process Connectors')

    class Meta:
        verbose_name = _('Esabora')

    def post(self, path, payload, **kwargs):
        """POST *payload* as JSON to *path* under service_url; return the decoded JSON body.

        Raises APIError (logged, with a descriptive data payload) on
        connection failure, on a non-JSON response body, or on an HTTP
        error status.
        """
        url = urllib.parse.urljoin(self.service_url, path)
        headers = {'Authorization': f'Bearer {self.api_key}'}
        try:
            # self.requests is presumably the passerelle-managed requests
            # session configured on the resource -- TODO confirm.
            response = self.requests.post(url, json=payload, headers=headers, timeout=5, **kwargs)
        except requests.RequestException as e:
            raise APIError(
                'Esabora platform "%s" connection error: %s' % (self.service_url, exception_to_text(e)),
                log_error=True,
                data={
                    'code': 'connection-error',
                    'service_url': self.service_url,
                    'error': str(e),
                },
            )
        try:
            # NOTE(review): the body is decoded before the status check, so a
            # non-JSON error page surfaces as 'invalid JSON response' rather
            # than as an HTTP error -- confirm this ordering is intended.
            data = response.json()
        except requests.JSONDecodeError as e:
            raise APIError(
                'Esabora platform "%s" invalid JSON response: %s' % (self.service_url, exception_to_text(e)),
                log_error=True,
                data={
                    'status_code': response.status_code,
                },
            )
        if not response.ok:
            raise APIError(
                'Esabora platform "%s" answered with HTTP error' % (self.service_url),
                log_error=True,
                data={
                    'status_code': response.status_code,
                    'content': data,
                },
            )
        return data

    @endpoint(
        name='do-search',
        description=_('Multi-criterion search'),
        perm='can_access',
        methods=['post'],
        post={'request_body': {'schema': {'application/json': MULT_SCHEMA}}},
        json_schema_response={},
    )
    def do_search(self, request, post_data):
        """Run an Esabora multi-criterion search (task=doSearch).

        Builds the searchName/criterionList payload from the validated
        POST data, then reshapes the raw row/column response: column and
        key names are slugified (dashes replaced by underscores) and each
        row becomes an object keyed by those slugs.
        """
        payload = {
            'searchName': post_data['search_name'],
            'criterionList': [
                {'criterionName': name, 'criterionValueList': [value]}
                for name, value in post_data['criterions'].items()
            ],
        }
        data = self.post('mult/', payload, params={'task': 'doSearch'})
        # slug -> original label mappings, exposed in 'meta' so callers can
        # recover the Esabora display names.
        columns = {slugify(c).replace('-', '_'): c for c in data['columnList']}
        keys = {slugify(c).replace('-', '_'): c for c in data['keyList']}
        cleaned_data = {
            'meta': {
                'nbResults': data['nbResults'],
                'searchId': data['searchId'],
                'columns_name': columns,
                'keys_name': keys,
            },
            'data': [
                esabora_row_to_object(list(columns.keys()), list(keys.keys()), row) for row in data['rowList']
            ],
        }
        return cleaned_data

    @endpoint(
        name='do-treatment',
        description=_('Create a new treatment'),
        perm='can_access',
        methods=['post'],
        post={'request_body': {'schema': {'application/json': DO_TREATMENT_SCHEMA}}},
        json_schema_response={},
    )
    def do_treatment(self, request, post_data):
        """Create an Esabora treatment (task=doTreatment).

        The target webservice path ('modbdd' by default, e.g. 'addevt') is
        taken from the optional 'endpoint' field; every remaining field of
        the payload is forwarded through get_treatment_payload().
        """
        # NOTE(review): this local shadows the imported `endpoint` decorator
        # inside the method body; harmless here but easy to trip over.
        endpoint = post_data.pop('endpoint', None) or 'modbdd'
        payload = get_treatment_payload(post_data)

        data = self.post(f'{endpoint}/', payload, params={'task': 'doTreatment'})
        keys = [slugify(c).replace('-', '_') for c in data['keyList']]
        cleaned_data = esabora_row_to_object([], keys, data)
        cleaned_data['action'] = data['action']
        return cleaned_data
|
|
|
|
|
|
def esabora_row_to_object(columns, key_list, row):
    """Flatten one Esabora result row into a plain dict.

    Column values then key values are matched positionally against
    ``columns + key_list``. A ``text`` entry (first value, or None) is
    always added; an ``id`` entry (first key value) is added only when
    the row carries key data.
    """
    key_values = row.get('keyDataList', [])
    values = row.get('columnDataList', []) + key_values
    record = {name: value for name, value in zip(columns + key_list, values)}
    record['text'] = values[0] if values else None
    if key_values:
        record['id'] = key_values[0]
    return record
|
|
|
|
|
|
def get_treatment_payload(post_data):
    """Build the Esabora doTreatment payload from endpoint POST data.

    ``treatment_name`` becomes ``treatmentName``; every other key becomes
    an entry of ``fieldList``. List or dict values are treated as file
    uploads and routed through populate_document_field() (a lone dict is
    wrapped in a one-element list); any other value is passed through as
    ``fieldValue``.

    Returns the payload dict. The caller's *post_data* is left untouched
    (the previous implementation popped 'treatment_name' out of it).
    """
    # Work on a shallow copy so popping 'treatment_name' does not mutate
    # the caller's dict.
    fields = dict(post_data)
    payload = {'treatmentName': fields.pop('treatment_name'), 'fieldList': []}
    for key, value in fields.items():
        field = {
            'fieldName': key,
        }
        if isinstance(value, list):
            populate_document_field(field, value)
        elif isinstance(value, dict):
            populate_document_field(field, [value])
        else:
            field['fieldValue'] = value

        payload['fieldList'].append(field)

    return payload
|
|
|
|
|
|
def populate_document_field(field, value):
    """Fill *field* in place as an Esabora document-upload field.

    Marks the field with ``fieldDocumentUpdate = 1`` and turns each
    non-empty entry of *value* (a list of {'content', 'filename'} dicts,
    content being base64) into a documentContent/documentName/documentSize
    item of ``fieldValue``.
    """
    field['fieldDocumentUpdate'] = 1
    documents = []
    for item in value:
        # skip empty/None entries: no file was actually sent
        if not item:
            continue
        content = item['content']
        # byte size derived from the base64 length, minus trailing '='
        # padding characters, cf https://stackoverflow.com/a/45401395/2844093
        byte_size = (len(content) * 3) / 4 - content.count('=', -2)
        documents.append(
            {
                'documentContent': content,
                'documentName': item['filename'],
                'documentSize': byte_size,
            }
        )
    field['fieldValue'] = documents
|