misc: save received nonces in token table (#71441) #28

Merged
fpeters merged 1 commit from wip/71441-nonces-as-db-tokens into main 2023-01-13 12:50:22 +01:00
8 changed files with 57 additions and 72 deletions

View File

@ -3,7 +3,6 @@ import datetime
import hashlib
import hmac
import os
import shutil
import urllib.parse
import pytest
@ -290,12 +289,7 @@ def test_is_url_signed_check_nonce(pub, local_user, freezer):
pub.site_options.set('api-secrets', ORIG, KEY)
with open(os.path.join(pub.app_dir, 'site-options.cfg'), 'w') as fd:
pub.site_options.write(fd)
# test clean_nonces do not bark when nonces directory is empty
if os.path.exists(os.path.join(pub.app_dir, 'nonces')):
shutil.rmtree(os.path.join(pub.app_dir, 'nonces'))
pub.clean_nonces(now=0)
nonce_dir = os.path.join(pub.app_dir, 'nonces')
assert not os.path.exists(nonce_dir) or not os.listdir(nonce_dir)
pub.token_class.wipe()
signed_url = sign_url('?format=json&orig=%s&email=%s' % (ORIG, urllib.parse.quote(local_user.email)), KEY)
req = HTTPRequest(
None, {'SCRIPT_NAME': '/', 'SERVER_NAME': 'example.net', 'QUERY_STRING': signed_url[1:]}
@ -310,18 +304,18 @@ def test_is_url_signed_check_nonce(pub, local_user, freezer):
is_url_signed()
assert exc_info.value.public_msg == 'nonce already used'
# test that clean nonces works
pub.clean_nonces()
assert os.listdir(nonce_dir)
pub.token_class.clean()
assert pub.token_class.exists()
# 80 seconds in the future, nothing should be cleaned
freezer.move_to(datetime.timedelta(seconds=80))
pub.clean_nonces()
assert os.listdir(nonce_dir)
# 20 seconds in the future, nothing should be cleaned
freezer.move_to(datetime.timedelta(seconds=20))
pub.token_class.clean()
assert pub.token_class.exists()
# 90 seconds in the future, nonces should be removed
freezer.move_to(datetime.timedelta(seconds=10))
pub.clean_nonces()
assert not os.listdir(nonce_dir)
# 40 seconds in the future, nonces should be removed
freezer.move_to(datetime.timedelta(seconds=20))
pub.token_class.clean()
assert not pub.token_class.exists()
def test_get_user_compat_endpoint(pub, local_user):

View File

@ -168,8 +168,6 @@ def test_register_cronjobs():
# noqa pylint: disable=not-an-iterable
assert 'clean_sessions' in [x.function.__name__ for x in pub.cronjobs]
# noqa pylint: disable=not-an-iterable
assert 'clean_nonces' in [x.function.__name__ for x in pub.cronjobs]
# noqa pylint: disable=not-an-iterable
assert 'clean_afterjobs' in [x.function.__name__ for x in pub.cronjobs]
# noqa pylint: disable=not-an-iterable
assert 'clean_tempfiles' in [x.function.__name__ for x in pub.cronjobs]

View File

@ -2517,6 +2517,24 @@ def test_form_tokens_migration(pub):
assert not os.path.exists(form_tokens_dir)
def test_nonces_migration(pub):
    """Check that SQL migration 77 removes the legacy on-disk nonces directory.

    Nonces used to be stored as empty files under <app_dir>/nonces; they now
    live in the tokens table, so the migration only has to clean up old files.
    """
    # Force the recorded schema level back to 76 so sql.migrate() will run
    # the level-77 cleanup step again.
    conn, cur = sql.get_connection_and_cursor()
    cur.execute('UPDATE wcs_meta SET value = 76 WHERE key = %s', ('sql_level',))
    conn.commit()
    cur.close()
    # Recreate a nonce file the way the old file-based implementation did
    # (an empty file named after the nonce value).
    nonces_dir = os.path.join(pub.app_dir, 'nonces')
    if not os.path.exists(nonces_dir):
        os.mkdir(nonces_dir)
    with open(os.path.join(nonces_dir, '1234'), 'w'):
        pass
    assert os.path.exists(os.path.join(nonces_dir, '1234'))
    sql.migrate()
    # The migration removes the whole directory, not just its contents.
    assert not os.path.exists(os.path.join(nonces_dir, '1234'))
    assert not os.path.exists(nonces_dir)
def test_workflow_traces_initial_migration(pub):
formdef = FormDef()
formdef.name = 'tests'

View File

@ -15,12 +15,9 @@
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import base64
import calendar
import datetime
import errno
import hashlib
import hmac
import os
import random
import urllib.parse
@ -79,27 +76,12 @@ def is_url_signed(utcnow=None, duration=DEFAULT_DURATION):
# check nonce
nonce = get_request().form.get('nonce')
if nonce:
# compute end date of the nonce as an unix timestamp
until = timestamp + datetime.timedelta(seconds=duration)
until = calendar.timegm(until.timetuple())
# normalize nonce
nonce = nonce[:128]
nonce = simplify(nonce).replace('/', '-')
nonce_dir = os.path.join(get_publisher().app_dir, 'nonces')
nonce_path = os.path.join(nonce_dir, nonce)
try:
try:
if not os.path.exists(nonce_dir):
os.makedirs(nonce_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
os.close(os.open(nonce_path, os.O_CREAT | os.O_EXCL | os.O_WRONLY))
# touch file in the future for cleaning_nonces
os.utime(nonce_path, (until, until))
except OSError as e:
if e.errno != errno.EEXIST:
raise
nonce = simplify(nonce[:128]).replace('/', '-')
dummy, created = get_publisher().token_class.get_or_create(
type='nonce', id=nonce, expiration_delay=duration
)
if not created:
raise AccessForbiddenError('nonce already used')
get_request().signed = True
return True

View File

@ -851,11 +851,9 @@ class NamedDataSource(XmlStorableObject):
if token_context:
token_context['session_id'] = get_session().id
token, created = get_publisher().token_class.get_or_create(
token, dummy = get_publisher().token_class.get_or_create(
type='autocomplete', context=token_context
)
if created:
token.store()
return '/api/autocomplete/%s' % token.id
return None
@ -866,11 +864,9 @@ class NamedDataSource(XmlStorableObject):
new_url = self.get_variadic_url()
if new_url != url:
token_context = {'session_id': get_session().id, 'url': new_url, 'slug': self.slug}
token, created = get_publisher().token_class.get_or_create(
token, dummy = get_publisher().token_class.get_or_create(
type='autocomplete', context=token_context
)
if created:
token.store()
return '/api/geojson/%s' % token.id
return '/api/geojson/%s' % self.slug

View File

@ -598,22 +598,6 @@ class QommonPublisher(Publisher):
return
cls.cronjobs.append(cronjob)
def clean_nonces(self, delta=60, now=None, **kwargs):
nonce_dir = os.path.join(get_publisher().app_dir, 'nonces')
if not os.path.exists(nonce_dir):
return
cleaning_lock_file = os.path.join(self.app_dir, 'cleaning_nonces.lock')
try:
now = now or time.time()
with locket.lock_file(cleaning_lock_file, timeout=0):
for nonce in os.listdir(nonce_dir):
nonce_path = os.path.join(nonce_dir, nonce)
# we need delta so that os.utime in is_url_signed has time to update file timestamp
if delta < now - os.stat(nonce_path)[8]:
os.unlink(nonce_path)
except locket.LockError:
pass
def clean_sessions(self, **kwargs):
cleaning_lock_file = os.path.join(self.app_dir, 'cleaning_sessions.lock')
try:
@ -711,7 +695,6 @@ class QommonPublisher(Publisher):
@classmethod
def register_cronjobs(cls):
cls.register_cronjob(CronJob(cls.clean_sessions, minutes=range(0, 60, 5), name='clean_sessions'))
cls.register_cronjob(CronJob(cls.clean_nonces, minutes=range(0, 60, 5), name='clean_nonces'))
lguerin marked this conversation as resolved
Review

on ne veut pas nettoyer les entrées expirées, quand meme ?
Review

(phrase coupée :/ )

Elles sont nettoyées dans clean_tokens, qui joue une seule fois par heure, vs toutes les cinq minutes, mais ça va.
Review

ok
cls.register_cronjob(CronJob(cls.clean_afterjobs, minutes=[0], name='clean_afterjobs'))
cls.register_cronjob(CronJob(cls.clean_tokens, minutes=[0], name='clean_tokens'))
cls.register_cronjob(CronJob(cls.clean_tempfiles, minutes=[0], name='clean_tempfiles'))

View File

@ -31,10 +31,10 @@ class Token(StorableObject):
expiration = None
context = None
def __init__(self, expiration_delay=86400, size=16, chars=None):
super().__init__(self.get_new_id(size=size, chars=chars))
def __init__(self, expiration_delay=None, size=16, chars=None, id=None):
super().__init__(id or self.get_new_id(size=size, chars=chars))
if expiration_delay:
self.set_expiration_delay(expiration_delay)
self.set_expiration_delay(expiration_delay or 86400)
def set_expiration_delay(self, expiration_delay):
expiration_delay = min(expiration_delay, self.MAX_DELAY * 86400) # max 1 year.
@ -50,13 +50,20 @@ class Token(StorableObject):
return id
@classmethod
def get_or_create(cls, type, context):
def get_or_create(cls, type, id=None, context=None, expiration_delay=None):
try:
return (cls.select(clause=[Equal('type', type), Equal('context', context)])[0], False)
clauses = [Equal('type', type)]
if id is not None:
clauses.append(Equal('id', id))
elif context is not None:
clauses.append(Equal('context', context))
return (cls.select(clause=clauses)[0], False)
except (IndexError, KeyError):
token = cls()
token = cls(id=id, expiration_delay=expiration_delay)
token.id = id
token.type = type
token.context = context
token.store()
return (token, True)
def migrate(self):

View File

@ -4949,7 +4949,7 @@ def get_period_total(
# latest migration, number + description (description is not used
# programmaticaly but will make sure git conflicts if two migrations are
# separately added with the same number)
SQL_LEVEL = (76, 'add index to workflow traces table')
SQL_LEVEL = (77, 'use token table for nonces')
def migrate_global_views(conn, cur):
@ -5256,6 +5256,13 @@ def migrate():
# 75 (part 2): migrate to dedicated workflow traces table
set_reindex('workflow_traces_migration', 'needed', conn=conn, cur=cur)
if sql_level < 77:
# 77: use token table for nonces
# it uses the existing tokens table, this "migration" is just to remove old files.
nonces_dir = os.path.join(get_publisher().app_dir, 'nonces')
if os.path.exists(nonces_dir):
shutil.rmtree(nonces_dir, ignore_errors=True)
if sql_level != SQL_LEVEL[0]:
cur.execute(
'''UPDATE wcs_meta SET value = %s, updated_at=NOW() WHERE key = %s''',