Commit d64340f

issue2551224 - Replace dbm db for sessions/otks when using sqlite
Generate new sqlite dbs for storing one-time keys, session data, and other ephemeral data.
1 parent 98b92e9 commit d64340f

4 files changed, +233 -5 lines changed

CHANGES.txt

Lines changed: 5 additions & 2 deletions
@@ -18,12 +18,15 @@ Fixed:
 
 - Dockerfile healthcheck fixed so it works when trackers are specified
   on command line. Also cleanup of unneeded packages.
-
+- issue2551224 - Replace dbm db for sessions and otks when using
+  sqlite. New databases are created for session data (db-session)
+  and one time key data (db-otk). The data is ephemeral so no need to
+  migrate.
 
 Features:
 
 - Dockerfile build allows adding additional python packages via pip,
-  setting UID traker is run under.
+  setting UID tracker is run under.
 
 2022-07-13 2.2.0

doc/upgrading.txt

Lines changed: 33 additions & 0 deletions
@@ -30,6 +30,39 @@ Contents:
 .. contents::
    :local:
 
+.. index:: Upgrading; 2.2.0 to 2.3.0
+
+Migrating from 2.2.0 to 2.3.0
+=============================
+
+Session/OTK data storage for SQLite backend changed
+---------------------------------------------------
+
+Roundup stores a lot of ephemeral data:
+
+* login session tokens,
+* rate limits,
+* password reset attempt tokens,
+* one time keys,
+* and anti-CSRF keys.
+
+This data was stored in dbm style files while the main data
+is stored in an SQLite db. Using both dbm and SQLite files is
+surprising and, because of how the dbm files are locked, can
+be a performance issue.
+
+In this release two SQLite databases called ``db-otk`` and
+``db-session`` replace the dbm databases. Once you make the
+change, the old ``otks`` and ``sessions`` dbm databases can
+be removed.
+
+Note that this replacement will require users to log in again
+and to refresh web pages to save data. It is best if people
+save all their changes and log out of Roundup before the
+upgrade is done, to minimize confusion. Because the data is
+ephemeral, there is no plan to migrate it to the new SQLite
+databases.
+
 .. index:: Upgrading; 2.1.0 to 2.2.0
 
 Migrating from 2.1.0 to 2.2.0
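
After the upgrade, one quick and entirely optional way to confirm the new files are in use is to open them with Python's standard ``sqlite3`` module. This is only an illustrative sketch: the tracker home path shown is an assumption (adjust it to your installation), and the ``db-session``/``db-otk`` names are the ones described above.

    # Illustrative only: list the tables in the new ephemeral-data databases.
    import os
    import sqlite3

    tracker_db_dir = '/path/to/tracker/db'   # assumed tracker 'db' directory

    for fname in ('db-session', 'db-otk'):
        path = os.path.join(tracker_db_dir, fname)
        conn = sqlite3.connect(path)
        tables = conn.execute(
            "SELECT name FROM sqlite_master WHERE type='table'").fetchall()
        print(fname, [name for (name,) in tables])
        conn.close()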

roundup/backends/back_sqlite.py

Lines changed: 6 additions & 3 deletions
@@ -12,7 +12,7 @@
 
 from roundup import hyperdb, date, password
 from roundup.backends import rdbms_common
-from roundup.backends.sessions_dbm import Sessions, OneTimeKeys
+from roundup.backends.sessions_sqlite import Sessions, OneTimeKeys
 from roundup.anypy.strings import uany2s
 
 sqlite_version = None
@@ -128,7 +128,7 @@ def sqlite_busy_handler(self, data, table, count):
         time.sleep(time_to_sleep)
         return 1
 
-    def sql_open_connection(self):
+    def sql_open_connection(self, dbname=None):
         """Open a standard, non-autocommitting connection.
 
         pysqlite will automatically BEGIN TRANSACTION for us.
@@ -138,7 +138,10 @@ def sql_open_connection(self):
         if not os.path.isdir(self.config.DATABASE):
             os.makedirs(self.config.DATABASE)
 
-        db = os.path.join(self.config.DATABASE, 'db')
+        if dbname:
+            db = os.path.join(self.config.DATABASE, 'db-' + dbname)
+        else:
+            db = os.path.join(self.config.DATABASE, 'db')
         logging.getLogger('roundup.hyperdb').info('open database %r' % db)
         # set timeout (30 second default is extraordinarily generous)
         # for handling locked database
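
The only behavioural change here is which file name gets opened: the main data stays in ``db``, while the session and OTK stores ask for ``db-session`` and ``db-otk``. A minimal standalone sketch of just that path selection (``_database_path`` is a hypothetical helper and ``database_dir`` stands in for ``config.DATABASE``; the real method also opens the connection, sets timeouts, etc.):

    import os

    def _database_path(database_dir, dbname=None):
        # Mirrors the path logic added to sql_open_connection(): the main
        # data stays in 'db', ephemeral stores get 'db-<name>'.
        if dbname:
            return os.path.join(database_dir, 'db-' + dbname)
        return os.path.join(database_dir, 'db')

    print(_database_path('/path/to/tracker/db'))             # .../db
    print(_database_path('/path/to/tracker/db', 'session'))  # .../db-session
    print(_database_path('/path/to/tracker/db', 'otk'))      # .../db-otk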
roundup/backends/sessions_sqlite.py

Lines changed: 189 additions & 0 deletions
@@ -0,0 +1,189 @@
"""This module defines a very basic store that's used by the CGI interface
to store session and one-time-key information.

Yes, it's called "sessions" - because originally it only defined a session
class. It's now also used for One Time Key handling too.

We needed to split commits to the session/OTK database from commits on the
main db structures (user data). This required two connections to the
sqlite db, which wasn't supported. This module was created so sqlite
didn't have to use dbm for the session/otk data. It hopefully will
provide a performance speedup.
"""
__docformat__ = 'restructuredtext'

import os, time, logging

from roundup.anypy.html import html_escape as escape


class BasicDatabase:
    ''' Provide a nice encapsulation of an RDBMS table.

        Keys are id strings, values are automatically marshalled data.
    '''
    name = None

    def __init__(self, db):
        self.db = db
        self.conn, self.cursor = self.db.sql_open_connection(dbname=self.name)

        self.sql('''SELECT name FROM sqlite_master WHERE type='table' AND name='%ss';''' % self.name)
        table_exists = self.cursor.fetchone()

        if not table_exists:
            # create table/rows etc.
            self.sql('''CREATE TABLE %(name)ss (%(name)s_key VARCHAR(255),
                %(name)s_value TEXT, %(name)s_time REAL)''' % {"name": self.name})
            self.sql('CREATE INDEX %(name)s_key_idx ON %(name)ss(%(name)s_key)'
                     % {"name": self.name})
            self.commit()

    def log_debug(self, msg, *args, **kwargs):
        """Log a message with level DEBUG."""

        logger = self.get_logger()
        logger.debug(msg, *args, **kwargs)

    def log_info(self, msg, *args, **kwargs):
        """Log a message with level INFO."""

        logger = self.get_logger()
        logger.info(msg, *args, **kwargs)

    def get_logger(self):
        """Return the logger for this database."""

        # Because getting a logger requires acquiring a lock, we want
        # to do it only once.
        if not hasattr(self, '__logger'):
            self.__logger = logging.getLogger('roundup')

        return self.__logger

    def sql(self, sql, args=None, cursor=None):
        """ Execute the sql with the optional args.
        """
        self.log_debug('SQL %r %r' % (sql, args))
        if not cursor:
            cursor = self.cursor
        if args:
            cursor.execute(sql, args)
        else:
            cursor.execute(sql)

    def clear(self):
        self.cursor.execute('delete from %ss' % self.name)

    def exists(self, infoid):
        n = self.name
        self.cursor.execute('select count(*) from %ss where %s_key=%s' % (
            n, n, self.db.arg), (infoid,))
        return int(self.cursor.fetchone()[0])

    _marker = []

    def get(self, infoid, value, default=_marker):
        n = self.name
        self.cursor.execute('select %s_value from %ss where %s_key=%s' % (
            n, n, n, self.db.arg), (infoid,))
        res = self.cursor.fetchone()
        if not res:
            if default != self._marker:
                return default
            raise KeyError('No such %s "%s"' % (self.name, escape(infoid)))
        values = eval(res[0])
        return values.get(value, None)

    def getall(self, infoid):
        n = self.name
        self.cursor.execute('select %s_value from %ss where %s_key=%s' % (
            n, n, n, self.db.arg), (infoid,))
        res = self.cursor.fetchone()
        if not res:
            raise KeyError('No such %s "%s"' % (self.name, escape(infoid)))
        return eval(res[0])

    def set(self, infoid, **newvalues):
        """ Store all newvalues under key infoid with a timestamp in database.

            If newvalues['__timestamp'] exists and is representable as a
            floating point number (i.e. could be generated by time.time()),
            that value is used for the <name>_time column in the database.
        """
        c = self.cursor
        n = self.name
        a = self.db.arg
        c.execute('select %s_value from %ss where %s_key=%s' % (n, n, n, a),
                  (infoid,))
        res = c.fetchone()
        if res:
            values = eval(res[0])
        else:
            values = {}
        values.update(newvalues)

        if res:
            sql = 'update %ss set %s_value=%s where %s_key=%s' % (n, n,
                                                                  a, n, a)
            args = (repr(values), infoid)
        else:
            if '__timestamp' in newvalues:
                try:
                    # __timestamp must be representable as a float. Check it.
                    timestamp = float(newvalues['__timestamp'])
                except ValueError:
                    timestamp = time.time()
            else:
                timestamp = time.time()

            sql = 'insert into %ss (%s_key, %s_time, %s_value) '\
                  'values (%s, %s, %s)' % (n, n, n, n, a, a, a)
            args = (infoid, timestamp, repr(values))
        c.execute(sql, args)

    def list(self):
        c = self.cursor
        n = self.name
        c.execute('select %s_key from %ss' % (n, n))
        return [res[0] for res in c.fetchall()]

    def destroy(self, infoid):
        self.cursor.execute('delete from %ss where %s_key=%s' % (
            self.name, self.name, self.db.arg), (infoid,))

    def updateTimestamp(self, infoid):
        """ don't update every hit - once a minute should be OK """
        now = time.time()
        self.cursor.execute('''update %ss set %s_time=%s where %s_key=%s
            and %s_time < %s''' % (self.name, self.name, self.db.arg,
                                   self.name, self.db.arg, self.name,
                                   self.db.arg),
            (now, infoid, now - 60))

    def clean(self):
        ''' Remove session records that haven't been used for a week. '''
        now = time.time()
        week = 60*60*24*7
        old = now - week
        self.cursor.execute('delete from %ss where %s_time < %s' % (
            self.name, self.name, self.db.arg), (old,))

    def lifetime(self, key_lifetime=None):
        """Return the proper timestamp for a key with key_lifetime specified
           in seconds.
        """
        now = time.time()
        week = 60*60*24*7
        return now - week + key_lifetime

    def commit(self):
        logger = logging.getLogger('roundup.hyperdb.backend')
        logger.info('commit %s' % self.name)
        self.conn.commit()
        self.cursor = self.conn.cursor()

    def close(self):
        self.conn.close()


class Sessions(BasicDatabase):
    name = 'session'


class OneTimeKeys(BasicDatabase):
    name = 'otk'

# vim: set et sts=4 sw=4 :
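
For orientation, a rough usage sketch of the new classes (not part of the commit). It assumes ``db`` is an open SQLite backend instance, i.e. anything providing ``sql_open_connection()`` and ``arg`` the way ``back_sqlite.Database`` does:

    import time
    from roundup.backends.sessions_sqlite import Sessions, OneTimeKeys

    def exercise_session_store(db):
        """Illustrative only: db is an open back_sqlite.Database instance."""
        sessions = Sessions(db)            # opens (or creates) db-session
        otks = OneTimeKeys(db)             # opens (or creates) db-otk

        # values are stored per key and round-trip via repr()/eval()
        sessions.set('abc123', user='admin', last_use=time.time())
        print(sessions.getall('abc123'))       # {'user': 'admin', 'last_use': ...}
        print(sessions.get('abc123', 'user'))  # 'admin'

        sessions.updateTimestamp('abc123')     # refresh, at most once a minute
        sessions.commit()                      # commits only the session db

        sessions.clean()                       # drop records idle for over a week
        sessions.destroy('abc123')
        sessions.close()
        otks.close()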
