Commit 4a5edb7

flake8 - one var rename, import, whitespace

1 parent 97f4401 · commit 4a5edb7

File tree: 1 file changed (+24 −17 lines)

roundup/backends/indexer_dbm.py

Lines changed: 24 additions & 17 deletions
@@ -1,29 +1,35 @@
 #
 # This module is derived from the module described at:
 #   http://gnosis.cx/publish/programming/charming_python_15.txt
-# 
+#
 # Author: David Mertz ([email protected])
 # Thanks to: Pat Knight ([email protected])
 #            Gregory Popovitch ([email protected])
-# 
+#
 # The original module was released under this license, and remains under
 # it:
 #
 #     This file is released to the public domain.  I (dqm) would
 #     appreciate it if you choose to keep derived works under terms
 #     that promote freedom, but obviously am giving up any rights
 #     to compel such.
-# 
+#
 '''This module provides an indexer class, RoundupIndexer, that stores text
 indices in a roundup instance. This class makes searching the content of
 messages, string properties and text files possible.
 '''
 __docformat__ = 'restructuredtext'
 
-import os, shutil, re, mimetypes, marshal, zlib, errno
-from roundup.hyperdb import Link, Multilink
+import errno
+import marshal
+import os
+import re
+import shutil
+import zlib
+
 from roundup.backends.indexer_common import Indexer as IndexerBase
 
+
 class Indexer(IndexerBase):
     '''Indexes information from roundup's hyperdb to allow efficient
     searching.
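
The import block above is the standard flake8 import cleanup: pycodestyle's E401 flags several modules combined in one import statement, and pyflakes' F401 flags names that are imported but never used, which is presumably why mimetypes, Link and Multilink are dropped outright rather than split onto their own lines. A minimal before/after sketch, assuming those two codes are what the commit is answering:

    # Before: E401 (multiple imports on one line); unused names such as
    # mimetypes would additionally be reported as F401 (imported but unused).
    import os, shutil, re, mimetypes, marshal, zlib, errno

    # After: one module per statement, alphabetized, unused imports removed.
    import errno
    import marshal
    import os
    import re
    import shutil
    import zlib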
@@ -137,7 +143,7 @@ def text_splitter(self, text):
         """
         if not text:
             return []
-        
+
         # case insensitive
         text = text.upper()
 
@@ -163,7 +169,7 @@ def find(self, wordlist):
             if self.is_stopword(word):
                 continue
             entry = self.words.get(word)  # For each word, get index
-            entries[word] = entry # of matching files
+            entries[word] = entry  # of matching files
             if not entry:  # Nothing for this one word (fail)
                 return {}
             if hits is None:
@@ -182,19 +188,20 @@ def find(self, wordlist):
         return list(hits.values())
 
     segments = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ#_-!"
+
     def load_index(self, reload=0, wordlist=None):
         # Unless reload is indicated, do not load twice
         if self.index_loaded() and not reload:
             return 0
 
         # Ok, now let's actually load it
-        db = {'WORDS': {}, 'FILES': {'_TOP':(0,None)}, 'FILEIDS': {}}
+        db = {'WORDS': {}, 'FILES': {'_TOP': (0, None)}, 'FILEIDS': {}}
 
         # Identify the relevant word-dictionary segments
         if not wordlist:
             segments = self.segments
         else:
-            segments = ['-','#']
+            segments = ['-', '#']
             for word in wordlist:
                 initchar = word[0].upper()
                 if initchar not in self.segments:
@@ -207,7 +214,7 @@ def load_index(self, reload=0, wordlist=None):
                 f = open(self.indexdb + segment, 'rb')
             except IOError as error:
                 # probably just nonexistent segment index file
-                if error.errno != errno.ENOENT: raise
+                if error.errno != errno.ENOENT: raise  # noqa: E701
             else:
                 pickle_str = zlib.decompress(f.read())
                 f.close()
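
The '# noqa: E701' marker keeps the compact one-line re-raise while telling flake8 to skip its E701 check (multiple statements on one line after a colon) for that line only. A small self-contained sketch of the pattern; open_segment and the path argument are hypothetical, only the errno handling mirrors the code above:

    import errno

    def open_segment(path):
        '''Return an open segment file, or None if it does not exist.'''
        try:
            return open(path, 'rb')
        except IOError as error:
            # Compact form kept by the commit; flake8 reports E701 here
            # unless the noqa comment suppresses it.
            if error.errno != errno.ENOENT: raise  # noqa: E701
            return None

    # The reflowed form flake8 accepts without any noqa:
    #     if error.errno != errno.ENOENT:
    #         raise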
@@ -239,11 +246,11 @@ def save_index(self):
                 os.remove(self.indexdb + segment)
             except OSError as error:
                 # probably just nonexistent segment index file
-                if error.errno != errno.ENOENT: raise
+                if error.errno != errno.ENOENT: raise  # noqa: E701
 
         # First write the much simpler filename/fileid dictionaries
-        dbfil = {'WORDS':None, 'FILES':self.files, 'FILEIDS':self.fileids}
-        open(self.indexdb+'-','wb').write(zlib.compress(marshal.dumps(dbfil)))
+        dbfil = {'WORDS': None, 'FILES': self.files, 'FILEIDS': self.fileids}
+        open(self.indexdb+'-', 'wb').write(zlib.compress(marshal.dumps(dbfil)))
 
         # The hard part is splitting the word dictionary up, of course
         letters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ#_"
@@ -259,7 +266,7 @@ def save_index(self):
 
         # save
         for initchar in letters:
-            db = {'WORDS':segdicts[initchar], 'FILES':None, 'FILEIDS':None}
+            db = {'WORDS': segdicts[initchar], 'FILES': None, 'FILEIDS': None}
             pickle_str = marshal.dumps(db)
             filename = self.indexdb + initchar
             pickle_fh = open(filename, 'wb')
@@ -283,16 +290,16 @@ def purge_entry(self, identifier):
         del self.fileids[file_index]
 
         # The much harder part, cleanup the word index
-        for key, occurs in self.words.items():
+        for _key, occurs in self.words.items():
             if file_index in occurs:
                 del occurs[file_index]
 
         # save needed
         self.changed = 1
 
     def index_loaded(self):
-        return (hasattr(self,'fileids') and hasattr(self,'files') and
-                hasattr(self,'words'))
+        return (hasattr(self, 'fileids') and hasattr(self, 'files') and
+                hasattr(self, 'words'))
 
     def rollback(self):
         ''' load last saved index info. '''
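
The remaining whitespace edits are E231 fixes (missing whitespace after ':' in dict literals and after ','), and renaming key to _key marks a loop variable whose value is never read; core flake8 does not demand that rename, though plugins such as flake8-bugbear report unused loop variables as B007, so this reads as a readability convention here. A tiny illustration with a throwaway dict:

    # E231: flake8 wants a space after ':' and ','
    db = {'_TOP':(0,None)}        # flagged
    db = {'_TOP': (0, None)}      # clean

    # Leading underscore signals the key is deliberately unused
    for _key, occurs in db.items():
        print(occurs)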
