[gen] Bugfixes in the search machinery.
This commit is contained in:
parent
cf2cbc52d6
commit
225ea927a4
16 changed files with 81 additions and 43 deletions
|
@ -2,6 +2,7 @@
|
|||
indexed.'''
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
from appy.gen.utils import splitIntoWords
|
||||
from appy.shared.xml_parser import XmlParser
|
||||
from appy.shared.utils import normalizeText
|
||||
|
||||
|
@ -68,23 +69,6 @@ def updateIndexes(installer, indexInfo):
|
|||
catalog.reindexIndex(indexName, installer.app.REQUEST)
|
||||
logger.info('Done.')
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
def splitIntoWords(text, ignore=2):
|
||||
'''Split the cleaned index value p_text into words (returns a set of
|
||||
words). Words whose length is below p_ignore are ignored, except for digits
|
||||
which are always kept. Duplicate words are removed (result is a set and
|
||||
not a list).'''
|
||||
# Split p_text into words
|
||||
res = text.split()
|
||||
# Remove shorter words that are not digits
|
||||
i = len(res) - 1
|
||||
while i > -1:
|
||||
if (len(res[i]) <= ignore) and not res[i].isdigit():
|
||||
del res[i]
|
||||
i -= 1
|
||||
# Remove duplicates
|
||||
return set(res)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
class XhtmlTextExtractor(XmlParser):
|
||||
'''Extracts text from XHTML.'''
|
||||
|
|
|
@ -141,7 +141,7 @@ class ZopeInstaller:
|
|||
wrapperClass = tool.getAppyClass(className, wrapper=True)
|
||||
indexInfo.update(wrapperClass.getIndexes(includeDefaults=False))
|
||||
updateIndexes(self, indexInfo)
|
||||
# Re-index index "SearchableText", wrongly defined for Appy < 0.8.3.
|
||||
# Re-index index "SearchableText", wrongly defined for Appy < 0.8.3
|
||||
stIndex = catalog.Indexes['SearchableText']
|
||||
if stIndex.indexSize() == 0:
|
||||
self.logger.info('reindexing SearchableText...')
|
||||
|
@ -283,13 +283,13 @@ class ZopeInstaller:
|
|||
# "select" field, because it will be necessary for displaying the
|
||||
# translated state name.
|
||||
state = gen.String(validator=gen.Selection('listStates'),
|
||||
show='result')
|
||||
show='result', persist=False, indexed=True)
|
||||
state.init('state', None, 'workflow')
|
||||
setattr(wrapperClass, 'state', state)
|
||||
# Special field "SearchableText" must be added for every class and
|
||||
# will allow displaying a search widget for entering keywords for
|
||||
# searching in index "SearchableText".
|
||||
searchable = gen.String(show=False)
|
||||
searchable = gen.String(show=False, persist=False, indexed=True)
|
||||
searchable.init('SearchableText', None, 'appy')
|
||||
setattr(wrapperClass, 'SearchableText', searchable)
|
||||
# Set field "__fields__" on the wrapper class
|
||||
|
|
|
@ -1037,6 +1037,7 @@ class BaseMixin:
|
|||
'''Gets the i18n label for p_name (which can denote a state or a
|
||||
transition), or for the current object state if p_name is None.'''
|
||||
name = name or self.State()
|
||||
if name == 'create_from_predecessor': return name
|
||||
return '%s_%s' % (self.getWorkflow(name=True), name)
|
||||
|
||||
def getTransitions(self, includeFake=True, includeNotShowable=False,
|
||||
|
|
|
@ -515,6 +515,10 @@ msgstr ""
|
|||
msgid "action_comment"
|
||||
msgstr ""
|
||||
|
||||
#. Default: "Create from a delayed item"
|
||||
msgid "create_from_predecessor"
|
||||
msgstr ""
|
||||
|
||||
#. Default: "Mon"
|
||||
msgid "day_Mon_short"
|
||||
msgstr ""
|
||||
|
|
|
@ -515,6 +515,10 @@ msgstr ""
|
|||
msgid "action_comment"
|
||||
msgstr ""
|
||||
|
||||
#. Default: "Create from a delayed item"
|
||||
msgid "create_from_predecessor"
|
||||
msgstr ""
|
||||
|
||||
#. Default: "Mon"
|
||||
msgid "day_Mon_short"
|
||||
msgstr ""
|
||||
|
|
|
@ -515,6 +515,10 @@ msgstr ""
|
|||
msgid "action_comment"
|
||||
msgstr ""
|
||||
|
||||
#. Default: "Create from a delayed item"
|
||||
msgid "create_from_predecessor"
|
||||
msgstr ""
|
||||
|
||||
#. Default: "Mon"
|
||||
msgid "day_Mon_short"
|
||||
msgstr ""
|
||||
|
|
|
@ -516,6 +516,10 @@ msgstr "Date"
|
|||
msgid "action_comment"
|
||||
msgstr "Comment"
|
||||
|
||||
#. Default: "Create from a delayed item"
|
||||
msgid "create_from_predecessor"
|
||||
msgstr "Create from a delayed item"
|
||||
|
||||
#. Default: "Mon"
|
||||
msgid "day_Mon_short"
|
||||
msgstr "Mon"
|
||||
|
|
|
@ -515,6 +515,10 @@ msgstr ""
|
|||
msgid "action_comment"
|
||||
msgstr ""
|
||||
|
||||
#. Default: "Create from a delayed item"
|
||||
msgid "create_from_predecessor"
|
||||
msgstr ""
|
||||
|
||||
#. Default: "Mon"
|
||||
msgid "day_Mon_short"
|
||||
msgstr ""
|
||||
|
|
|
@ -516,6 +516,10 @@ msgstr "Date"
|
|||
msgid "action_comment"
|
||||
msgstr "Commentaire"
|
||||
|
||||
#. Default: "Create from a delayed item"
|
||||
msgid "create_from_predecessor"
|
||||
msgstr "Créer depuis un point reporté"
|
||||
|
||||
#. Default: "Mon"
|
||||
msgid "day_Mon_short"
|
||||
msgstr "Lun"
|
||||
|
|
|
@ -515,6 +515,10 @@ msgstr ""
|
|||
msgid "action_comment"
|
||||
msgstr ""
|
||||
|
||||
#. Default: "Create from a delayed item"
|
||||
msgid "create_from_predecessor"
|
||||
msgstr ""
|
||||
|
||||
#. Default: "Mon"
|
||||
msgid "day_Mon_short"
|
||||
msgstr ""
|
||||
|
|
|
@ -515,6 +515,10 @@ msgstr "Datum"
|
|||
msgid "action_comment"
|
||||
msgstr "Commentaar"
|
||||
|
||||
#. Default: "Create from a delayed item"
|
||||
msgid "create_from_predecessor"
|
||||
msgstr ""
|
||||
|
||||
#. Default: "Mon"
|
||||
msgid "day_Mon_short"
|
||||
msgstr "Maa"
|
||||
|
|
23
gen/utils.py
23
gen/utils.py
|
@ -91,18 +91,35 @@ class SomeObjects:
|
|||
else: getMethod = 'getObject'
|
||||
self.objects = [getattr(b, getMethod)() for b in brains]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
def splitIntoWords(text, ignore=2):
|
||||
'''Split the cleaned index value p_text into words (returns a set of
|
||||
words). Words whose length is below p_ignore are ignored, except for digits
|
||||
which are always kept. Duplicate words are removed (result is a set and
|
||||
not a list).'''
|
||||
# Split p_text into words
|
||||
res = text.split()
|
||||
# Remove shorter words that are not digits
|
||||
i = len(res) - 1
|
||||
while i > -1:
|
||||
if (len(res[i]) <= ignore) and not res[i].isdigit():
|
||||
del res[i]
|
||||
i -= 1
|
||||
# Remove duplicates
|
||||
return set(res)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
class Keywords:
|
||||
'''This class handles keywords that a user enters and that will be
|
||||
used as a basis for performing requests in a TextIndex/XhtmlIndex.'''
|
||||
|
||||
toRemove = '?-+*()'
|
||||
def __init__(self, keywords, operator='AND'):
|
||||
# Clean the p_keywords that the user has entered.
|
||||
def __init__(self, keywords, operator='AND', ignore=2):
|
||||
# Clean the p_keywords that the user has entered
|
||||
words = sutils.normalizeText(keywords)
|
||||
if words == '*': words = ''
|
||||
for c in self.toRemove: words = words.replace(c, ' ')
|
||||
self.keywords = words.split()
|
||||
self.keywords = splitIntoWords(words, ignore=ignore)
|
||||
# Store the operator to apply to the keywords (AND or OR)
|
||||
self.operator = operator
|
||||
|
||||
|
|
|
@ -302,7 +302,7 @@ class AbstractWrapper(object):
|
|||
# PXs for rendering graphical elements tied to a given object
|
||||
# --------------------------------------------------------------------------
|
||||
|
||||
# This PX displays an object's history.
|
||||
# This PX displays an object's history
|
||||
pxHistory = Px('''
|
||||
<x var="startNumber=req.get('startNumber', 0);
|
||||
startNumber=int(startNumber);
|
||||
|
@ -703,14 +703,15 @@ class AbstractWrapper(object):
|
|||
applicable to instances of this class, and whose values are the
|
||||
(Zope) types of those indexes. If p_asList is True, it returns a
|
||||
list of tuples instead of a dict.'''
|
||||
# Start with the standard indexes applicable for any Appy class.
|
||||
# Start with the standard indexes applicable for any Appy class
|
||||
if includeDefaults:
|
||||
res = defaultIndexes.copy()
|
||||
else:
|
||||
res = {}
|
||||
# Add the indexed fields found on this class
|
||||
for field in klass.__fields__:
|
||||
if not field.indexed or (field.name == 'title'): continue
|
||||
if not field.indexed or \
|
||||
(field.name in ('title', 'state', 'SearchableText')): continue
|
||||
n = field.name
|
||||
indexName = 'get%s%s' % (n[0].upper(), n[1:])
|
||||
res[indexName] = field.getIndexType()
|
||||
|
@ -928,17 +929,20 @@ class AbstractWrapper(object):
|
|||
return appyObj
|
||||
|
||||
def createFrom(self, fieldNameOrClass, other, noSecurity=False,
|
||||
executeMethods=True):
|
||||
executeMethods=True, exclude=()):
|
||||
'''Similar to m_create above, excepted that we will use another object
|
||||
(p_other) as base for filling in data for the object to create.'''
|
||||
(p_other) as base for filling in data for the object to create.
|
||||
p_exclude can list fields (by their names) that will not be
|
||||
copied on p_other. Note that this method does not perform a deep
|
||||
copy: objects linked via Ref fields from p_self will be
|
||||
referenced by the clone, but not themselves copied.'''
|
||||
# Get the field values to set from p_other and store it in a dict.
|
||||
# p_other may not be of the same class as p_self.
|
||||
params = {}
|
||||
for field in other.fields:
|
||||
# Skip the added attribute "state"
|
||||
if field.name == 'state': continue
|
||||
# Skip back references.
|
||||
if (field.type == 'Ref') and field.isBack: continue
|
||||
# Skip non persistent fields, back references and p_excluded fields
|
||||
if not field.persist or (field.name in exclude) or \
|
||||
((field.type == 'Ref') and field.isBack): continue
|
||||
params[field.name] = field.getCopyValue(other.o)
|
||||
return self.create(fieldNameOrClass, noSecurity=noSecurity,
|
||||
raiseOnWrongAttribute=False,
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue