[gen] Bugfixes in the search machinery.
parent cf2cbc52d6
commit 225ea927a4
@@ -39,7 +39,7 @@ class Computed(Field):
                  masterValue=None, focus=False, historized=False, mapping=None,
                  label=None, sdefault='', scolspan=1, swidth=None, sheight=None,
                  context=None, view=None, xml=None):
-        # The Python method used for computing the field value, or a PX.
+        # The Python method used for computing the field value, or a PX
         self.method = method
         # A specific method for producing the formatted value of this field.
         # This way, if, for example, the value is a DateTime instance which is
@@ -53,7 +53,7 @@ class Computed(Field):
         # Does field computation produce plain text or XHTML?
         self.plainText = plainText
         if isinstance(method, Px):
-            # When field computation is done with a PX, the result is XHTML.
+            # When field computation is done with a PX, the result is XHTML
             self.plainText = False
         # Determine default value for "show"
         if show == None:
@@ -62,7 +62,7 @@ class Computed(Field):
             # in the xml layout.
             show = self.plainText and ('view', 'result', 'xml') or \
                    ('view', 'result')
-        # If method is a PX, its context can be given in p_context.
+        # If method is a PX, its context can be given in p_context
         self.context = context
         Field.__init__(self, None, multiplicity, default, show, page, group,
                        layouts, move, indexed, mustIndex, searchable,
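Note: the comments changed above concern the Computed field, whose method can be a regular Python method or a PX; a PX always yields XHTML, which in turn drives the default value of "show". The following standalone sketch is illustration only (default_show and the is_px flag are made-up names, not Appy code); it just mirrors the default-"show" rule visible in the hunk.

# Illustration only: mirrors the default-"show" logic shown above.
def default_show(plain_text, is_px):
    if is_px:
        # A PX always produces XHTML, so plain-text output is switched off
        plain_text = False
    # Plain-text results are also shown on the "xml" layout by default
    return ('view', 'result', 'xml') if plain_text else ('view', 'result')

print(default_show(plain_text=True, is_px=False))  # ('view', 'result', 'xml')
print(default_show(plain_text=True, is_px=True))   # ('view', 'result')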
@@ -275,9 +275,9 @@ class Pod(Field):
                        move, False, True, False, specificReadPermission,
                        specificWritePermission, width, height, None, colspan,
                        master, masterValue, focus, historized, mapping, label,
-                       None, None, None, None, True, view, xml)
-        # Param "persist" is set to True but actually, persistence for a pod
-        # field is determined by freezing.
+                       None, None, None, None, False, view, xml)
+        # Param "persist" is False, but actual persistence for this field is
+        # determined by freezing.
         self.validable = False

     def getExtension(self, template):
@@ -603,7 +603,7 @@ class Pod(Field):
           when no p_upload file is specified), if the freezing fails we try to
           freeze the odt version, which is more robust because it does not
           require calling LibreOffice.'''
-        # Security check.
+        # Security check
         if not noSecurity and \
            (format not in self.getFreezeFormats(obj, template)):
             raise Exception(self.UNAUTHORIZED)
@@ -613,10 +613,10 @@ class Pod(Field):
         fileName = self.getFreezeName(template, format)
         result = os.path.join(dbFolder, folder, fileName)
         if os.path.exists(result):
-            prefix = upload and 'Freeze (upload)' or 'Freeze'
+            prefix = upload and 'freeze (upload)' or 'freeze'
             obj.log('%s: overwriting %s...' % (prefix, result))
         if not upload:
-            # Generate the document.
+            # Generate the document
             doc = self.getValue(obj, template=template, format=format,
                                 result=result)
             if isinstance(doc, basestring):
@@ -640,7 +640,7 @@ class Pod(Field):
                 raise Exception(self.FREEZE_FATAL_ERROR)
             obj.log('freezed at %s.' % result)
         else:
-            # Store the uploaded file in the database.
+            # Store the uploaded file in the database
             f = file(result, 'wb')
             doc = FileInfo(result, inDb=False)
             doc.replicateFile(upload, f)
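Note: the docstring context above describes a fallback: if freezing the requested format fails, the odt version is frozen instead because it does not require calling LibreOffice. The sketch below is not the actual Pod.freeze() implementation; generate and FreezeError are hypothetical names used only to illustrate that fallback pattern.

# Sketch only -- not the actual Pod code.
class FreezeError(Exception):
    pass

def freeze_with_fallback(generate, template, fmt='pdf'):
    '''Try to freeze p_template in p_fmt; if that fails, fall back to "odt",
       which does not require calling LibreOffice.'''
    try:
        return generate(template, fmt)
    except FreezeError:
        if fmt == 'odt':
            raise  # Nothing more robust left to try
        return generate(template, 'odt')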
@@ -2,6 +2,7 @@
    indexed.'''

 # ------------------------------------------------------------------------------
+from appy.gen.utils import splitIntoWords
 from appy.shared.xml_parser import XmlParser
 from appy.shared.utils import normalizeText

@@ -68,23 +69,6 @@ def updateIndexes(installer, indexInfo):
         catalog.reindexIndex(indexName, installer.app.REQUEST)
     logger.info('Done.')

-# ------------------------------------------------------------------------------
-def splitIntoWords(text, ignore=2):
-    '''Split the cleaned index value p_text into words (returns a list of
-       words). Words whose length is below p_ignore are ignored, excepted digits
-       which are always kept. Duplicate words are removed (result is a set and
-       not a list).'''
-    # Split p_text into words
-    res = text.split()
-    # Remove shorter words not being figures
-    i = len(res) - 1
-    while i > -1:
-        if (len(res[i]) <= ignore) and not res[i].isdigit():
-            del res[i]
-        i -= 1
-    # Remove duplicates
-    return set(res)
-
 # ------------------------------------------------------------------------------
 class XhtmlTextExtractor(XmlParser):
     '''Extracts text from XHTML.'''
@@ -141,7 +141,7 @@ class ZopeInstaller:
         wrapperClass = tool.getAppyClass(className, wrapper=True)
         indexInfo.update(wrapperClass.getIndexes(includeDefaults=False))
         updateIndexes(self, indexInfo)
-        # Re-index index "SearchableText", wrongly defined for Appy < 0.8.3.
+        # Re-index index "SearchableText", wrongly defined for Appy < 0.8.3
         stIndex = catalog.Indexes['SearchableText']
         if stIndex.indexSize() == 0:
             self.logger.info('reindexing SearchableText...')
@@ -283,13 +283,13 @@ class ZopeInstaller:
             # "select" field, because it will be necessary for displaying the
             # translated state name.
             state = gen.String(validator=gen.Selection('listStates'),
-                               show='result')
+                               show='result', persist=False, indexed=True)
             state.init('state', None, 'workflow')
             setattr(wrapperClass, 'state', state)
             # Special field "SearchableText" must be added fot every class and
             # will allow to display a search widget for entering keywords for
             # searhing in index "SearchableText".
-            searchable = gen.String(show=False)
+            searchable = gen.String(show=False, persist=False, indexed=True)
             searchable.init('SearchableText', None, 'appy')
             setattr(wrapperClass, 'SearchableText', searchable)
             # Set field "__fields__" on the wrapper class
@@ -1037,6 +1037,7 @@ class BaseMixin:
         '''Gets the i18n label for p_name (which can denote a state or a
            transition), or for the current object state if p_name is None.'''
         name = name or self.State()
+        if name == 'create_from_predecessor': return name
         return '%s_%s' % (self.getWorkflow(name=True), name)

     def getTransitions(self, includeFake=True, includeNotShowable=False,
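Note: the getLabel() change above special-cases the new "create_from_predecessor" message: unlike state and transition labels, it is not prefixed with the workflow name, which matches the .po entries added below. A standalone illustration (the workflow name 'TaskWorkflow' is hypothetical):

# Illustration of the label rule from the hunk above.
def get_label(name, workflow='TaskWorkflow'):
    if name == 'create_from_predecessor':
        # Global message, not tied to a given workflow
        return name
    return '%s_%s' % (workflow, name)

print(get_label('created'))                  # TaskWorkflow_created
print(get_label('create_from_predecessor'))  # create_from_predecessor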
@@ -515,6 +515,10 @@ msgstr ""
 msgid "action_comment"
 msgstr ""

+#. Default: "Create from a delayed item"
+msgid "create_from_predecessor"
+msgstr ""
+
 #. Default: "Mon"
 msgid "day_Mon_short"
 msgstr ""

@@ -515,6 +515,10 @@ msgstr ""
 msgid "action_comment"
 msgstr ""

+#. Default: "Create from a delayed item"
+msgid "create_from_predecessor"
+msgstr ""
+
 #. Default: "Mon"
 msgid "day_Mon_short"
 msgstr ""

@@ -515,6 +515,10 @@ msgstr ""
 msgid "action_comment"
 msgstr ""

+#. Default: "Create from a delayed item"
+msgid "create_from_predecessor"
+msgstr ""
+
 #. Default: "Mon"
 msgid "day_Mon_short"
 msgstr ""

@@ -516,6 +516,10 @@ msgstr "Date"
 msgid "action_comment"
 msgstr "Comment"

+#. Default: "Create from a delayed item"
+msgid "create_from_predecessor"
+msgstr "Create from a delayed item"
+
 #. Default: "Mon"
 msgid "day_Mon_short"
 msgstr "Mon"

@@ -515,6 +515,10 @@ msgstr ""
 msgid "action_comment"
 msgstr ""

+#. Default: "Create from a delayed item"
+msgid "create_from_predecessor"
+msgstr ""
+
 #. Default: "Mon"
 msgid "day_Mon_short"
 msgstr ""

@@ -516,6 +516,10 @@ msgstr "Date"
 msgid "action_comment"
 msgstr "Commentaire"

+#. Default: "Create from a delayed item"
+msgid "create_from_predecessor"
+msgstr "Créer depuis un point reporté"
+
 #. Default: "Mon"
 msgid "day_Mon_short"
 msgstr "Lun"

@@ -515,6 +515,10 @@ msgstr ""
 msgid "action_comment"
 msgstr ""

+#. Default: "Create from a delayed item"
+msgid "create_from_predecessor"
+msgstr ""
+
 #. Default: "Mon"
 msgid "day_Mon_short"
 msgstr ""

@@ -515,6 +515,10 @@ msgstr "Datum"
 msgid "action_comment"
 msgstr "Commentaar"

+#. Default: "Create from a delayed item"
+msgid "create_from_predecessor"
+msgstr ""
+
 #. Default: "Mon"
 msgid "day_Mon_short"
 msgstr "Maa"
gen/utils.py
@@ -91,18 +91,35 @@ class SomeObjects:
         else: getMethod = 'getObject'
         self.objects = [getattr(b, getMethod)() for b in brains]

+# ------------------------------------------------------------------------------
+def splitIntoWords(text, ignore=2):
+    '''Split the cleaned index value p_text into words (returns a list of
+       words). Words whose length is below p_ignore are ignored, excepted digits
+       which are always kept. Duplicate words are removed (result is a set and
+       not a list).'''
+    # Split p_text into words
+    res = text.split()
+    # Remove shorter words not being figures
+    i = len(res) - 1
+    while i > -1:
+        if (len(res[i]) <= ignore) and not res[i].isdigit():
+            del res[i]
+        i -= 1
+    # Remove duplicates
+    return set(res)
+
 # ------------------------------------------------------------------------------
 class Keywords:
     '''This class allows to handle keywords that a user enters and that will be
        used as basis for performing requests in a TextIndex/XhtmlIndex.'''

     toRemove = '?-+*()'
-    def __init__(self, keywords, operator='AND'):
-        # Clean the p_keywords that the user has entered.
+    def __init__(self, keywords, operator='AND', ignore=2):
+        # Clean the p_keywords that the user has entered
         words = sutils.normalizeText(keywords)
         if words == '*': words = ''
         for c in self.toRemove: words = words.replace(c, ' ')
-        self.keywords = words.split()
+        self.keywords = splitIntoWords(words, ignore=ignore)
         # Store the operator to apply to the keywords (AND or OR)
         self.operator = operator

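Note: splitIntoWords now lives in gen/utils.py and is reused by Keywords, so the keywords a user types are chunked with the same rule that is applied to index values: words of length <= ignore are dropped unless they are digits, and duplicates collapse into a set. The function added above can be exercised standalone (the code below copies the diff; only the sample text is made up):

# Copy of splitIntoWords from the hunk above, for illustration.
def splitIntoWords(text, ignore=2):
    res = text.split()
    i = len(res) - 1
    while i > -1:
        if (len(res[i]) <= ignore) and not res[i].isdigit():
            del res[i]
        i -= 1
    return set(res)

words = splitIntoWords('the 12 search bugs of appy')
# 'of' is dropped (2 chars, not a digit); '12' is kept because it is digits;
# the result is a set: {'the', '12', 'search', 'bugs', 'appy'}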
@@ -302,7 +302,7 @@ class AbstractWrapper(object):
     # PXs for rendering graphical elements tied to a given object
     # --------------------------------------------------------------------------

-    # This PX displays an object's history.
+    # This PX displays an object's history
     pxHistory = Px('''
      <x var="startNumber=req.get('startNumber', 0);
              startNumber=int(startNumber);
@@ -703,14 +703,15 @@ class AbstractWrapper(object):
            applicable to instances of this class, and whose values are the
            (Zope) types of those indexes. If p_asList is True, it returns a
            list of tuples insteadof a dict.'''
-        # Start with the standard indexes applicable for any Appy class.
+        # Start with the standard indexes applicable for any Appy class
         if includeDefaults:
             res = defaultIndexes.copy()
         else:
             res = {}
         # Add the indexed fields found on this class
         for field in klass.__fields__:
-            if not field.indexed or (field.name == 'title'): continue
+            if not field.indexed or \
+               (field.name in ('title', 'state', 'SearchableText')): continue
             n = field.name
             indexName = 'get%s%s' % (n[0].upper(), n[1:])
             res[indexName] = field.getIndexType()
@@ -928,17 +929,20 @@ class AbstractWrapper(object):
         return appyObj

     def createFrom(self, fieldNameOrClass, other, noSecurity=False,
-                   executeMethods=True):
+                   executeMethods=True, exclude=()):
         '''Similar to m_create above, excepted that we will use another object
-           (p_other) as base for filling in data for the object to create.'''
+           (p_other) as base for filling in data for the object to create.
+           p_exclude can list fields (by their names) that will not be
+           copied on p_other. Note that this method does not perform a deep
+           copy: objects linked via Ref fields from p_self will be
+           referenced by the clone, but not themselves copied.'''
         # Get the field values to set from p_other and store it in a dict.
         # p_other may not be of the same class as p_self.
         params = {}
         for field in other.fields:
-            # Skip the added attribute "state"
-            if field.name == 'state': continue
-            # Skip back references.
-            if (field.type == 'Ref') and field.isBack: continue
+            # Skip non persistent fields, back references and p_excluded fields
+            if not field.persist or (field.name in exclude) or \
+               ((field.type == 'Ref') and field.isBack): continue
             params[field.name] = field.getCopyValue(other.o)
         return self.create(fieldNameOrClass, noSecurity=noSecurity,
                            raiseOnWrongAttribute=False,
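Two notes on the hunks above. getIndexes() derives a catalog index name from a field name by capitalizing its first letter and prefixing "get"; 'title', 'state' and 'SearchableText' are now skipped there. And createFrom() gained a p_exclude parameter: a call like self.createFrom('tasks', other, exclude=('history',)) (hypothetical field names) would copy every persistent, non-back-Ref field except 'history'. The index-name rule, standalone:

# Illustration of the index-name derivation used in getIndexes() above;
# the sample field names are made up.
def indexNameFor(fieldName):
    return 'get%s%s' % (fieldName[0].upper(), fieldName[1:])

print(indexNameFor('dueDate'))  # getDueDate
print(indexNameFor('budget'))   # getBudget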
@@ -217,7 +217,7 @@ def executeCommand(cmd):
     return res

 # ------------------------------------------------------------------------------
-charsIgnore = u'.,:;*+=~?%^\'’"<>{}[]|\t\\°'
+charsIgnore = u'.,:;*+=~?%^\'’"<>{}[]|\t\\°-'
 fileNameIgnore = charsIgnore + u' $£€/'
 extractIgnore = charsIgnore + '()'
 alphaRex = re.compile('[a-zA-Z]')
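The last hunk adds '-' to charsIgnore. Assuming normalizeText() treats the characters in this set as separators (the Keywords code above applies the same kind of replacement for its own toRemove set), hyphenated terms would now be split into separate indexable words. Hypothetical illustration only; chars_ignore below is a small made-up subset of the real charsIgnore:

# Assumption: characters in charsIgnore are replaced with spaces before indexing.
chars_ignore = u'.,:;-'

def strip_ignored(text, ignore=chars_ignore):
    for c in ignore:
        text = text.replace(c, ' ')
    return text

print(strip_ignored(u'e-mail, follow-up'))  # u'e mail  follow up'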