[gen] Bugfixes in the search machinery.

This commit is contained in:
Gaetan Delannay 2015-01-02 16:16:48 +01:00
parent cf2cbc52d6
commit 225ea927a4
16 changed files with 81 additions and 43 deletions

View file

@ -39,7 +39,7 @@ class Computed(Field):
masterValue=None, focus=False, historized=False, mapping=None,
label=None, sdefault='', scolspan=1, swidth=None, sheight=None,
context=None, view=None, xml=None):
# The Python method used for computing the field value, or a PX.
# The Python method used for computing the field value, or a PX
self.method = method
# A specific method for producing the formatted value of this field.
# This way, if, for example, the value is a DateTime instance which is
@ -53,7 +53,7 @@ class Computed(Field):
# Does field computation produce plain text or XHTML?
self.plainText = plainText
if isinstance(method, Px):
# When field computation is done with a PX, the result is XHTML.
# When field computation is done with a PX, the result is XHTML
self.plainText = False
# Determine default value for "show"
if show == None:
@ -62,7 +62,7 @@ class Computed(Field):
# in the xml layout.
show = self.plainText and ('view', 'result', 'xml') or \
('view', 'result')
# If method is a PX, its context can be given in p_context.
# If method is a PX, its context can be given in p_context
self.context = context
Field.__init__(self, None, multiplicity, default, show, page, group,
layouts, move, indexed, mustIndex, searchable,

View file

@ -275,9 +275,9 @@ class Pod(Field):
move, False, True, False, specificReadPermission,
specificWritePermission, width, height, None, colspan,
master, masterValue, focus, historized, mapping, label,
None, None, None, None, True, view, xml)
# Param "persist" is set to True but actually, persistence for a pod
# field is determined by freezing.
None, None, None, None, False, view, xml)
# Param "persist" is False, but actual persistence for this field is
# determined by freezing.
self.validable = False
def getExtension(self, template):
@ -603,7 +603,7 @@ class Pod(Field):
when no p_upload file is specified), if the freezing fails we try to
freeze the odt version, which is more robust because it does not
require calling LibreOffice.'''
# Security check.
# Security check
if not noSecurity and \
(format not in self.getFreezeFormats(obj, template)):
raise Exception(self.UNAUTHORIZED)
@ -613,10 +613,10 @@ class Pod(Field):
fileName = self.getFreezeName(template, format)
result = os.path.join(dbFolder, folder, fileName)
if os.path.exists(result):
prefix = upload and 'Freeze (upload)' or 'Freeze'
prefix = upload and 'freeze (upload)' or 'freeze'
obj.log('%s: overwriting %s...' % (prefix, result))
if not upload:
# Generate the document.
# Generate the document
doc = self.getValue(obj, template=template, format=format,
result=result)
if isinstance(doc, basestring):
@ -640,7 +640,7 @@ class Pod(Field):
raise Exception(self.FREEZE_FATAL_ERROR)
obj.log('freezed at %s.' % result)
else:
# Store the uploaded file in the database.
# Store the uploaded file in the database
f = file(result, 'wb')
doc = FileInfo(result, inDb=False)
doc.replicateFile(upload, f)

View file

@ -2,6 +2,7 @@
indexed.'''
# ------------------------------------------------------------------------------
from appy.gen.utils import splitIntoWords
from appy.shared.xml_parser import XmlParser
from appy.shared.utils import normalizeText
@ -68,23 +69,6 @@ def updateIndexes(installer, indexInfo):
catalog.reindexIndex(indexName, installer.app.REQUEST)
logger.info('Done.')
# ------------------------------------------------------------------------------
def splitIntoWords(text, ignore=2):
    '''Split the cleaned index value p_text into words. Words whose length is
       at most p_ignore are ignored, except digits, which are always kept.
       Duplicate words are removed: the result is a set and not a list.'''
    # Split p_text into words
    res = text.split()
    # Remove shorter words not being figures. Walk the list backwards so that
    # deletions do not disturb the indexes still to be visited.
    i = len(res) - 1
    while i > -1:
        if (len(res[i]) <= ignore) and not res[i].isdigit():
            del res[i]
        i -= 1
    # Remove duplicates by converting the list to a set
    return set(res)
# ------------------------------------------------------------------------------
class XhtmlTextExtractor(XmlParser):
'''Extracts text from XHTML.'''

View file

@ -141,7 +141,7 @@ class ZopeInstaller:
wrapperClass = tool.getAppyClass(className, wrapper=True)
indexInfo.update(wrapperClass.getIndexes(includeDefaults=False))
updateIndexes(self, indexInfo)
# Re-index index "SearchableText", wrongly defined for Appy < 0.8.3.
# Re-index index "SearchableText", wrongly defined for Appy < 0.8.3
stIndex = catalog.Indexes['SearchableText']
if stIndex.indexSize() == 0:
self.logger.info('reindexing SearchableText...')
@ -283,13 +283,13 @@ class ZopeInstaller:
# "select" field, because it will be necessary for displaying the
# translated state name.
state = gen.String(validator=gen.Selection('listStates'),
show='result')
show='result', persist=False, indexed=True)
state.init('state', None, 'workflow')
setattr(wrapperClass, 'state', state)
# Special field "SearchableText" must be added for every class and
# will allow to display a search widget for entering keywords for
# searching in index "SearchableText".
searchable = gen.String(show=False)
searchable = gen.String(show=False, persist=False, indexed=True)
searchable.init('SearchableText', None, 'appy')
setattr(wrapperClass, 'SearchableText', searchable)
# Set field "__fields__" on the wrapper class

View file

@ -1037,6 +1037,7 @@ class BaseMixin:
'''Gets the i18n label for p_name (which can denote a state or a
transition), or for the current object state if p_name is None.'''
name = name or self.State()
if name == 'create_from_predecessor': return name
return '%s_%s' % (self.getWorkflow(name=True), name)
def getTransitions(self, includeFake=True, includeNotShowable=False,

View file

@ -515,6 +515,10 @@ msgstr ""
msgid "action_comment"
msgstr ""
#. Default: "Create from a delayed item"
msgid "create_from_predecessor"
msgstr ""
#. Default: "Mon"
msgid "day_Mon_short"
msgstr ""

View file

@ -515,6 +515,10 @@ msgstr ""
msgid "action_comment"
msgstr ""
#. Default: "Create from a delayed item"
msgid "create_from_predecessor"
msgstr ""
#. Default: "Mon"
msgid "day_Mon_short"
msgstr ""

View file

@ -515,6 +515,10 @@ msgstr ""
msgid "action_comment"
msgstr ""
#. Default: "Create from a delayed item"
msgid "create_from_predecessor"
msgstr ""
#. Default: "Mon"
msgid "day_Mon_short"
msgstr ""

View file

@ -516,6 +516,10 @@ msgstr "Date"
msgid "action_comment"
msgstr "Comment"
#. Default: "Create from a delayed item"
msgid "create_from_predecessor"
msgstr "Create from a delayed item"
#. Default: "Mon"
msgid "day_Mon_short"
msgstr "Mon"

View file

@ -515,6 +515,10 @@ msgstr ""
msgid "action_comment"
msgstr ""
#. Default: "Create from a delayed item"
msgid "create_from_predecessor"
msgstr ""
#. Default: "Mon"
msgid "day_Mon_short"
msgstr ""

View file

@ -516,6 +516,10 @@ msgstr "Date"
msgid "action_comment"
msgstr "Commentaire"
#. Default: "Create from a delayed item"
msgid "create_from_predecessor"
msgstr "Créer depuis un point reporté"
#. Default: "Mon"
msgid "day_Mon_short"
msgstr "Lun"

View file

@ -515,6 +515,10 @@ msgstr ""
msgid "action_comment"
msgstr ""
#. Default: "Create from a delayed item"
msgid "create_from_predecessor"
msgstr ""
#. Default: "Mon"
msgid "day_Mon_short"
msgstr ""

View file

@ -515,6 +515,10 @@ msgstr "Datum"
msgid "action_comment"
msgstr "Commentaar"
#. Default: "Create from a delayed item"
msgid "create_from_predecessor"
msgstr ""
#. Default: "Mon"
msgid "day_Mon_short"
msgstr "Maa"

View file

@ -91,18 +91,35 @@ class SomeObjects:
else: getMethod = 'getObject'
self.objects = [getattr(b, getMethod)() for b in brains]
# ------------------------------------------------------------------------------
def splitIntoWords(text, ignore=2):
    '''Split the cleaned index value p_text into words and return them as a
       set (duplicates are removed). Words whose length is at most p_ignore
       are discarded, except digits, which are always kept.'''
    # A single-pass comprehension replaces the original backwards while-loop
    # with repeated "del" (each deletion shifts the list tail, O(n2) worst
    # case) and performs filtering and deduplication at once.
    return set([w for w in text.split() if (len(w) > ignore) or w.isdigit()])
# ------------------------------------------------------------------------------
class Keywords:
'''This class allows to handle keywords that a user enters and that will be
used as basis for performing requests in a TextIndex/XhtmlIndex.'''
toRemove = '?-+*()'
def __init__(self, keywords, operator='AND'):
# Clean the p_keywords that the user has entered.
def __init__(self, keywords, operator='AND', ignore=2):
# Clean the p_keywords that the user has entered
words = sutils.normalizeText(keywords)
if words == '*': words = ''
for c in self.toRemove: words = words.replace(c, ' ')
self.keywords = words.split()
self.keywords = splitIntoWords(words, ignore=ignore)
# Store the operator to apply to the keywords (AND or OR)
self.operator = operator

View file

@ -302,7 +302,7 @@ class AbstractWrapper(object):
# PXs for rendering graphical elements tied to a given object
# --------------------------------------------------------------------------
# This PX displays an object's history.
# This PX displays an object's history
pxHistory = Px('''
<x var="startNumber=req.get('startNumber', 0);
startNumber=int(startNumber);
@ -703,14 +703,15 @@ class AbstractWrapper(object):
applicable to instances of this class, and whose values are the
(Zope) types of those indexes. If p_asList is True, it returns a
list of tuples instead of a dict.'''
# Start with the standard indexes applicable for any Appy class.
# Start with the standard indexes applicable for any Appy class
if includeDefaults:
res = defaultIndexes.copy()
else:
res = {}
# Add the indexed fields found on this class
for field in klass.__fields__:
if not field.indexed or (field.name == 'title'): continue
if not field.indexed or \
(field.name in ('title', 'state', 'SearchableText')): continue
n = field.name
indexName = 'get%s%s' % (n[0].upper(), n[1:])
res[indexName] = field.getIndexType()
@ -928,17 +929,20 @@ class AbstractWrapper(object):
return appyObj
def createFrom(self, fieldNameOrClass, other, noSecurity=False,
executeMethods=True):
executeMethods=True, exclude=()):
'''Similar to m_create above, excepted that we will use another object
(p_other) as base for filling in data for the object to create.'''
(p_other) as base for filling in data for the object to create.
p_exclude can list fields (by their names) that will not be
copied on p_other. Note that this method does not perform a deep
copy: objects linked via Ref fields from p_self will be
referenced by the clone, but not themselves copied.'''
# Get the field values to set from p_other and store it in a dict.
# p_other may not be of the same class as p_self.
params = {}
for field in other.fields:
# Skip the added attribute "state"
if field.name == 'state': continue
# Skip back references.
if (field.type == 'Ref') and field.isBack: continue
# Skip non persistent fields, back references and p_excluded fields
if not field.persist or (field.name in exclude) or \
((field.type == 'Ref') and field.isBack): continue
params[field.name] = field.getCopyValue(other.o)
return self.create(fieldNameOrClass, noSecurity=noSecurity,
raiseOnWrongAttribute=False,

View file

@ -217,7 +217,7 @@ def executeCommand(cmd):
return res
# ------------------------------------------------------------------------------
charsIgnore = u'.,:;*+=~?%^\'"<>{}[]|\t\\°'
charsIgnore = u'.,:;*+=~?%^\'"<>{}[]|\t\\°-'
fileNameIgnore = charsIgnore + u' $£€/'
extractIgnore = charsIgnore + '()'
alphaRex = re.compile('[a-zA-Z]')