moved sources into subdirectory for easier setup

parent 4f91a30fec
commit d93f8ce937

190 changed files with 4 additions and 4 deletions
appy/shared/__init__.py (new file, 61 lines)
@@ -0,0 +1,61 @@
# ------------------------------------------------------------------------------
import appy
import os.path

# ------------------------------------------------------------------------------
appyPath = os.path.realpath(os.path.dirname(appy.__file__))
od = 'application/vnd.oasis.opendocument'
ms = 'application/vnd.openxmlformats-officedocument'
ms2 = 'application/vnd.ms'

mimeTypes = {'odt': '%s.text' % od,
             'ods': '%s.spreadsheet' % od,
             'doc': 'application/msword',
             'rtf': 'text/rtf',
             'pdf': 'application/pdf'
            }
mimeTypesExts = {
    '%s.text' % od: 'odt',
    '%s.spreadsheet' % od: 'ods',
    'application/msword': 'doc',
    'text/rtf': 'rtf',
    'application/pdf': 'pdf',
    'image/png': 'png',
    'image/jpeg': 'jpg',
    'image/pjpeg': 'jpg',
    'image/gif': 'gif',
    '%s.wordprocessingml.document' % ms: 'docx',
    '%s.spreadsheetml.sheet' % ms: 'xlsx',
    '%s.presentationml.presentation' % ms: 'pptx',
    '%s-excel' % ms2: 'xls',
    '%s-powerpoint' % ms2: 'ppt',
    '%s-word.document.macroEnabled.12' % ms2: 'docm',
    '%s-excel.sheet.macroEnabled.12' % ms2: 'xlsm',
    '%s-powerpoint.presentation.macroEnabled.12' % ms2: 'pptm'
    }

# ------------------------------------------------------------------------------
class UnmarshalledFile:
    '''Used for producing file objects from a marshalled Python object.'''
    def __init__(self):
        self.name = '' # The name of the file on disk
        self.mimeType = None # The MIME type of the file
        self.content = '' # The binary content of the file, or a file object
        self.size = 0 # The length of the file, in bytes

class UnicodeBuffer:
    '''The StringIO class causes tons of encoding problems, so we define a
       similar class here that uses an internal unicode buffer.'''
    def __init__(self):
        self.buffer = []
    def write(self, s):
        if s is None: return
        if isinstance(s, str):
            self.buffer.append(s)
        elif isinstance(s, bytes):
            # Assume bytes chunks are UTF-8-encoded
            self.buffer.append(s.decode('utf-8'))
        else:
            self.buffer.append(str(s))
    def getValue(self):
        return ''.join(self.buffer)
# ------------------------------------------------------------------------------
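A quick usage sketch for the helpers above (a hypothetical snippet, not part of the commit; the import path mirrors the file path appy/shared/__init__.py):

    from appy.shared import mimeTypes, mimeTypesExts, UnicodeBuffer
    mimeTypesExts[mimeTypes['odt']]   # 'odt'
    buf = UnicodeBuffer()
    buf.write('héllo ')               # str chunks are appended as-is
    buf.write(b'world')               # bytes are decoded as UTF-8
    buf.write(42)                     # anything else goes through str()
    buf.getValue()                    # 'héllo world42'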
appy/shared/css.py (new file, 63 lines)
@@ -0,0 +1,63 @@
# ------------------------------------------------------------------------------
import re

# ------------------------------------------------------------------------------
def parseStyleAttribute(value, asDict=False):
    '''Returns a list of CSS (name, value) pairs (or a dict if p_asDict is
       True), parsed from p_value, which holds the content of a HTML "style"
       attribute.'''
    if asDict: res = {}
    else: res = []
    for attr in value.split(';'):
        if not attr.strip(): continue
        name, value = attr.split(':')
        if asDict: res[name.strip()] = value.strip()
        else: res.append( (name.strip(), value.strip()) )
    return res

# ------------------------------------------------------------------------------
class CssValue:
    '''Represents a CSS value having unit "px" or "%": value and unit are
       extracted in attributes of the same name. If no unit is specified, "px"
       is assumed.'''
    valueRex = re.compile(r'(\d+)(%|px)?')

    def __init__(self, value):
        value, unit = CssValue.valueRex.match(value).groups()
        if not unit: unit = 'px'
        self.value = int(value)
        self.unit = unit
    def __str__(self): return '%d%s' % (self.value, self.unit)
    def __repr__(self): return self.__str__()

class CssStyles:
    '''This class represents a set of styles collected from:
       * an HTML "style" attribute;
       * other attributes like "width".
    '''
    # The list of CSS properties having a unit (px or %)
    withUnit = ('width', 'height')

    def __init__(self, elem, attrs):
        '''Analyses styles as found in p_attrs and sets, for every style
           found, an attribute on self.'''
        # First, parse the "style" attr if present
        if 'style' in attrs:
            styles = parseStyleAttribute(attrs['style'], asDict=True)
            for name, value in styles.items():
                if name in CssStyles.withUnit:
                    value = CssValue(value)
                setattr(self, name.replace('-', ''), value)
        # Parse attributes "width" and "height" if present. But they will not
        # override the corresponding attributes parsed from the "style"
        # attribute, if found.
        for name in ('width', 'height'):
            if not hasattr(self, name) and (name in attrs):
                setattr(self, name, CssValue(attrs[name]))

    def __repr__(self):
        res = '<CSS'
        for name, value in self.__dict__.items():
            res += ' %s:%s' % (name, value)
        return res + '>'
# ------------------------------------------------------------------------------
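A short, hypothetical illustration of the CSS helpers above (not part of the commit):

    from appy.shared.css import parseStyleAttribute, CssValue, CssStyles
    parseStyleAttribute('width: 80%; font-weight: bold', asDict=True)
    # {'width': '80%', 'font-weight': 'bold'}
    CssValue('50')            # 50px: when no unit is given, "px" is assumed
    CssStyles('td', {'style': 'height: 20px', 'width': '100'})
    # <CSS height:20px width:100px>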
appy/shared/csv_parser.py (new file, 295 lines)
@@ -0,0 +1,295 @@
# ------------------------------------------------------------------------------
# Appy is a framework for building applications in the Python language.
# Copyright (C) 2007 Gaetan Delannay

# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.

# ------------------------------------------------------------------------------
import ast

from appy import Object
from appy.shared.utils import sequenceTypes

# ------------------------------------------------------------------------------
WRONG_LINE = 'Line number %d in file %s does not have the right number of ' \
             'fields.'

class CsvParser:
    '''This class reads a CSV file and creates a list of Python objects from
       it. The first line of the CSV file must declare the format of the
       following lines, which are 'data' lines. For example, if the first line
       of the file is

       id,roles*,password

       then subsequent lines in the CSV file need to conform to this syntax,
       and the field separator will be the comma. The result of method 'parse'
       will be a list of Python objects, each one having attributes id, roles
       and password. Attributes declared with a star (like 'roles') are lists.
       An empty value will produce an empty list in the resulting object;
       several values need to be separated with the '+' sign. Here are some
       examples of valid 'data' lines for the first line above:

       gdy,,
       gdy,MeetingManager,abc
       gdy,MeetingManager+MeetingMember,abc

       In the first (and subsequent) line(s), you may choose among the
       following separators: , : ; |
    '''
    separators = [',', ':', ';', '|']
    typeLetters = {'i': int, 'f': float, 's': str, 'b': bool}
    def __init__(self, fileName, references={}, klass=None):
        self.fileName = fileName
        self.res = [] # The resulting list of Python objects
        self.sep = None
        # The list of attributes corresponding to CSV columns
        self.attributes = None
        # Here we know if every attribute is a list (True) or not (False)
        self.attributesFlags = None
        # Here we know the type of the attribute (if the attribute is a list,
        # it denotes the type of every item in the list): string, integer,
        # float, boolean.
        self.attributesTypes = None
        self.references = references
        # If a klass is given here, instead of creating Object instances we
        # will create instances of this class. But be careful: we will not
        # call the constructor of this class. We will simply create instances
        # of Object and dynamically change the class of created instances to
        # this class.
        self.klass = klass

    def identifySeparator(self, line):
        '''What is the separator used in this file?'''
        maxLength = 0
        res = None
        for sep in self.separators:
            newLength = len(line.split(sep))
            if newLength > maxLength:
                maxLength = newLength
                res = sep
        self.sep = res

    def identifyAttributes(self, line):
        self.attributes = line.split(self.sep)
        self.attributesFlags = [False] * len(self.attributes)
        self.attributesTypes = [str] * len(self.attributes)
        i = -1
        for attr in self.attributes:
            i += 1
            # Is this attribute mono- or multi-valued?
            if attr.endswith('*'):
                self.attributesFlags[i] = True
            attrNoFlag = attr.strip('*')
            attrInfo = attrNoFlag.split('-')
            # What is the type of value(s) for this attribute?
            if (len(attrInfo) == 2) and (attrInfo[1] in self.typeLetters):
                self.attributesTypes[i] = self.typeLetters[attrInfo[1]]
        # Remove trailing stars and type indicators
        self.attributes = [a.strip('*').split('-')[0] for a in self.attributes]

    def resolveReference(self, attrName, refId):
        '''Finds, in self.references, the object having p_refId.'''
        refObjects, refAttrName = self.references[attrName]
        res = None
        for refObject in refObjects:
            if getattr(refObject, refAttrName) == refId:
                res = refObject
                break
        return res

    def convertValue(self, value, basicType):
        '''Converts the atomic p_value, which is a string, into some other
           atomic Python type specified in p_basicType (int, float, ...).'''
        if basicType != str:
            # Evaluate the value as a Python literal (int, float or bool)
            try:
                res = ast.literal_eval(str(value))
            except (SyntaxError, ValueError):
                res = None
        else:
            res = str(value)
        return res

    def parse(self):
        '''Parses the CSV file named self.fileName and creates a list of
           corresponding Python objects (Object instances). Among object
           fields, some may be references. If it is the case, you may specify
           in p_references a dict of referred objects. The parser will then
           replace string values of some fields (which are supposed to be ids
           of referred objects) with corresponding objects in p_references.

           How does this work? p_references must be a dictionary:
           - keys correspond to field names of the current object;
           - values are 2-tuples:
             * 1st value is the list of available referred objects;
             * 2nd value is the name of the attribute on those objects that
               stores their ID.
        '''
        # The first pass parses the file and creates the Python objects
        f = open(self.fileName)
        firstLine = True
        lineNb = 0
        for line in f:
            lineNb += 1
            line = line.strip()
            if not line: continue
            if firstLine:
                # The first line declares the structure of the following
                # 'data' lines.
                self.identifySeparator(line)
                self.identifyAttributes(line)
                firstLine = False
            else:
                # Add an object corresponding to this line
                lineObject = Object()
                if self.klass:
                    lineObject.__class__ = self.klass
                i = -1
                # Do we get the right number of field values on this line?
                attrValues = line.split(self.sep)
                if len(attrValues) != len(self.attributes):
                    raise Exception(WRONG_LINE % (lineNb, self.fileName))
                for attrValue in attrValues:
                    i += 1
                    theValue = attrValue
                    vType = self.attributesTypes[i]
                    if self.attributesFlags[i]:
                        # The attribute is multi-valued
                        if not attrValue:
                            theValue = []
                        elif '+' in theValue:
                            theValue = [self.convertValue(v, vType) \
                                        for v in attrValue.split('+')]
                        else:
                            theValue = [self.convertValue(theValue, vType)]
                    else:
                        # The attribute is mono-valued
                        theValue = self.convertValue(theValue, vType)
                    setattr(lineObject, self.attributes[i], theValue)
                self.res.append(lineObject)
        f.close()
        # The second pass resolves the p_references if any
        for attrName, refInfo in self.references.items():
            if attrName in self.attributes:
                # Replace IDs with real objects from p_references
                for obj in self.res:
                    attrValue = getattr(obj, attrName)
                    if isinstance(attrValue, (list, tuple)):
                        # Multiple values to resolve
                        newValue = []
                        for v in attrValue:
                            newValue.append(self.resolveReference(attrName, v))
                    else:
                        # Only one value to resolve
                        newValue = self.resolveReference(attrName, attrValue)
                    setattr(obj, attrName, newValue)
        return self.res

# ------------------------------------------------------------------------------
class CsvMarshaller:
    '''This class is responsible for producing a CSV-ready line of data from
       an Appy object.'''
    undumpable = ('File', 'Action', 'Info', 'Pod')
    def __init__(self, at=None, sep=';', subSep=',', wrap='"',
                 includeHeaders=True, include=None, exclude=None):
        # If specified, p_at is an opened file handler to the CSV file to fill
        self.at = at
        # The CSV field separator
        self.sep = sep
        # The sub-separator for multi-valued fields
        self.subSep = subSep
        # The "wrap" char will wrap any value that contains self.sep
        self.wrap = wrap
        # Must we put field names as the first line of the CSV?
        self.includeHeaders = includeHeaders
        # If p_include is given, it lists names of fields that will be included
        self.include = include
        # If p_exclude is given, it lists names of fields that will be excluded
        self.exclude = exclude

    def marshallString(self, value):
        '''Produces a version of p_value that can be put in the CSV file.'''
        return value.replace('\r\n', ' ').replace('\n', ' ')

    def marshallValue(self, field, value):
        '''Produces a version of p_value that can be dumped in a CSV file.'''
        if isinstance(value, str):
            # Format the string as a one-line, CSV-ready value
            res = self.marshallString(value)
        elif type(value) in sequenceTypes:
            # Create a list of values, separated by a sub-separator
            res = []
            for v in value:
                res.append(self.marshallValue(field, v))
            res = self.subSep.join(res)
        elif hasattr(value, 'klass') and hasattr(value, 'title'):
            # This is a reference to another object. Dump only its title.
            res = value.title
        elif value is None:
            # An empty string is more beautiful than 'None'
            res = ''
        else:
            res = str(value)
        # If self.sep is found within this value, we must wrap it in self.wrap
        if self.sep in res:
            # Double any wrapper char if present
            res = res.replace(self.wrap, '%s%s' % (self.wrap, self.wrap))
            # Wrap the value
            res = '%s%s%s' % (self.wrap, res, self.wrap)
        return res

    def includeField(self, field):
        '''Must p_field be included in the result?'''
        # Check self.include and self.exclude
        if self.include and field.name not in self.include: return False
        if self.exclude and field.name in self.exclude: return False
        # Check the field type
        if field.type in self.undumpable: return False
        # Don't dump password fields
        if (field.type == 'String') and (field.format == 3): return False
        if (field.type == 'Ref') and field.isBack: return False
        if (field.type == 'Computed') and not field.plainText: return False
        return True

    def marshall(self, obj):
        '''Creates the CSV line representing p_obj and dumps it in self.at if
           specified, or returns it otherwise.'''
        obj = obj.appy()
        res = []
        # Dump the header line if required, and if no line has been dumped in
        # self.at yet.
        headers = []
        if self.includeHeaders and self.at and (self.at.tell() == 0):
            for field in obj.fields:
                if not self.includeField(field): continue
                headers.append(field.name)
            self.at.write(self.sep.join(headers))
            self.at.write('\n')
        # Dump the data line
        for field in obj.fields:
            if not self.includeField(field): continue
            # Get the field value
            value = field.getValue(obj.o)
            value = self.marshallValue(field, value)
            res.append(value)
        res = self.sep.join(res)
        if self.at:
            self.at.write(res)
            self.at.write('\n')
        else: return res
# ------------------------------------------------------------------------------
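A minimal usage sketch for CsvParser (hypothetical; it assumes a file users.csv whose first line is id,roles*,password):

    from appy.shared.csv_parser import CsvParser
    users = CsvParser('users.csv').parse()
    for user in users:
        # Every data line becomes an object with one attribute per column;
        # starred columns like "roles*" come back as lists.
        print(user.id, user.roles, user.password)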
appy/shared/data/BelgianCommunes.txt (new file, 2901 lines)
(File diff suppressed because it is too large.)
appy/shared/data/CountryCodesIso3166.1.txt (new file, 249 lines)
@@ -0,0 +1,249 @@
AFGHANISTAN;AF
ÅLAND ISLANDS;AX
ALBANIA;AL
ALGERIA;DZ
AMERICAN SAMOA;AS
ANDORRA;AD
ANGOLA;AO
ANGUILLA;AI
ANTARCTICA;AQ
ANTIGUA AND BARBUDA;AG
ARGENTINA;AR
ARMENIA;AM
ARUBA;AW
AUSTRALIA;AU
AUSTRIA;AT
AZERBAIJAN;AZ
BAHAMAS;BS
BAHRAIN;BH
BANGLADESH;BD
BARBADOS;BB
BELARUS;BY
BELGIUM;BE
BELIZE;BZ
BENIN;BJ
BERMUDA;BM
BHUTAN;BT
BOLIVIA, PLURINATIONAL STATE OF;BO
BONAIRE, SINT EUSTATIUS AND SABA;BQ
BOSNIA AND HERZEGOVINA;BA
BOTSWANA;BW
BOUVET ISLAND;BV
BRAZIL;BR
BRITISH INDIAN OCEAN TERRITORY;IO
BRUNEI DARUSSALAM;BN
BULGARIA;BG
BURKINA FASO;BF
BURUNDI;BI
CAMBODIA;KH
CAMEROON;CM
CANADA;CA
CAPE VERDE;CV
CAYMAN ISLANDS;KY
CENTRAL AFRICAN REPUBLIC;CF
CHAD;TD
CHILE;CL
CHINA;CN
CHRISTMAS ISLAND;CX
COCOS (KEELING) ISLANDS;CC
COLOMBIA;CO
COMOROS;KM
CONGO;CG
CONGO, THE DEMOCRATIC REPUBLIC OF THE;CD
COOK ISLANDS;CK
COSTA RICA;CR
CÔTE D'IVOIRE;CI
CROATIA;HR
CUBA;CU
CURAÇAO;CW
CYPRUS;CY
CZECH REPUBLIC;CZ
DENMARK;DK
DJIBOUTI;DJ
DOMINICA;DM
DOMINICAN REPUBLIC;DO
ECUADOR;EC
EGYPT;EG
EL SALVADOR;SV
EQUATORIAL GUINEA;GQ
ERITREA;ER
ESTONIA;EE
ETHIOPIA;ET
FALKLAND ISLANDS (MALVINAS);FK
FAROE ISLANDS;FO
FIJI;FJ
FINLAND;FI
FRANCE;FR
FRENCH GUIANA;GF
FRENCH POLYNESIA;PF
FRENCH SOUTHERN TERRITORIES;TF
GABON;GA
GAMBIA;GM
GEORGIA;GE
GERMANY;DE
GHANA;GH
GIBRALTAR;GI
GREECE;GR
GREENLAND;GL
GRENADA;GD
GUADELOUPE;GP
GUAM;GU
GUATEMALA;GT
GUERNSEY;GG
GUINEA;GN
GUINEA-BISSAU;GW
GUYANA;GY
HAITI;HT
HEARD ISLAND AND MCDONALD ISLANDS;HM
HOLY SEE (VATICAN CITY STATE);VA
HONDURAS;HN
HONG KONG;HK
HUNGARY;HU
ICELAND;IS
INDIA;IN
INDONESIA;ID
IRAN, ISLAMIC REPUBLIC OF;IR
IRAQ;IQ
IRELAND;IE
ISLE OF MAN;IM
ISRAEL;IL
ITALY;IT
JAMAICA;JM
JAPAN;JP
JERSEY;JE
JORDAN;JO
KAZAKHSTAN;KZ
KENYA;KE
KIRIBATI;KI
KOREA, DEMOCRATIC PEOPLE'S REPUBLIC OF;KP
KOREA, REPUBLIC OF;KR
KUWAIT;KW
KYRGYZSTAN;KG
LAO PEOPLE'S DEMOCRATIC REPUBLIC;LA
LATVIA;LV
LEBANON;LB
LESOTHO;LS
LIBERIA;LR
LIBYA;LY
LIECHTENSTEIN;LI
LITHUANIA;LT
LUXEMBOURG;LU
MACAO;MO
MACEDONIA, THE FORMER YUGOSLAV REPUBLIC OF;MK
MADAGASCAR;MG
MALAWI;MW
MALAYSIA;MY
MALDIVES;MV
MALI;ML
MALTA;MT
MARSHALL ISLANDS;MH
MARTINIQUE;MQ
MAURITANIA;MR
MAURITIUS;MU
MAYOTTE;YT
MEXICO;MX
MICRONESIA, FEDERATED STATES OF;FM
MOLDOVA, REPUBLIC OF;MD
MONACO;MC
MONGOLIA;MN
MONTENEGRO;ME
MONTSERRAT;MS
MOROCCO;MA
MOZAMBIQUE;MZ
MYANMAR;MM
NAMIBIA;NA
NAURU;NR
NEPAL;NP
NETHERLANDS;NL
NEW CALEDONIA;NC
NEW ZEALAND;NZ
NICARAGUA;NI
NIGER;NE
NIGERIA;NG
NIUE;NU
NORFOLK ISLAND;NF
NORTHERN MARIANA ISLANDS;MP
NORWAY;NO
OMAN;OM
PAKISTAN;PK
PALAU;PW
PALESTINIAN TERRITORY, OCCUPIED;PS
PANAMA;PA
PAPUA NEW GUINEA;PG
PARAGUAY;PY
PERU;PE
PHILIPPINES;PH
PITCAIRN;PN
POLAND;PL
PORTUGAL;PT
PUERTO RICO;PR
QATAR;QA
RÉUNION;RE
ROMANIA;RO
RUSSIAN FEDERATION;RU
RWANDA;RW
SAINT BARTHÉLEMY;BL
SAINT HELENA, ASCENSION AND TRISTAN DA CUNHA;SH
SAINT KITTS AND NEVIS;KN
SAINT LUCIA;LC
SAINT MARTIN (FRENCH PART);MF
SAINT PIERRE AND MIQUELON;PM
SAINT VINCENT AND THE GRENADINES;VC
SAMOA;WS
SAN MARINO;SM
SAO TOME AND PRINCIPE;ST
SAUDI ARABIA;SA
SENEGAL;SN
SERBIA;RS
SEYCHELLES;SC
SIERRA LEONE;SL
SINGAPORE;SG
SINT MAARTEN (DUTCH PART);SX
SLOVAKIA;SK
SLOVENIA;SI
SOLOMON ISLANDS;SB
SOMALIA;SO
SOUTH AFRICA;ZA
SOUTH GEORGIA AND THE SOUTH SANDWICH ISLANDS;GS
SOUTH SUDAN;SS
SPAIN;ES
SRI LANKA;LK
SUDAN;SD
SURINAME;SR
SVALBARD AND JAN MAYEN;SJ
SWAZILAND;SZ
SWEDEN;SE
SWITZERLAND;CH
SYRIAN ARAB REPUBLIC;SY
TAIWAN, PROVINCE OF CHINA;TW
TAJIKISTAN;TJ
TANZANIA, UNITED REPUBLIC OF;TZ
THAILAND;TH
TIMOR-LESTE;TL
TOGO;TG
TOKELAU;TK
TONGA;TO
TRINIDAD AND TOBAGO;TT
TUNISIA;TN
TURKEY;TR
TURKMENISTAN;TM
TURKS AND CAICOS ISLANDS;TC
TUVALU;TV
UGANDA;UG
UKRAINE;UA
UNITED ARAB EMIRATES;AE
UNITED KINGDOM;GB
UNITED STATES;US
UNITED STATES MINOR OUTLYING ISLANDS;UM
URUGUAY;UY
UZBEKISTAN;UZ
VANUATU;VU
VENEZUELA, BOLIVARIAN REPUBLIC OF;VE
VIET NAM;VN
VIRGIN ISLANDS, BRITISH;VG
VIRGIN ISLANDS, U.S.;VI
WALLIS AND FUTUNA;WF
WESTERN SAHARA;EH
YEMEN;YE
ZAMBIA;ZM
ZIMBABWE;ZW
appy/shared/data/LanguageCodesIso639.2.txt (new file, 485 lines)
@@ -0,0 +1,485 @@
aar||aa|Afar|afar
abk||ab|Abkhazian|abkhaze
ace|||Achinese|aceh
ach|||Acoli|acoli
ada|||Adangme|adangme
ady|||Adyghe; Adygei|adyghé
afa|||Afro-Asiatic languages|afro-asiatiques, langues
afh|||Afrihili|afrihili
afr||af|Afrikaans|afrikaans
ain|||Ainu|aïnou
aka||ak|Akan|akan
akk|||Akkadian|akkadien
alb|sqi|sq|Albanian|albanais
ale|||Aleut|aléoute
alg|||Algonquian languages|algonquines, langues
alt|||Southern Altai|altai du Sud
amh||am|Amharic|amharique
ang|||English, Old (ca.450-1100)|anglo-saxon (ca.450-1100)
anp|||Angika|angika
apa|||Apache languages|apaches, langues
ara||ar|Arabic|arabe
arc|||Official Aramaic (700-300 BCE); Imperial Aramaic (700-300 BCE)|araméen d'empire (700-300 BCE)
arg||an|Aragonese|aragonais
arm|hye|hy|Armenian|arménien
arn|||Mapudungun; Mapuche|mapudungun; mapuche; mapuce
arp|||Arapaho|arapaho
art|||Artificial languages|artificielles, langues
arw|||Arawak|arawak
asm||as|Assamese|assamais
ast|||Asturian; Bable; Leonese; Asturleonese|asturien; bable; léonais; asturoléonais
ath|||Athapascan languages|athapascanes, langues
aus|||Australian languages|australiennes, langues
ava||av|Avaric|avar
ave||ae|Avestan|avestique
awa|||Awadhi|awadhi
aym||ay|Aymara|aymara
aze||az|Azerbaijani|azéri
bad|||Banda languages|banda, langues
bai|||Bamileke languages|bamiléké, langues
bak||ba|Bashkir|bachkir
bal|||Baluchi|baloutchi
bam||bm|Bambara|bambara
ban|||Balinese|balinais
baq|eus|eu|Basque|basque
bas|||Basa|basa
bat|||Baltic languages|baltes, langues
bej|||Beja; Bedawiyet|bedja
bel||be|Belarusian|biélorusse
bem|||Bemba|bemba
ben||bn|Bengali|bengali
ber|||Berber languages|berbères, langues
bho|||Bhojpuri|bhojpuri
bih||bh|Bihari|bihari
bik|||Bikol|bikol
bin|||Bini; Edo|bini; edo
bis||bi|Bislama|bichlamar
bla|||Siksika|blackfoot
bnt|||Bantu (Other)|bantoues, autres langues
bos||bs|Bosnian|bosniaque
bra|||Braj|braj
bre||br|Breton|breton
btk|||Batak languages|batak, langues
bua|||Buriat|bouriate
bug|||Buginese|bugi
bul||bg|Bulgarian|bulgare
bur|mya|my|Burmese|birman
byn|||Blin; Bilin|blin; bilen
cad|||Caddo|caddo
cai|||Central American Indian languages|amérindiennes de L'Amérique centrale, langues
car|||Galibi Carib|karib; galibi; carib
cat||ca|Catalan; Valencian|catalan; valencien
cau|||Caucasian languages|caucasiennes, langues
ceb|||Cebuano|cebuano
cel|||Celtic languages|celtiques, langues; celtes, langues
cha||ch|Chamorro|chamorro
chb|||Chibcha|chibcha
che||ce|Chechen|tchétchène
chg|||Chagatai|djaghataï
chi|zho|zh|Chinese|chinois
chk|||Chuukese|chuuk
chm|||Mari|mari
chn|||Chinook jargon|chinook, jargon
cho|||Choctaw|choctaw
chp|||Chipewyan; Dene Suline|chipewyan
chr|||Cherokee|cherokee
chu||cu|Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic|slavon d'église; vieux slave; slavon liturgique; vieux bulgare
chv||cv|Chuvash|tchouvache
chy|||Cheyenne|cheyenne
cmc|||Chamic languages|chames, langues
cop|||Coptic|copte
cor||kw|Cornish|cornique
cos||co|Corsican|corse
cpe|||Creoles and pidgins, English based|créoles et pidgins basés sur l'anglais
cpf|||Creoles and pidgins, French-based |créoles et pidgins basés sur le français
cpp|||Creoles and pidgins, Portuguese-based |créoles et pidgins basés sur le portugais
cre||cr|Cree|cree
crh|||Crimean Tatar; Crimean Turkish|tatar de Crimé
crp|||Creoles and pidgins |créoles et pidgins
csb|||Kashubian|kachoube
cus|||Cushitic languages|couchitiques, langues
cze|ces|cs|Czech|tchèque
dak|||Dakota|dakota
dan||da|Danish|danois
dar|||Dargwa|dargwa
day|||Land Dayak languages|dayak, langues
del|||Delaware|delaware
den|||Slave (Athapascan)|esclave (athapascan)
dgr|||Dogrib|dogrib
din|||Dinka|dinka
div||dv|Divehi; Dhivehi; Maldivian|maldivien
doi|||Dogri|dogri
dra|||Dravidian languages|dravidiennes, langues
dsb|||Lower Sorbian|bas-sorabe
dua|||Duala|douala
dum|||Dutch, Middle (ca.1050-1350)|néerlandais moyen (ca. 1050-1350)
dut|nld|nl|Dutch; Flemish|néerlandais; flamand
dyu|||Dyula|dioula
dzo||dz|Dzongkha|dzongkha
efi|||Efik|efik
egy|||Egyptian (Ancient)|égyptien
eka|||Ekajuk|ekajuk
elx|||Elamite|élamite
eng||en|English|anglais
enm|||English, Middle (1100-1500)|anglais moyen (1100-1500)
epo||eo|Esperanto|espéranto
est||et|Estonian|estonien
ewe||ee|Ewe|éwé
ewo|||Ewondo|éwondo
fan|||Fang|fang
fao||fo|Faroese|féroïen
fat|||Fanti|fanti
fij||fj|Fijian|fidjien
fil|||Filipino; Pilipino|filipino; pilipino
fin||fi|Finnish|finnois
fiu|||Finno-Ugrian languages|finno-ougriennes, langues
fon|||Fon|fon
fre|fra|fr|French|français
frm|||French, Middle (ca.1400-1600)|français moyen (1400-1600)
fro|||French, Old (842-ca.1400)|français ancien (842-ca.1400)
frr|||Northern Frisian|frison septentrional
frs|||Eastern Frisian|frison oriental
fry||fy|Western Frisian|frison occidental
ful||ff|Fulah|peul
fur|||Friulian|frioulan
gaa|||Ga|ga
gay|||Gayo|gayo
gba|||Gbaya|gbaya
gem|||Germanic languages|germaniques, langues
geo|kat|ka|Georgian|géorgien
ger|deu|de|German|allemand
gez|||Geez|guèze
gil|||Gilbertese|kiribati
gla||gd|Gaelic; Scottish Gaelic|gaélique; gaélique écossais
gle||ga|Irish|irlandais
glg||gl|Galician|galicien
glv||gv|Manx|manx; mannois
gmh|||German, Middle High (ca.1050-1500)|allemand, moyen haut (ca. 1050-1500)
goh|||German, Old High (ca.750-1050)|allemand, vieux haut (ca. 750-1050)
gon|||Gondi|gond
gor|||Gorontalo|gorontalo
got|||Gothic|gothique
grb|||Grebo|grebo
grc|||Greek, Ancient (to 1453)|grec ancien (jusqu'à 1453)
gre|ell|el|Greek, Modern (1453-)|grec moderne (après 1453)
grn||gn|Guarani|guarani
gsw|||Swiss German; Alemannic; Alsatian|suisse alémanique; alémanique; alsacien
guj||gu|Gujarati|goudjrati
gwi|||Gwich'in|gwich'in
hai|||Haida|haida
hat||ht|Haitian; Haitian Creole|haïtien; créole haïtien
hau||ha|Hausa|haoussa
haw|||Hawaiian|hawaïen
heb||he|Hebrew|hébreu
her||hz|Herero|herero
hil|||Hiligaynon|hiligaynon
him|||Himachali|himachali
hin||hi|Hindi|hindi
hit|||Hittite|hittite
hmn|||Hmong|hmong
hmo||ho|Hiri Motu|hiri motu
hrv||hr|Croatian|croate
hsb|||Upper Sorbian|haut-sorabe
hun||hu|Hungarian|hongrois
hup|||Hupa|hupa
iba|||Iban|iban
ibo||ig|Igbo|igbo
ice|isl|is|Icelandic|islandais
ido||io|Ido|ido
iii||ii|Sichuan Yi; Nuosu|yi de Sichuan
ijo|||Ijo languages|ijo, langues
iku||iu|Inuktitut|inuktitut
ile||ie|Interlingue; Occidental|interlingue
ilo|||Iloko|ilocano
ina||ia|Interlingua (International Auxiliary Language Association)|interlingua (langue auxiliaire internationale)
inc|||Indic languages|indo-aryennes, langues
ind||id|Indonesian|indonésien
ine|||Indo-European languages|indo-européennes, langues
inh|||Ingush|ingouche
ipk||ik|Inupiaq|inupiaq
ira|||Iranian languages|iraniennes, langues
iro|||Iroquoian languages|iroquoises, langues
ita||it|Italian|italien
jav||jv|Javanese|javanais
jbo|||Lojban|lojban
jpn||ja|Japanese|japonais
jpr|||Judeo-Persian|judéo-persan
jrb|||Judeo-Arabic|judéo-arabe
kaa|||Kara-Kalpak|karakalpak
kab|||Kabyle|kabyle
kac|||Kachin; Jingpho|kachin; jingpho
kal||kl|Kalaallisut; Greenlandic|groenlandais
kam|||Kamba|kamba
kan||kn|Kannada|kannada
kar|||Karen languages|karen, langues
kas||ks|Kashmiri|kashmiri
kau||kr|Kanuri|kanouri
kaw|||Kawi|kawi
kaz||kk|Kazakh|kazakh
kbd|||Kabardian|kabardien
kha|||Khasi|khasi
khi|||Khoisan languages|khoïsan, langues
khm||km|Central Khmer|khmer central
kho|||Khotanese; Sakan|khotanais; sakan
kik||ki|Kikuyu; Gikuyu|kikuyu
kin||rw|Kinyarwanda|rwanda
kir||ky|Kirghiz; Kyrgyz|kirghiz
kmb|||Kimbundu|kimbundu
kok|||Konkani|konkani
kom||kv|Komi|kom
kon||kg|Kongo|kongo
kor||ko|Korean|coréen
kos|||Kosraean|kosrae
kpe|||Kpelle|kpellé
krc|||Karachay-Balkar|karatchai balkar
krl|||Karelian|carélien
kro|||Kru languages|krou, langues
kru|||Kurukh|kurukh
kua||kj|Kuanyama; Kwanyama|kuanyama; kwanyama
kum|||Kumyk|koumyk
kur||ku|Kurdish|kurde
kut|||Kutenai|kutenai
lad|||Ladino|judéo-espagnol
lah|||Lahnda|lahnda
lam|||Lamba|lamba
lao||lo|Lao|lao
lat||la|Latin|latin
lav||lv|Latvian|letton
lez|||Lezghian|lezghien
lim||li|Limburgan; Limburger; Limburgish|limbourgeois
lin||ln|Lingala|lingala
lit||lt|Lithuanian|lituanien
lol|||Mongo|mongo
loz|||Lozi|lozi
ltz||lb|Luxembourgish; Letzeburgesch|luxembourgeois
lua|||Luba-Lulua|luba-lulua
lub||lu|Luba-Katanga|luba-katanga
lug||lg|Ganda|ganda
lui|||Luiseno|luiseno
lun|||Lunda|lunda
luo|||Luo (Kenya and Tanzania)|luo (Kenya et Tanzanie)
lus|||Lushai|lushai
mac|mkd|mk|Macedonian|macédonien
mad|||Madurese|madourais
mag|||Magahi|magahi
mah||mh|Marshallese|marshall
mai|||Maithili|maithili
mak|||Makasar|makassar
mal||ml|Malayalam|malayalam
man|||Mandingo|mandingue
mao|mri|mi|Maori|maori
map|||Austronesian languages|austronésiennes, langues
mar||mr|Marathi|marathe
mas|||Masai|massaï
may|msa|ms|Malay|malais
mdf|||Moksha|moksa
mdr|||Mandar|mandar
men|||Mende|mendé
mga|||Irish, Middle (900-1200)|irlandais moyen (900-1200)
mic|||Mi'kmaq; Micmac|mi'kmaq; micmac
min|||Minangkabau|minangkabau
mis|||Uncoded languages|langues non codées
mkh|||Mon-Khmer languages|môn-khmer, langues
mlg||mg|Malagasy|malgache
mlt||mt|Maltese|maltais
mnc|||Manchu|mandchou
mni|||Manipuri|manipuri
mno|||Manobo languages|manobo, langues
moh|||Mohawk|mohawk
mon||mn|Mongolian|mongol
mos|||Mossi|moré
mul|||Multiple languages|multilingue
mun|||Munda languages|mounda, langues
mus|||Creek|muskogee
mwl|||Mirandese|mirandais
mwr|||Marwari|marvari
myn|||Mayan languages|maya, langues
myv|||Erzya|erza
nah|||Nahuatl languages|nahuatl, langues
nai|||North American Indian languages|nord-amérindiennes, langues
nap|||Neapolitan|napolitain
nau||na|Nauru|nauruan
nav||nv|Navajo; Navaho|navaho
nbl||nr|Ndebele, South; South Ndebele|ndébélé du Sud
nde||nd|Ndebele, North; North Ndebele|ndébélé du Nord
ndo||ng|Ndonga|ndonga
nds|||Low German; Low Saxon; German, Low; Saxon, Low|bas allemand; bas saxon; allemand, bas; saxon, bas
nep||ne|Nepali|népalais
new|||Nepal Bhasa; Newari|nepal bhasa; newari
nia|||Nias|nias
nic|||Niger-Kordofanian languages|nigéro-kordofaniennes, langues
niu|||Niuean|niué
nno||nn|Norwegian Nynorsk; Nynorsk, Norwegian|norvégien nynorsk; nynorsk, norvégien
nob||nb|Bokmål, Norwegian; Norwegian Bokmål|norvégien bokmål
nog|||Nogai|nogaï; nogay
non|||Norse, Old|norrois, vieux
nor||no|Norwegian|norvégien
nqo|||N'Ko|n'ko
nso|||Pedi; Sepedi; Northern Sotho|pedi; sepedi; sotho du Nord
nub|||Nubian languages|nubiennes, langues
nwc|||Classical Newari; Old Newari; Classical Nepal Bhasa|newari classique
nya||ny|Chichewa; Chewa; Nyanja|chichewa; chewa; nyanja
nym|||Nyamwezi|nyamwezi
nyn|||Nyankole|nyankolé
nyo|||Nyoro|nyoro
nzi|||Nzima|nzema
oci||oc|Occitan (post 1500); Provençal|occitan (après 1500); provençal
oji||oj|Ojibwa|ojibwa
ori||or|Oriya|oriya
orm||om|Oromo|galla
osa|||Osage|osage
oss||os|Ossetian; Ossetic|ossète
ota|||Turkish, Ottoman (1500-1928)|turc ottoman (1500-1928)
oto|||Otomian languages|otomi, langues
paa|||Papuan languages|papoues, langues
pag|||Pangasinan|pangasinan
pal|||Pahlavi|pahlavi
pam|||Pampanga; Kapampangan|pampangan
pan||pa|Panjabi; Punjabi|pendjabi
pap|||Papiamento|papiamento
pau|||Palauan|palau
peo|||Persian, Old (ca.600-400 B.C.)|perse, vieux (ca. 600-400 av. J.-C.)
per|fas|fa|Persian|persan
phi|||Philippine languages|philippines, langues
phn|||Phoenician|phénicien
pli||pi|Pali|pali
pol||pl|Polish|polonais
pon|||Pohnpeian|pohnpei
por||pt|Portuguese|portugais
pra|||Prakrit languages|prâkrit, langues
pro|||Provençal, Old (to 1500)|provençal ancien (jusqu'à 1500)
pus||ps|Pushto; Pashto|pachto
qaa-qtz|||Reserved for local use|réservée à l'usage local
que||qu|Quechua|quechua
raj|||Rajasthani|rajasthani
rap|||Rapanui|rapanui
rar|||Rarotongan; Cook Islands Maori|rarotonga; maori des îles Cook
roa|||Romance languages|romanes, langues
roh||rm|Romansh|romanche
rom|||Romany|tsigane
rum|ron|ro|Romanian; Moldavian; Moldovan|roumain; moldave
run||rn|Rundi|rundi
rup|||Aromanian; Arumanian; Macedo-Romanian|aroumain; macédo-roumain
rus||ru|Russian|russe
sad|||Sandawe|sandawe
sag||sg|Sango|sango
sah|||Yakut|iakoute
sai|||South American Indian (Other)|indiennes d'Amérique du Sud, autres langues
sal|||Salishan languages|salishennes, langues
sam|||Samaritan Aramaic|samaritain
san||sa|Sanskrit|sanskrit
sas|||Sasak|sasak
sat|||Santali|santal
scn|||Sicilian|sicilien
sco|||Scots|écossais
sel|||Selkup|selkoupe
sem|||Semitic languages|sémitiques, langues
sga|||Irish, Old (to 900)|irlandais ancien (jusqu'à 900)
sgn|||Sign Languages|langues des signes
shn|||Shan|chan
sid|||Sidamo|sidamo
sin||si|Sinhala; Sinhalese|singhalais
sio|||Siouan languages|sioux, langues
sit|||Sino-Tibetan languages|sino-tibétaines, langues
sla|||Slavic languages|slaves, langues
slo|slk|sk|Slovak|slovaque
slv||sl|Slovenian|slovène
sma|||Southern Sami|sami du Sud
sme||se|Northern Sami|sami du Nord
smi|||Sami languages|sames, langues
smj|||Lule Sami|sami de Lule
smn|||Inari Sami|sami d'Inari
smo||sm|Samoan|samoan
sms|||Skolt Sami|sami skolt
sna||sn|Shona|shona
snd||sd|Sindhi|sindhi
snk|||Soninke|soninké
sog|||Sogdian|sogdien
som||so|Somali|somali
son|||Songhai languages|songhai, langues
sot||st|Sotho, Southern|sotho du Sud
spa||es|Spanish; Castilian|espagnol; castillan
srd||sc|Sardinian|sarde
srn|||Sranan Tongo|sranan tongo
srp||sr|Serbian|serbe
srr|||Serer|sérère
ssa|||Nilo-Saharan languages|nilo-sahariennes, langues
ssw||ss|Swati|swati
suk|||Sukuma|sukuma
sun||su|Sundanese|soundanais
sus|||Susu|soussou
sux|||Sumerian|sumérien
swa||sw|Swahili|swahili
swe||sv|Swedish|suédois
syc|||Classical Syriac|syriaque classique
syr|||Syriac|syriaque
tah||ty|Tahitian|tahitien
tai|||Tai languages|tai, langues
tam||ta|Tamil|tamoul
tat||tt|Tatar|tatar
tel||te|Telugu|télougou
tem|||Timne|temne
ter|||Tereno|tereno
tet|||Tetum|tetum
tgk||tg|Tajik|tadjik
tgl||tl|Tagalog|tagalog
tha||th|Thai|thaï
tib|bod|bo|Tibetan|tibétain
tig|||Tigre|tigré
tir||ti|Tigrinya|tigrigna
tiv|||Tiv|tiv
tkl|||Tokelau|tokelau
tlh|||Klingon; tlhIngan-Hol|klingon
tli|||Tlingit|tlingit
tmh|||Tamashek|tamacheq
tog|||Tonga (Nyasa)|tonga (Nyasa)
ton||to|Tonga (Tonga Islands)|tongan (Îles Tonga)
tpi|||Tok Pisin|tok pisin
tsi|||Tsimshian|tsimshian
tsn||tn|Tswana|tswana
tso||ts|Tsonga|tsonga
tuk||tk|Turkmen|turkmène
tum|||Tumbuka|tumbuka
tup|||Tupi languages|tupi, langues
tur||tr|Turkish|turc
tut|||Altaic languages|altaïques, langues
tvl|||Tuvalu|tuvalu
twi||tw|Twi|twi
tyv|||Tuvinian|touva
udm|||Udmurt|oudmourte
uga|||Ugaritic|ougaritique
uig||ug|Uighur; Uyghur|ouïgour
ukr||uk|Ukrainian|ukrainien
umb|||Umbundu|umbundu
und|||Undetermined|indéterminée
urd||ur|Urdu|ourdou
uzb||uz|Uzbek|ouszbek
vai|||Vai|vaï
ven||ve|Venda|venda
vie||vi|Vietnamese|vietnamien
vol||vo|Volapük|volapük
vot|||Votic|vote
wak|||Wakashan languages|wakashanes, langues
wal|||Walamo|walamo
war|||Waray|waray
was|||Washo|washo
wel|cym|cy|Welsh|gallois
wen|||Sorbian languages|sorabes, langues
wln||wa|Walloon|wallon
wol||wo|Wolof|wolof
xal|||Kalmyk; Oirat|kalmouk; oïrat
xho||xh|Xhosa|xhosa
yao|||Yao|yao
yap|||Yapese|yapois
yid||yi|Yiddish|yiddish
yor||yo|Yoruba|yoruba
ypk|||Yupik languages|yupik, langues
zap|||Zapotec|zapotèque
zbl|||Blissymbols; Blissymbolics; Bliss|symboles Bliss; Bliss
zen|||Zenaga|zenaga
zha||za|Zhuang; Chuang|zhuang; chuang
znd|||Zande languages|zandé, langues
zul||zu|Zulu|zoulou
zun|||Zuni|zuni
zxx|||No linguistic content; Not applicable|pas de contenu linguistique; non applicable
zza|||Zaza; Dimili; Dimli; Kirdki; Kirmanjki; Zazaki|zaza; dimili; dimli; kirdki; kirmanjki; zazaki
appy/shared/data/__init__.py (new file, 286 lines)
@@ -0,0 +1,286 @@
# -*- coding: utf-8 -*-
'''This folder contains copies of external, "authentic" data, stored as text
   files, like ISO 639.2 language codes. In this package, corresponding Python
   classes are available for accessing the data in the text files.'''

# ------------------------------------------------------------------------------
import os, os.path

# List of names of languages in their own language ------------------------------
# It was copied from Plone 2.5.5 (PloneLanguageTool); we don't know of any
# "authentic source" for it.
nativeNames = {
    'aa' : 'магIарул мацI',
    'ab' : 'бызшәа',
    'af' : 'Afrikaans',
    'am' : 'አማርኛ',
    'ar' : 'العربية',
    'as' : 'অসমিয়া',
    'ay' : 'Aymara',
    'az' : 'Azəri Türkçəsi',
    'ba' : 'Bashkir',
    'be' : 'Беларускі',
    'bg' : 'Български',
    'bh' : 'Bihari',
    'bi' : 'Bislama',
    'bn' : 'বাংলা',
    'bo' : 'བོད་སྐད་',
    'bs' : 'Bosanski',
    'br' : 'Brezhoneg',
    'ca' : 'Català',
    'ch' : 'Chamoru',
    'co' : 'Corsu',
    'cs' : 'Čeština',
    'cy' : 'Cymraeg',
    'da' : 'Dansk',
    'de' : 'Deutsch',
    'dz' : 'རྫོང་ཁ',
    'el' : 'Ελληνικά',
    'en' : 'English',
    'eo' : 'Esperanto',
    'es' : 'Español',
    'et' : 'Eesti',
    'eu' : 'Euskara',
    'fa' : 'فارسی',
    'fi' : 'Suomi',
    'fj' : 'Fiji',
    'fo' : 'Føroyska',
    'fr' : 'Français',
    'fy' : 'Frysk',
    'ga' : 'Gaeilge',
    'gd' : 'Gàidhlig',
    'gl' : 'Galego',
    'gn' : 'Guarani',
    'gu' : 'ગુજરાતી',
    'gv' : 'Gaelg',
    'ha' : 'هَوُس',
    'he' : 'עברית',
    'hi' : 'हिंदी',
    'hr' : 'Hrvatski',
    'hu' : 'Magyar',
    'hy' : 'Հայերէն',
    'ia' : 'Interlingua',
    'id' : 'Bahasa Indonesia',
    'ie' : 'Interlingue',
    'ik' : 'Inupiak',
    'is' : 'Íslenska',
    'it' : 'Italiano',
    'iu' : 'ᐃᓄᒃᑎᑐᑦ',
    'ja' : '日本語',
    'jbo': 'lojban',
    'jw' : 'Basa Jawi',
    'ka' : 'ქართული',
    'kk' : 'ﻗﺎﺯﺍﻗﺸﺎ',
    'kl' : 'Greenlandic',
    'km' : 'ខ្មែរ',
    'kn' : 'ಕನ್ನಡ',
    'ko' : '한국어',
    'ks' : 'काऽशुर',
    'ku' : 'Kurdí',
    'kw' : 'Kernewek',
    'ky' : 'Кыргыз',
    'la' : 'Latin',
    'lb' : 'Lëtzebuergesch',
    'li' : 'Limburgs',
    'ln' : 'Lingala',
    'lo' : 'ພາສາລາວ',
    'lt' : 'Lietuviskai',
    'lv' : 'Latviešu',
    'mg' : 'Malagasy',
    'mi' : 'Maori',
    'mk' : 'Македонски',
    'ml' : 'മലയാളം',
    'mn' : 'Монгол',
    'mo' : 'Moldavian',
    'mr' : 'मराठी',
    'ms' : 'Bahasa Melayu',
    'mt' : 'Malti',
    'my' : 'Burmese',
    'na' : 'Nauru',
    'ne' : 'नेपाली',
    'nl' : 'Nederlands',
    'no' : 'Norsk',
    'nn' : 'Nynorsk',
    'oc' : 'Languedoc',
    'om' : 'Oromo',
    'or' : 'ଓଡ଼ିଆ',
    'pa' : 'ਪੰਜਾਬੀ',
    'pl' : 'Polski',
    'ps' : 'پښتو',
    'pt' : 'Português',
    'qu' : 'Quechua',
    'rm' : 'Rumantsch',
    'rn' : 'Kirundi',
    'ro' : 'Română',
    'ru' : 'Русский',
    'rw' : 'Kiyarwanda',
    'sa' : 'संस्कृत',
    'sd' : 'Sindhi',
    'se' : 'Northern Sámi',
    'sg' : 'Sangho',
    'sh' : 'Serbo-Croatian',
    'si' : 'Singhalese',
    'sk' : 'Slovenčina',
    'sl' : 'Slovenščina',
    'sm' : 'Samoan',
    'sn' : 'Shona',
    'so' : 'Somali',
    'sq' : 'Shqip',
    'sr' : 'српски',
    'ss' : 'Siswati',
    'st' : 'Sesotho',
    'su' : 'Sudanese',
    'sv' : 'Svenska',
    'sw' : 'Kiswahili',
    'ta' : 'தமிழ',
    'te' : 'తెలుగు',
    'tg' : 'Тоҷики',
    'th' : 'ไทย',
    'ti' : 'ትግርኛ',
    'tk' : 'түркmенче',
    'tl' : 'Tagalog',
    'tn' : 'Setswana',
    'to' : 'Lea faka-Tonga',
    'tr' : 'Türkçe',
    'ts' : 'Tsonga',
    'tt' : 'татарча',
    'tw' : 'Twi',
    'ug' : 'Uigur',
    'uk' : 'Українська',
    'ur' : 'اردو',
    'uz' : 'Ўзбекча',
    'vi' : 'Tiếng Việt',
    'vo' : 'Volapük',
    'wa' : 'Walon',
    'wo' : 'Wolof',
    'xh' : 'isiXhosa',
    'yi' : 'ײִדיש',
    'yo' : 'Yorùbá',
    'za' : 'Zhuang',
    'zh' : '中文',
    'zu' : 'isiZulu'
    }
# List of languages having direction right-to-left (RTL) -----------------------
rtlLanguages = ('ar', 'he', 'fa')

# Countries of the "euro" zone
vatEuroCountries = ('AT', 'BE', 'BG', 'CY', 'CZ', 'DE', 'DK', 'EE', 'GR', 'ES',
                    'FI', 'FR', 'GB', 'HU', 'IE', 'IT', 'LT', 'LU', 'LV', 'MT',
                    'NL', 'PL', 'PT', 'RO', 'SE', 'SI', 'SK')

# ------------------------------------------------------------------------------
class Languages:
    '''This class gives access to the language codes and names as standardized
       by ISO 639. The file was downloaded in July 2009 from
       http://www.loc.gov/standards/iso639-2/ascii_8bits.html (UTF-8
       version).'''

    def __init__(self):
        self.fileName = os.path.dirname(__file__) + '/LanguageCodesIso639.2.txt'
        self.languageCodes = []
        # Names of languages in English
        self.languageNames = []
        # Names of languages in their own language. This is not part of
        # ISO 639.2 and is taken from dict nativeNames above.
        self.nativeNames = []
        self.parseFile()

    def parseFile(self):
        '''Parses the language codes and names in the ISO file and puts them
           in self.languageCodes, self.languageNames and self.nativeNames.'''
        f = open(self.fileName)
        for line in f:
            if line.strip():
                lineElems = line.split('|')
                if lineElems[2].strip():
                    # Take only those languages that have a 2-char
                    # ISO 639-1 code.
                    self.languageCodes.append(lineElems[2])
                    self.languageNames.append(lineElems[3])
                    if lineElems[2] in nativeNames:
                        self.nativeNames.append(nativeNames[lineElems[2]])
                    else:
                        # Put the English name nevertheless
                        self.nativeNames.append(lineElems[3])
        f.close()

    def exists(self, code):
        '''Is p_code a valid 2-char language code?'''
        return code in self.languageCodes

    def get(self, code):
        '''Returns information about the language whose code is p_code.'''
        try:
            iCode = self.languageCodes.index(code)
            return self.languageCodes[iCode], self.languageNames[iCode], \
                   self.nativeNames[iCode]
        except ValueError:
            return None, None, None

    def __repr__(self):
        i = -1
        res = ''
        for languageCode in self.languageCodes:
            i += 1
            res += 'Language: ' + languageCode + ' - ' + self.languageNames[i]
            res += '\n'
        return res
# We instantiate Languages here because it is used by the appy.gen languages
# management.
languages = Languages()
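# A hypothetical usage sketch for the "languages" instance above (illustration
# only, not part of the original module):
#   languages.exists('fr')  # True
#   languages.get('fr')     # ('fr', 'French', 'Français')
#   languages.get('xx')     # (None, None, None)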

# Country codes ISO 3166-1 ------------------------------------------------------
class Countries:
    '''This class gives access to the country codes and names as standardized
       by ISO 3166-1. The file was downloaded in March 2011 from
       http://www.iso.org/iso/country_codes/iso_3166_code_lists.htm
       (the first line has been removed).'''

    def __init__(self):
        # This file has been downloaded from
        # http://www.iso.org/iso/country_codes.htm and converted to UTF-8.
        self.fileName = os.path.dirname(__file__) + '/CountryCodesIso3166.1.txt'
        self.countryCodes = []
        # Names of countries in English
        self.countryNames = []
        self.parseFile()

    def parseFile(self):
        f = open(self.fileName)
        for line in f:
            if line.strip():
                name, code = line.split(';')
                self.countryCodes.append(code.strip())
                self.countryNames.append(name.strip())
        f.close()

    def exists(self, code):
        '''Is p_code a valid 2-char country code?'''
        return code in self.countryCodes
# We instantiate Countries here because it is used by appy.gen for some field
# validations.
countries = Countries()

# ------------------------------------------------------------------------------
class BelgianCities:
    '''This class contains data about Belgian cities (postal codes). It creates
       a dictionary whose keys are postal codes and whose values are city
       names. The corresponding Excel file was downloaded on 2009-10-26 from
       https://www.post.be/site/fr/sse/advertising/addressed/biblio.html and
       converted to CSV (the field separator being ";"; field content is
       surrounded by double quotes).'''

    def __init__(self):
        self.fileName = os.path.dirname(__file__) + '/BelgianCommunes.txt'
        self.data = {}
        self.parseFile()

    def parseFile(self):
        f = open(self.fileName)
        for line in f:
            if line.strip():
                lineElems = line.split(';')
                self.data[int(lineElems[0].strip('"'))] = lineElems[1].strip('"')
        f.close()

    def exists(self, postalCode):
        '''Is p_postalCode a valid Belgian postal code?'''
        return postalCode in self.data
# ------------------------------------------------------------------------------
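A hypothetical sketch of the data classes above (the actual outputs depend on the bundled data files, not shown here in full):

    from appy.shared.data import countries, BelgianCities
    countries.exists('BE')    # True
    countries.exists('XX')    # False
    cities = BelgianCities()  # parses BelgianCommunes.txt at construction time
    cities.exists(1000)       # True if postal code 1000 (Brussels) is listed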
appy/shared/dav.py (new file, 291 lines)
@@ -0,0 +1,291 @@
# ------------------------------------------------------------------------------
import os, re, http.client, sys, stat, urllib.parse, time, socket, xml.sax
from urllib.parse import quote
from io import StringIO
from mimetypes import guess_type
from base64 import b64encode
from appy import Object
from appy.shared.utils import copyData, sequenceTypes
from appy.shared.xml_parser import XmlUnmarshaller, XmlMarshaller

# ------------------------------------------------------------------------------
class ResourceError(Exception): pass

# ------------------------------------------------------------------------------
class FormDataEncoder:
    '''Allows to encode form data for sending it through an HTTP request.'''
    def __init__(self, data):
        self.data = data # The data to encode, as a dict

    def marshalValue(self, name, value):
        if isinstance(value, str):
            return '%s=%s' % (name, quote(str(value)))
        elif isinstance(value, float):
            return '%s:float=%s' % (name, value)
        elif isinstance(value, int):
            return '%s:int=%s' % (name, value)
        else:
            raise ResourceError('Cannot encode value %s' % str(value))

    def encode(self):
        res = []
        for name, value in self.data.items():
            res.append(self.marshalValue(name, value))
        return '&'.join(res)

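# A hypothetical usage sketch of FormDataEncoder above (illustration only,
# not part of the original module):
#   encoder = FormDataEncoder({'title': 'My doc', 'count': 3, 'ratio': 0.5})
#   encoder.encode()  # 'title=My%20doc&count:int=3&ratio:float=0.5'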
# ------------------------------------------------------------------------------
|
||||
class SoapDataEncoder:
|
||||
'''Allows to encode SOAP data for sending it through a HTTP request.'''
|
||||
namespaces = {'SOAP-ENV': 'http://schemas.xmlsoap.org/soap/envelope/',
|
||||
'xsd' : 'http://www.w3.org/2001/XMLSchema',
|
||||
'xsi' : 'http://www.w3.org/2001/XMLSchema-instance'}
|
||||
namespacedTags = {'Envelope': 'SOAP-ENV', 'Body': 'SOAP-ENV', '*': 'py'}
|
||||
|
||||
def __init__(self, data, namespace='http://appyframework.org'):
|
||||
self.data = data
|
||||
# p_data can be:
|
||||
# - a string already containing a complete SOAP message
|
||||
# - a Python object, that we will convert to a SOAP message
|
||||
# Define the namespaces for this request
|
||||
self.ns = self.namespaces.copy()
|
||||
self.ns['py'] = namespace
|
||||
|
||||
def encode(self):
|
||||
# Do nothing if we have a SOAP message already
|
||||
if isinstance(self.data, basestring): return self.data
|
||||
# self.data is here a Python object. Wrap it in a SOAP Body.
|
||||
soap = Object(Body=self.data)
|
||||
# Marshall it.
|
||||
marshaller = XmlMarshaller(rootTag='Envelope', namespaces=self.ns,
|
||||
namespacedTags=self.namespacedTags)
|
||||
return marshaller.marshall(soap)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
class HttpResponse:
|
||||
'''Stores information about a HTTP response.'''
|
||||
def __init__(self, response, body, duration=None, utf8=True):
|
||||
self.code = response.status # The return code, ie 404, 200, 500...
|
||||
self.text = response.reason # Textual description of the code
|
||||
self.headers = response.msg # A dict-like object containing the headers
|
||||
self.body = body # The body of the HTTP response
|
||||
# p_duration, if given, is the time, in seconds, we have waited, before
|
||||
# getting this response after having sent the request.
|
||||
self.duration = duration
|
||||
self.utf8 = utf8
|
||||
# The following attribute may contain specific data extracted from
|
||||
# the previous fields. For example, when response if 302 (Redirect),
|
||||
# self.data contains the URI where we must redirect the user to.
|
||||
self.data = self.extractData()
|
||||
|
||||
def __repr__(self):
|
||||
duration = ''
|
||||
if self.duration: duration = ', got in %.4f seconds' % self.duration
|
||||
return '<HttpResponse %s (%s)%s>' % (self.code, self.text, duration)
|
||||
|
||||
def extractContentType(self, contentType):
|
||||
'''Extract the content type from the HTTP header, potentially removing
|
||||
encoding-related data.'''
|
||||
i = contentType.find(';')
|
||||
if i != -1: return contentType[:i]
|
||||
return contentType

    xmlHeaders = ('text/xml', 'application/xml', 'application/soap+xml')
    def extractData(self):
        '''This method extracts, from the various parts of the HTTP response,
           some useful information. For example, it will find the URI to
           redirect the user to if self.code is 302, or will unmarshall XML
           data into Python objects.'''
        if self.code == 302:
            return urllib.parse.urlparse(self.headers['location'])[2]
        elif 'content-type' in self.headers:
            contentType = self.extractContentType(self.headers['content-type'])
            for xmlHeader in self.xmlHeaders:
                if contentType.startswith(xmlHeader):
                    # Return an unmarshalled version of the XML content, for
                    # easy use in Python.
                    try:
                        parser = XmlUnmarshaller(utf8=self.utf8)
                        res = parser.parse(self.body)
                        if parser.rootTag == 'exception':
                            # This is an exception: "res" contains the traceback
                            raise ResourceError('Distant server exception: ' \
                                                '%s' % res)
                        return res
                    except xml.sax.SAXParseException as se:
                        raise ResourceError('Invalid XML response (%s)' % \
                                            str(se))

# ------------------------------------------------------------------------------
urlRex = re.compile(r'http[s]?://([^:/]+)(:[0-9]+)?(/.+)?', re.I)
binaryRex = re.compile(r'[\000-\006\177-\277]')

class Resource:
    '''Every instance of this class represents some web resource accessible
       through HTTP.'''

    def __init__(self, url, username=None, password=None, measure=False,
                 utf8=True):
        self.username = username
        self.password = password
        self.url = url
        # If p_measure is True, we will measure, for every request sent, the
        # time we wait until we receive the response.
        self.measure = measure
        # If p_measure is True, we will store hereafter the total time (in
        # seconds) spent waiting for the server for all requests sent through
        # this resource object.
        self.serverTime = 0
        # Split the URL into its components
        res = urlRex.match(url)
        if res:
            host, port, uri = res.group(1,2,3)
            self.host = host
            self.port = port and int(port[1:]) or 80
            self.uri = uri or '/'
        else: raise Exception('Wrong URL: %s' % str(url))
        # If some headers must be sent with any request sent through this
        # resource (like a cookie), you can store them in the following dict.
        self.headers = {'Host': self.host}
        self.utf8 = utf8
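
For instance, urlRex splits a URL into host, port and path:

r = Resource('http://localhost:8080/myApp')
# r.host == 'localhost', r.port == 8080, r.uri == '/myApp'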

    def __repr__(self):
        return '<Dav resource at %s>' % self.url

    def updateHeaders(self, headers):
        # Add credentials if present
        if not (self.username and self.password): return
        if 'Authorization' in headers: return
        credentials = '%s:%s' % (self.username, self.password)
        credentials = credentials.replace('\012', '')
        # Base64-encode the credentials. This assumes "from base64 import
        # b64encode" among this module's imports: the Python 2 "encodestring"
        # shortcut does not exist anymore in Python 3.
        headers['Authorization'] = "Basic %s" % \
                                   b64encode(credentials.encode()).decode()
        headers['User-Agent'] = 'Appy'
        headers['Host'] = self.host
        headers['Connection'] = 'close'
        headers['Accept'] = '*/*'
        return headers

    def send(self, method, uri, body=None, headers={}, bodyType=None):
        '''Sends an HTTP request with p_method, for p_uri.'''
        conn = http.client.HTTPConnection(self.host, self.port)
        try:
            conn.connect()
        except socket.gaierror as sge:
            raise ResourceError('Check your Internet connection (%s)' % \
                                str(sge))
        except socket.error as se:
            raise ResourceError('Connection error (%s)' % str(se))
        # Tell what kind of HTTP request it will be.
        conn.putrequest(method, uri, skip_host=True)
        # Add HTTP headers
        self.updateHeaders(headers)
        if self.headers: headers.update(self.headers)
        for n, v in list(headers.items()): conn.putheader(n, v)
        conn.endheaders()
        # Add the HTTP body
        if body:
            if not bodyType: bodyType = 'string'
            copyData(body, conn, 'send', type=bodyType)
        # Send the request, get the reply
        if self.measure: startTime = time.time()
        response = conn.getresponse()
        if self.measure: endTime = time.time()
        body = response.read()
        conn.close()
        # Return a smart object containing the various parts of the response
        duration = None
        if self.measure:
            duration = endTime - startTime
            self.serverTime += duration
        return HttpResponse(response, body, duration=duration, utf8=self.utf8)

    def mkdir(self, name):
        '''Creates a folder named p_name in this resource.'''
        folderUri = self.uri + '/' + name
        #body = '<d:propertyupdate xmlns:d="DAV:"><d:set><d:prop>' \
        #       '<d:displayname>%s</d:displayname></d:prop></d:set>' \
        #       '</d:propertyupdate>' % name
        return self.send('MKCOL', folderUri)

    def delete(self, name):
        '''Deletes a file or a folder (and all contained files if any) named
           p_name within this resource.'''
        toDeleteUri = self.uri + '/' + name
        return self.send('DELETE', toDeleteUri)

    def add(self, content, type='fileName', name=''):
        '''Adds a file to this resource. p_type can be:
           - "fileName"  In this case, p_content is the path to a file on disk
                         and p_name is ignored;
           - "zope"      In this case, p_content is an instance of
                         OFS.Image.File and the name of the file is given in
                         p_name.
        '''
        if type == 'fileName':
            # p_content is the name of a file on disk
            size = os.stat(content)[stat.ST_SIZE]
            body = open(content, 'rb')
            name = os.path.basename(content)
            fileType, encoding = guess_type(content)
            bodyType = 'file'
        elif type == 'zope':
            # p_content is a "Zope" file, ie an OFS.Image.File instance;
            # p_name is given.
            fileType = content.content_type
            encoding = None
            size = content.size
            body = content
            bodyType = 'zope'
        fileUri = self.uri + '/' + name
        headers = {'Content-Length': str(size)}
        if fileType: headers['Content-Type'] = fileType
        if encoding: headers['Content-Encoding'] = encoding
        res = self.send('PUT', fileUri, body, headers, bodyType=bodyType)
        # Close the file when relevant
        if type == 'fileName': body.close()
        return res

    def get(self, uri=None, headers={}, params=None):
        '''Performs an HTTP GET on the server. Parameters can be given as a
           dict in p_params.'''
        if not uri: uri = self.uri
        # Encode and append params if given
        if params:
            sep = ('?' in uri) and '&' or '?'
            uri = '%s%s%s' % (uri, sep, urllib.parse.urlencode(params))
        return self.send('GET', uri, headers=headers)
    rss = get
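
A usage sketch (URL and parameters are invented):

res = Resource('http://localhost:8080')
response = res.get('/search', params={'q': 'appy', 'max': 10})
if response.code == 200: print(response.data)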

    def post(self, data=None, uri=None, headers={}, encode='form'):
        '''Performs an HTTP POST on the server. If p_encode is "form", p_data
           is considered to be a dict representing form data that will be
           form-encoded. Else, p_data will be considered as the ready-to-send
           body of the HTTP request.'''
        if not uri: uri = self.uri
        # Prepare the data to send
        if encode == 'form':
            # Format the form data and prepare headers
            body = FormDataEncoder(data).encode()
            headers['Content-Type'] = 'application/x-www-form-urlencoded'
        else:
            body = data
        headers['Content-Length'] = str(len(body))
        return self.send('POST', uri, headers=headers, body=body)

    def soap(self, data, uri=None, headers={}, namespace=None, soapAction=None):
        '''Sends a SOAP message to this resource. p_namespace is the URL of the
           server-specific namespace. If header value "SOAPAction" is different
           from self.url, specify it in p_soapAction.'''
        if not uri: uri = self.uri
        # Prepare the data to send
        data = SoapDataEncoder(data, namespace).encode()
        headers['SOAPAction'] = soapAction or self.url
        headers['Content-Type'] = 'text/xml'
        res = self.post(data, uri, headers=headers, encode=None)
        # Unwrap the content from the SOAP envelope
        if hasattr(res.data, 'Body'):
            res.data = res.data.Body
        return res
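
A sketch of a SOAP call (server URL, namespace and payload are invented):

ws = Resource('http://example.org/ws')
answer = ws.soap(Object(echo='hello'), namespace='http://example.org/ns')
# answer.data holds the unmarshalled content of the SOAP Body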
# ------------------------------------------------------------------------------

673
appy/shared/diff.py
Normal file

@ -0,0 +1,673 @@
# ------------------------------------------------------------------------------
import re, difflib

# ------------------------------------------------------------------------------
innerDiff = re.compile('<span name="(insert|delete)".*? title="(.*?)">' \
                       '(.*?)</span>')
htmlTag = re.compile(r'<(?P<tag>\w+)( .*?)?>(.*)</(?P=tag)>')

# ------------------------------------------------------------------------------
class Merger:
    '''This class allows merging 2 lines of text, each containing inserts and
       deletions.'''

    # Exception that may be raised by this class if the merge fails.
    class MergeError(Exception): pass

    def __init__(self, lineA, lineB, previousDiffs, differ):
        # lineA comes "naked": any diff previously found on it was removed from
        # it (ie, deleted text has been completely removed, while inserted text
        # has been included, but without its surrounding tag). Info about
        # previous diffs is kept in a separate variable "previousDiffs".
        self.lineA = lineA
        self.previousDiffs = previousDiffs
        # Differences between lineA and lineB have just been computed and are
        # included (within inner tags) in lineB. We will compute their position
        # in self.newDiffs (see below).
        self.lineB = lineB
        self.newDiffs = self.computeNewDiffs()
        # We choose to walk within self.lineB. We will keep in self.i our
        # current position within self.lineB.
        self.i = 0
        # The delta index that must be applied on previous diffs
        self.deltaPrevious = 0
        # A link to the caller HtmlDiff class
        self.differ = differ

    def computeNewDiffs(self):
        '''lineB may include inner "insert" and/or "delete" tags. This function
           detects them.'''
        i = 0
        res = []
        while i < len(self.lineB):
            match = innerDiff.search(self.lineB, i)
            if not match: break
            res.append(match)
            i = match.end()
        return res

    def getNextDiff(self):
        '''During the merging process on self.lineB, which diff must be
           "consumed" next? An old one? A new one?'''
        # No more diffs?
        if not self.previousDiffs and not self.newDiffs:
            return None, None, None
        # No more new diffs?
        if not self.newDiffs:
            diff = self.previousDiffs[0]
            del self.previousDiffs[0]
            return diff, diff.start() + self.deltaPrevious, True
        # No more previous diffs?
        if not self.previousDiffs:
            diff = self.newDiffs[0]
            del self.newDiffs[0]
            return diff, diff.start(), False
        # At least one more new and previous diff. Which one to consume?
        previousDiff = self.previousDiffs[0]
        newDiff = self.newDiffs[0]
        previousDiffIndex = previousDiff.start() + self.deltaPrevious
        newDiffIndex = newDiff.start()
        if previousDiffIndex <= newDiffIndex:
            # Previous wins
            del self.previousDiffs[0]
            return previousDiff, previousDiffIndex, True
        else:
            # New wins
            del self.newDiffs[0]
            return newDiff, newDiffIndex, False

    def manageBackOverlap(self, newDiff, oldText):
        '''p_newDiff has been removed from self.lineB. Here we check that there
           is no overlap with inserts from self.lineA, ie, text that was
           inserted in one of the many cumulative updates from self.lineA and
           that was deleted in self.lineB.'''
        # Before managing the overlap, check if there is one.
        oldDiff, oldDiffStart, isPrevious = self.getNextDiff()
        newDiffEnd = self.i + len(newDiff.group(3)) - len(oldText)
        if not oldDiff or not isPrevious or (oldDiffStart >= newDiffEnd):
            # There is no overlap. Dump p_newDiff and the next diff as is
            # (if any).
            res = self.dumpNewDiff(newDiff)
            if oldDiff:
                # WARNING: oldDiffStart is not up-to-date! Indeed, we have
                # called getNextDiff (at the start of this method) BEFORE
                # calling dumpNewDiff (the line above). But dumpNewDiff updates
                # self.deltaPrevious. So we need to recompute oldDiffStart with
                # the current self.deltaPrevious.
                if isPrevious:
                    oldDiffStart = oldDiff.start() + self.deltaPrevious
                res += self.dumpDiff(oldDiff, oldDiffStart, isPrevious)
            return res
        # If we are here, we must manage a back overlap. We will do it by
        # "consuming" p_newDiff.
        newText = newDiff.group(3)
        res = ''
        consumed = 0
        while True:
            # First, dump the part of p_newDiff that is not impacted by oldDiff.
            text = newText[consumed:oldDiffStart-self.i]
            if text:
                res += self.differ.getModifiedChunk(text, 'delete', '',
                                                    msg=newDiff.group(2))
                consumed += len(text)
            # Then, dump the part that overlaps with oldDiff
            text = oldDiff.group(3)
            res += self.differ.getModifiedChunk(text, 'delete', '',
                                                msg=newDiff.group(2))
            consumed += len(text)
            if consumed >= len(newText): break
            # Get the next diff
            oldDiff, oldDiffStart, isPrevious = self.getNextDiff()
            if not oldDiff or not isPrevious or (oldDiffStart > newDiffEnd):
                # End of the overlap. Dump what remains in newText and dump
                # this next uncorrelated diff afterwards.
                res += self.differ.getModifiedChunk(newText[consumed:],
                                            'delete', '', msg=newDiff.group(2))
                self.i += len(newDiff.group(0))
                if oldDiff:
                    res += self.dumpDiff(oldDiff, oldDiffStart, isPrevious)
                return res
        # We have consumed p_newDiff entirely. Move forward within self.lineB
        # w.r.t. p_newDiff.
        self.i += len(newDiff.group(0))
        return res

    def manageOverlap(self, oldDiff):
        '''p_oldDiff is a previously inserted text from self.lineA. This text
           is not found anymore at the start of self.lineB[self.i:]: it means
           that an overlapping diff exists among new diffs. We will manage this
           by identifying several cut "insert" and/or "edit" zones.'''
        # The idea here is to "consume" the old inserted text until we have
        # found, within the new diff, all updates that have been performed on
        # this old text. Then, we will have found the complete "zone" that was
        # impacted by both old and new diffs.
        oldText = oldDiff.group(3)
        res = ''
        while oldText:
            # Get the overlapping (new) diff.
            newDiff, newDiffStart, isPrevious = self.getNextDiff()
            if not newDiff or (newDiffStart >= (self.i + len(oldText))):
                # No more new diffs, or a new diff but far away, not within
                # oldText. So insert the rest of oldText as is.
                # Invariant: at this point, we should find what remains in
                # oldText at self.lineB[self.i:].
                if not self.lineB[self.i:].startswith(oldText):
                    raise self.MergeError('An error occurred while computing ' \
                                          'overlapping diffs.')
                res += self.differ.getModifiedChunk(oldText, 'insert', '',
                                                    msg=oldDiff.group(2))
                self.i += len(oldText)
                oldText = ''
                # If we have "popped" a new diff, dump it anyway.
                if newDiff:
                    res += self.dumpDiff(newDiff, newDiffStart, isPrevious)
                break
            # Dump the part of the old text that has been untouched by the new
            # diff.
            if self.i < newDiffStart:
                untouched = self.lineB[self.i:newDiffStart]
                res += self.differ.getModifiedChunk(untouched, 'insert', '',
                                                    msg=oldDiff.group(2))
                self.i = newDiffStart
                oldText = oldText[len(untouched):]
            # Manage the new diff
            if (newDiff.group(1) == 'delete') and \
               len(newDiff.group(3)) > len(oldText):
                # Among deleted text, check that there is no overlap with
                # previous diffs (text deleted in self.lineB might have been
                # added in one of the many cumulated updates in self.lineA).
                res += self.manageBackOverlap(newDiff, oldText)
                oldText = ''
            else:
                # Dump the new diff and update oldText
                res += self.dumpNewDiff(newDiff)
                if newDiff.group(1) == 'delete':
                    # Consume oldText, that was deleted, at least partly, by
                    # this diff.
                    oldText = oldText[len(newDiff.group(3)):]
        return res

    def dumpNewDiff(self, diff):
        '''Computes p_diff as it must appear in the result and returns it.'''
        # Dump the new diff (from self.lineB)
        res = diff.group(0)
        # Move forward within self.lineB
        self.i += len(diff.group(0))
        # Because of this new diff, all indexes computed on self.lineA are now
        # wrong because we express them relative to lineB. So: update
        # self.deltaPrevious to take this into account.
        self.deltaPrevious += len(diff.group(0))
        if diff.group(1) == 'delete':
            # The indexes in self.lineA do not take the deleted text into
            # account, because it wasn't deleted at this time. So remove
            # from self.deltaPrevious the length of the removed text.
            self.deltaPrevious -= len(diff.group(3))
        return res

    def dumpDiff(self, diff, diffStart, isPrevious):
        '''Computes the next p_diff (starting at p_diffStart) to insert into the
           result and returns it. If p_isPrevious is True, the diff is an old
           one (from self.lineA); else, it is a new one (from self.lineB).'''
        # Dump the part of lineB between self.i and diffStart
        res = self.lineB[self.i:diffStart]
        self.i = diffStart
        if isPrevious:
            # Dump the old diff (from self.lineA)
            if diff.group(1) == 'insert':
                # Check if the inserted text is still present in lineB
                if self.lineB[self.i:].startswith(diff.group(3)):
                    # Yes. Dump the diff and go ahead within lineB
                    res += diff.group(0)
                    self.i += len(diff.group(3))
                else:
                    # The inserted text can't be found as is in lineB. It must
                    # have been (partly) re-edited or removed.
                    overlap = self.manageOverlap(diff)
                    res += overlap
            elif diff.group(1) == 'delete':
                res += diff.group(0)
        else:
            res += self.dumpNewDiff(diff)
        return res

    def merge(self):
        '''Merges self.previousDiffs into self.lineB.'''
        res = ''
        diff, diffStart, isPrevious = self.getNextDiff()
        while diff:
            res += self.dumpDiff(diff, diffStart, isPrevious)
            # Load the next diff, if any
            diff, diffStart, isPrevious = self.getNextDiff()
        # Dump the end of self.lineB if not completely consumed
        if self.i < len(self.lineB): res += self.lineB[self.i:]
        return res

# ------------------------------------------------------------------------------
class HtmlDiff:
    '''This class allows computing differences between two versions of some
       HTML chunk.'''
    insertStyle = 'color: blue; cursor: help'
    deleteStyle = 'color: red; text-decoration: line-through; cursor: help'

    def __init__(self, old, new,
                 insertMsg='Inserted text', deleteMsg='Deleted text',
                 insertCss=None, deleteCss=None, insertName='insert',
                 deleteName='delete', diffRatio=0.7):
        # p_old and p_new are strings containing chunks of HTML. If they come
        # as utf-8-encoded bytes, we decode them to str; this way, every char
        # is only one char long.
        self.old = old.strip()
        if isinstance(self.old, bytes): self.old = self.old.decode('utf-8')
        self.new = new.strip()
        if isinstance(self.new, bytes): self.new = self.new.decode('utf-8')
        # Every time an "insert" or "delete" difference will be detected from
        # p_old to p_new, the impacted chunk will be surrounded by a tag that
        # will get, respectively, a 'title' attribute filled with p_insertMsg
        # or p_deleteMsg. The message will give an explanation about the change
        # (who made it and at what time, for example).
        self.insertMsg = insertMsg
        if isinstance(self.insertMsg, bytes):
            self.insertMsg = self.insertMsg.decode('utf-8')
        self.deleteMsg = deleteMsg
        if isinstance(self.deleteMsg, bytes):
            self.deleteMsg = self.deleteMsg.decode('utf-8')
        # This tag will get a CSS class p_insertCss or p_deleteCss for
        # highlighting the change. If no class is provided, default styles will
        # be used (see HtmlDiff.insertStyle and HtmlDiff.deleteStyle).
        self.insertCss = insertCss
        self.deleteCss = deleteCss
        # This tag will get a "name" attribute whose content will be
        # p_insertName or p_deleteName
        self.insertName = insertName
        self.deleteName = deleteName
        # The diff algorithm of this class will need to identify similarities
        # between strings. Similarity ratios will be computed by using method
        # difflib.SequenceMatcher.ratio (see m_isSimilar below). Strings whose
        # comparison produces a ratio above p_diffRatio will be considered
        # similar.
        self.diffRatio = diffRatio
        # Some computed values
        for tag in ('div', 'span'):
            for type in ('insert', 'delete'):
                setattr(self, '%s%sPrefix' % (tag, type.capitalize()),
                        '<%s name="%s"' % (tag, getattr(self, '%sName' % type)))

    def getModifiedChunk(self, seq, type, sep, msg=None):
        '''p_sep.join(p_seq) (if p_seq is a list) or p_seq (if p_seq is a
           string) is a chunk that was either inserted (p_type='insert') or
           deleted (p_type='delete'). This method surrounds this part with a
           div or span tag that gets some CSS class allowing to highlight the
           update.

           If p_msg is given, it will be used instead of the default
           p_type-related message stored on p_self.'''
        # Will the surrounding tag be a div or a span?
        if sep == '\n': tag = 'div'
        else: tag = 'span'
        # What message will it show in its 'title' attribute?
        if not msg:
            msg = getattr(self, '%sMsg' % type)
        # What CSS class (or, if none, tag-specific style) will be used?
        cssClass = getattr(self, '%sCss' % type)
        if cssClass:
            style = 'class="%s"' % cssClass
        else:
            style = 'style="%s"' % getattr(self, '%sStyle' % type)
        # The 'name' attribute of the tag indicates the type of the update.
        tagName = getattr(self, '%sName' % type)
        # The idea is: if there are several lines, every line must be surrounded
        # by a tag. This way, we know that a surrounding tag can't span several
        # lines, which is a prerequisite for managing cumulative diffs.
        if sep == ' ':
            if not isinstance(seq, str):
                seq = sep.join(seq)
            sep = ''
        if isinstance(seq, str):
            return '%s<%s name="%s" %s title="%s">%s</%s>%s' % \
                   (sep, tag, tagName, style, msg, seq, tag, sep)
        else:
            res = ''
            for line in seq:
                res += '%s<%s name="%s" %s title="%s">%s</%s>%s' % \
                       (sep, tag, tagName, style, msg, line, tag, sep)
            return res
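
With default settings, for instance, an inserted word comes out wrapped in a
span (output wrapped here for readability):

d = HtmlDiff('<p>a</p>', '<p>a b</p>')
d.getModifiedChunk('b', 'insert', ' ')
# -> '<span name="insert" style="color: blue; cursor: help"
#     title="Inserted text">b</span>'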

    def applyDiff(self, line, diff):
        '''p_diff is a regex match containing an insert or delete that was
           found within p_line. This function applies the diff, removing or
           inserting the diff into p_line.'''
        # Keep content only for "insert" tags.
        content = ''
        if diff.group(1) == 'insert':
            content = diff.group(3)
        return line[:diff.start()] + content + line[diff.end():]

    def isSimilar(self, s1, s2):
        '''Returns True if strings p_s1 and p_s2 can be considered
           similar.'''
        # Bypass the similarity algorithm for strings of length 1. Else, it can
        # lead to infinite loops between methods getHtmlDiff and getReplacement.
        if (len(s1) == 1) and (len(s2) == 1) and (s1 != s2): return False
        ratio = difflib.SequenceMatcher(a=s1.lower(), b=s2.lower()).ratio()
        return ratio > self.diffRatio

    def getLineAndType(self, line):
        '''p_line is a string that may already have been surrounded by an
           "insert" or "delete" tag. This is what we try to determine here.
           This method returns a tuple (type, line, innerDiffs, outerTag),
           where "type" can be:
           * "insert" if it has already been flagged as inserted;
           * "delete" if it has already been flagged as deleted;
           * None otherwise;
           "line" holds the original parameter p_line, except:
           * if type="insert". In that case, the surrounding insert tag has been
             removed and placed into "outerTag" (a re.MatchObject from regex
             htmlTag, see above);
           * if inner diff tags (insert or delete) are found. In that case,
             - if inner "insert" tags are found, they are removed but their
               content is kept;
             - if inner "delete" tags are found, they are removed, content
               included;
             - "innerDiffs" holds the list of re.MatchObject instances
               representing the found inner tags.
        '''
        if line.startswith(self.divDeletePrefix):
            return ('delete', line, None, None)
        if line.startswith(self.divInsertPrefix):
            # Return the line without the surrounding tag.
            action = 'insert'
            outerTag = htmlTag.match(line)
            line = outerTag.group(3)
        else:
            action = None
            outerTag = None
        # Replace found inner inserts with their content.
        innerDiffs = []
        while True:
            match = innerDiff.search(line)
            if not match: break
            # We found one.
            innerDiffs.append(match)
            line = self.applyDiff(line, match)
        return (action, line, innerDiffs, outerTag)

    def computeTag(self, regexTag, content):
        '''p_regexTag is a re.MatchObject from regex htmlTag. p_content is a
           new content to put within this tag. This method produces the new
           string tag filled with p_content.'''
        # Recompute the start tag from p_regexTag
        startTag = '<%s' % regexTag.group(1)
        # Add tag attributes if found
        if regexTag.group(2):
            startTag += regexTag.group(2)
        startTag += '>'
        # Recompute the end tag
        endTag = '</%s>' % regexTag.group(1)
        # Wrap the content into the reified tag
        return startTag + content + endTag

    def getSeqDiff(self, seqA, seqB, sep):
        '''p_seqA and p_seqB are lists of strings. Here we will try to identify
           similarities between strings from p_seqA and p_seqB, and return a
           list of differences between p_seqA and p_seqB, where each element
           is a tuple (action, line).
           * If p_action is "delete", "line" is a line of p_seqA considered as
             not included anymore in p_seqB;
           * If p_action is "insert", "line" is a line of p_seqB considered as
             not included in p_seqA;
           * If p_action is "replace", "line" is a tuple
             (lineA, lineB, previousDiffsA, outerTagA) containing one line from
             p_seqA and one from p_seqB considered similar. "previousDiffsA"
             contains potential previous inner diffs that were found in (but
             extracted from, for comparison purposes) lineA; "outerTagA" is
             lineA's surrounding tag when lineA was a previously inserted line.
        '''
        res = []
        i = j = k = 0
        # Scan every string from p_seqA and try to find a similar string in
        # p_seqB.
        while i < len(seqA):
            pastAction, lineA, innerDiffs, outerTag = \
                self.getLineAndType(seqA[i])
            if pastAction == 'delete':
                # We will consider this line as "equal" because it has already
                # been noted as deleted in a previous diff.
                res.append( ('equal', seqA[i]) )
            elif k == len(seqB):
                # We have already "consumed" every string from p_seqB. Remaining
                # strings from p_seqA must be considered as deleted (or
                # sometimes equal, see above)
                if not pastAction: res.append( ('delete', seqA[i]) )
                else:
                    # 'insert': should not happen. The inserted line should also
                    # be found in seqB.
                    res.append( ('equal', seqA[i]) )
            else:
                # Try to find a line in seqB which is similar to lineA.
                similarFound = False
                for j in range(k, len(seqB)):
                    if self.isSimilar(lineA, seqB[j]):
                        similarFound = True
                        # Strings between indices k and j in p_seqB must be
                        # considered as inserted, because no similar line exists
                        # in p_seqA.
                        if k < j:
                            for line in seqB[k:j]: res.append(('insert', line))
                        # Similar strings are appended in a 'replace' entry,
                        # except if lineA is already an insert from a
                        # previous diff: in this case, we keep the "old"
                        # version: the new one is the same, but for which we
                        # don't remember who updated it.
                        if (pastAction == 'insert') and (lineA == seqB[j]):
                            res.append( ('equal', seqA[i]) )
                        else:
                            res.append(('replace', (lineA, seqB[j],
                                                    innerDiffs, outerTag)))
                        k = j+1
                        break
                if not similarFound: res.append( ('delete', seqA[i]) )
            i += 1
        # Consider any "unconsumed" line from p_seqB as being inserted.
        if k < len(seqB):
            for line in seqB[k:]: res.append( ('insert', line) )
        # Merge similar diffs, except if the separator is a carriage return
        if sep == '\n': return res
        newRes = []
        lastType = None
        for type, data in res:
            if lastType and (type != 'replace') and (lastType == type):
                newRes[-1] = (type, newRes[-1][1] + sep + data)
            else:
                newRes.append( (type, data) )
            lastType = type
        return newRes

    def split(self, s, sep):
        '''Splits string p_s with p_sep. If p_sep is a space, the split can't
           happen for a leading or trailing space, which must be considered as
           being part of the first or last word.'''
        # Manage sep == '\n'
        if sep == '\n': return s.split(sep)
        leadSpace = s.startswith(sep)
        trailSpace = s.endswith(sep)
        if not leadSpace and not trailSpace: return s.split(sep)
        res = s.strip(sep).split(sep)
        if leadSpace: res[0] = sep + res[0]
        if trailSpace: res[-1] = res[-1] + sep
        return res
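
For example, a leading space sticks to the first word instead of producing an
empty first element:

HtmlDiff('a', 'b').split(' hello world', ' ')  # -> [' hello', 'world']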

    garbage = ('', '\r')
    def removeGarbage(self, l, sep):
        '''Removes from list p_l elements that have no interest, like blank
           strings. Also strips lines when p_sep is a carriage return.'''
        i = len(l)-1
        while i >= 0:
            if l[i] in self.garbage: del l[i]
            elif sep == '\n': l[i] = l[i].strip()
            i -= 1
        return l

    def getStringDiff(self, old, new):
        '''Identifies the differences between strings p_old and p_new by
           computing:
           * i = the end index of the potential common starting part (if no
                 common part is found, i=0);
           * jo = the start index in p_old of the potential common ending part;
           * jn = the start index in p_new of the potential common ending part.
        '''
        # Compute i
        i = -1
        diffFound = False
        while not diffFound:
            i += 1
            if (i == len(old)) or (i == len(new)): break
            if old[i] != new[i]: diffFound = True
        # i can't be inside an HTML tag.
        if (i > 0) and (old[i-1] == '<'): i -= 1
        # Compute jo and jn
        jo = len(old)
        jn = len(new)
        diffFound = False
        while not diffFound:
            if (jo == i) or (jn == i):
                # We have reached the end of substring old[i:] or new[i:]
                jo -= 1
                jn -= 1
                break
            jo -= 1
            jn -= 1
            if old[jo] != new[jn]: diffFound = True
        return i, jo+1, jn+1
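
A small example of the indices this method produces:

HtmlDiff('a', 'b').getStringDiff('abcde', 'abXde')
# -> (2, 3, 3): common prefix 'ab', common suffix 'de'; only
#    old[2:3] ('c') and new[2:3] ('X') differ.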

    def getDumpPrefix(self, res, add, previousAdd, sep):
        '''In most cases, when concatenating the next diff (p_add) to the
           global result (p_res), I must prefix it with p_sep (except if p_res
           is still empty). But when p_sep is a space, no space must be inserted
           between 2 adjacent updates (p_add and p_previousAdd), because such a
           space was not in the original version. This method computes the
           prefix, which can thus be empty if this latter case is met.'''
        prefix = ''
        if not res: return prefix
        if (sep == ' ') and previousAdd and \
           previousAdd.endswith('</span>') and add.startswith('<span'):
            pass
        else:
            prefix = sep
        return prefix

    def getReplacement(self, chunkA, chunkB, sep):
        '''p_chunkA has been replaced with p_chunkB. Computes this update and
           returns it.'''
        res = ''
        # We know that some lines have been replaced from chunkA to chunkB. By
        # identifying similarities between those lines, consider some as having
        # been deleted, modified or inserted.
        previousAdd = None
        for action, line in self.getSeqDiff(chunkA, chunkB, sep):
            add = None
            if action in ('insert', 'delete'):
                add = self.getModifiedChunk(line, action, sep)
            elif action == 'equal':
                add = line
            elif action == 'replace':
                lineA, lineB, previousDiffsA, outerTagA = line
                # lineA has been replaced with lineB. Here, we will investigate
                # further and explore differences at the *word* level between
                # lineA and lineB. previousDiffsA may contain a series of
                # updates (inserts, deletions) that have already been performed
                # on lineA. If lineA was a previously inserted line, lineA
                # comes without its outer tag, which lies in outerTagA (as a
                # re.MatchObject instance computed from regex htmlTag). In that
                # case, we will wrap the result with that tag.

                # As a preamble, and in order to restrict annoyances due to the
                # presence of XHTML tags, we will remove start and end tags
                # from lineA and lineB if present.
                i, ja, jb = self.getStringDiff(lineA, lineB)
                diff = self.getHtmlDiff(lineA[i:ja], lineB[i:jb], ' ')
                add = lineB[:i] + diff + lineB[jb:]
                # Merge potential previous inner diff tags that were found in
                # (but extracted from) lineA.
                if previousDiffsA:
                    try:
                        merger = Merger(lineA, add, previousDiffsA, self)
                        add = merger.merge()
                    except Merger.MergeError as e:
                        # The merge algorithm has failed. Simplify and consider
                        # lineA as having been completely deleted and lineB as
                        # having been completely inserted.
                        add = self.getModifiedChunk(lineA, 'delete', sep) + \
                              self.getModifiedChunk(lineB, 'insert', sep)
                # Rewrap the line into outerTagA if lineA was a line tagged as
                # previously inserted.
                if outerTagA:
                    add = self.computeTag(outerTagA, add)
            if add: res += self.getDumpPrefix(res, add, previousAdd, sep) + add
            previousAdd = add
        return res

    def getHtmlDiff(self, old, new, sep):
        '''Returns the differences between p_old and p_new. The result is a
           string containing the comparison in HTML format. p_sep is used for
           turning p_old and p_new into sequences. If p_sep is a carriage
           return, this method is used for performing a whole diff between 2
           strings split into sequences of lines; if p_sep is a space, the diff
           is a word-by-word comparison within 2 lines that have been detected
           as similar in a previous call to m_getHtmlDiff with sep=carriage
           return.'''
        res = ''
        a = self.split(old, sep)
        b = self.split(new, sep)
        matcher = difflib.SequenceMatcher()
        matcher.set_seqs(a, b)
        previousAdd = None
        for action, i1, i2, j1, j2 in matcher.get_opcodes():
            add = None
            # When sep is a space, we need to remember if we are dealing with
            # the last diff within the line or not.
            chunkA = self.removeGarbage(a[i1:i2], sep)
            chunkB = self.removeGarbage(b[j1:j2], sep)
            if action == 'equal':
                if chunkA: add = sep.join(chunkA)
            elif action == 'insert':
                if chunkB:
                    add = self.getModifiedChunk(chunkB, action, sep)
            elif action == 'delete':
                if chunkA:
                    add = self.getModifiedChunk(chunkA, action, sep)
            elif action == 'replace':
                if not chunkA and not chunkB:
                    pass
                elif not chunkA:
                    # Was an addition, not a replacement
                    add = self.getModifiedChunk(chunkB, 'insert', sep)
                elif not chunkB:
                    # Was a deletion, not a replacement
                    add = self.getModifiedChunk(chunkA, 'delete', sep)
                else: # We have a true replacement
                    if (sep == ' ') and (sep not in chunkA) and \
                       (sep not in chunkB):
                        # By going here, we avoid infinite loops that may occur
                        # between m_getHtmlDiff and m_getReplacement
                        # (called below).
                        add = self.getModifiedChunk(chunkA, 'delete', sep) + \
                              self.getModifiedChunk(chunkB, 'insert', sep)
                    else:
                        add = self.getReplacement(chunkA, chunkB, sep)
            if add: res += self.getDumpPrefix(res, add, previousAdd, sep) + add
            previousAdd = add
        return res

    def get(self):
        '''Produces the result.'''
        # Normally, if self.old is empty, the whole self.new should be
        # considered as inserted text. But see this line:
        if not self.old or not self.old.strip(): return self.new
        # Why? This is for avoiding problems in the case of cumulative diffs.
        # A cumulative diff means: calling HtmlDiff with, as old value, the
        # result of a previous call to HtmlDiff. In this case, if the whole
        # text were already considered as inserted, we would already have
        # overlaps in the next diff. Overlaps are hard to manage, so we avoid
        # having them as a starting point when computing cumulative diffs.
        return self.getHtmlDiff(self.old, self.new, '\n')
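
A minimal end-to-end sketch of the class (the exact markup follows
getModifiedChunk above):

old = '<p>Hello world</p>'
new = '<p>Hello brave world</p>'
print(HtmlDiff(old, new).get())
# 'brave' comes out wrapped in a <span name="insert" ...> tag; a deleted
# word would come out wrapped in a <span name="delete" ...> tag.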
# ------------------------------------------------------------------------------

31
appy/shared/errors.py
Normal file

@ -0,0 +1,31 @@
# ------------------------------------------------------------------------------
# Appy is a framework for building applications in the Python language.
# Copyright (C) 2007 Gaetan Delannay

# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,USA.

# ------------------------------------------------------------------------------
class AppyError(Exception):
    '''Root Appy exception class.'''
    pass

class ValidationError(AppyError):
    '''Represents an error that occurs on data sent to the Appy server.'''
    pass

class InternalError(AppyError):
    '''Represents a programming error: something that should never occur.'''
    pass
# ------------------------------------------------------------------------------

281
appy/shared/ldap_connector.py
Normal file

@ -0,0 +1,281 @@
# ------------------------------------------------------------------------------
import string
try:
    import ldap
except ImportError:
    # For people that do not care about ldap
    ldap = None

# ------------------------------------------------------------------------------
class LdapConfig:
    '''Parameters for authenticating users to an LDAP server. This class is
       used by gen-applications. For a pure, appy-independent LDAP connector,
       see the class LdapConnector below.'''
    ldapAttributes = { 'loginAttribute':None, 'emailAttribute':'email',
                       'fullNameAttribute':'title',
                       'firstNameAttribute':'firstName',
                       'lastNameAttribute':'name' }

    def __init__(self):
        self.server = '' # Name of the LDAP server
        self.port = None # Port for this server
        # Login and password of the technical power user that the Appy
        # application will use to connect to the LDAP.
        self.adminLogin = ''
        self.adminPassword = ''
        # LDAP attribute to use as login for authenticating users.
        self.loginAttribute = 'dn' # Can also be "mail", "sAMAccountName", "cn"
        # LDAP attribute for storing the email
        self.emailAttribute = None
        # LDAP attribute for storing the full name (first + last name)
        self.fullNameAttribute = None
        # Alternately, LDAP attributes for storing 1st & last names separately.
        self.firstNameAttribute = None
        self.lastNameAttribute = None
        # LDAP classes defining the users stored in the LDAP.
        self.userClasses = ('top', 'person')
        self.baseDn = '' # Base DN where to find users in the LDAP.
        self.scope = 'SUBTREE' # Scope of the search within self.baseDn
        # Is this server connection enabled?
        self.enabled = True
        # The "user map" allows putting LDAP users into groups or assigning
        # them roles. This dict will be used every time a local User is
        # created, be it while synchronizing all users (see m_synchronizeUsers
        # below) or when the user logs in for the first time (see m_getUser
        # below). This dict will NOT be used subsequently, when updating the
        # User instance. Every key must be a user login. Every value is an
        # appy.Object instance having the optional attributes:
        # "groups": a list of group IDs (logins);
        # "roles": a list of global role names.
        self.userMap = {}

    def __repr__(self):
        '''Short string representation of this ldap config, for logging and
           debugging purposes.'''
        return self.getServerUri()

    def getServerUri(self):
        '''Returns the complete URI for accessing the LDAP, ie
           "ldap://some.ldap.server:389".'''
        port = self.port or 389
        return 'ldap://%s:%d' % (self.server, port)

    def getUserFilterValues(self, login=None):
        '''Gets the filter values required to perform a query for finding the
           user corresponding to p_login in the LDAP, or all users if p_login
           is None.'''
        res = login and [(self.loginAttribute, login)] or []
        for userClass in self.userClasses:
            res.append( ('objectClass', userClass) )
        return res

    def getUserAttributes(self):
        '''Gets the attributes we want to get from the LDAP for characterizing
           a user.'''
        res = []
        for name in self.ldapAttributes.keys():
            if getattr(self, name):
                res.append(getattr(self, name))
        return res

    def getUserParams(self, ldapData):
        '''Formats the user-related p_ldapData retrieved from the ldap, as a
           dict of params usable for creating or updating the corresponding
           Appy user.'''
        res = {}
        for name, appyName in self.ldapAttributes.items():
            if not appyName: continue
            # Get the name of the attribute as known in the LDAP
            ldapName = getattr(self, name)
            if not ldapName: continue
            if (ldapName in ldapData) and ldapData[ldapName]:
                value = ldapData[ldapName]
                if isinstance(value, list): value = value[0]
                res[appyName] = value
        return res

    def setLocalUser(self, tool, attrs, login, password=None):
        '''Creates or updates the local User instance corresponding to the LDAP
           user having p_login. Its other attributes are in p_attrs and, when
           relevant, its password is in p_password. This method returns a
           2-tuple containing:
           * the local User instance;
           * the status of the operation:
             - "created" if the instance has been created,
             - "updated" if at least one value from p_attrs differs from the
               one stored on the existing User instance;
             - None otherwise.
        '''
        # Do we already have a local User instance for this user?
        status = None
        user = tool.search1('User', noSecurity=True, login=login)
        if user:
            # Yes. Update it with info about this user from the LDAP
            for name, value in attrs.items():
                currentValue = getattr(user, name)
                if value != currentValue:
                    setattr(user, name, value)
                    status = 'updated'
            # Update the user password, if given
            if password: user.setPassword(password, log=False)
            user.reindex()
        else:
            # Create the user
            user = tool.create('users', noSecurity=True, login=login,
                               source='ldap', **attrs)
            if password: user.setPassword(password, log=False)
            status = 'created'
            # Put the user into groups and/or grant him some roles according to
            # self.userMap.
            if login in self.userMap:
                privileges = self.userMap[login]
                # Put the user in some groups
                groups = getattr(privileges, 'groups', None)
                if groups:
                    for groupLogin in groups:
                        group = tool.search1('Group', noSecurity=True,
                                             login=groupLogin)
                        group.link('users', user)
                # Grant him some roles
                roles = getattr(privileges, 'roles', None)
                if roles:
                    for role in roles: user.addRole(role)
                tool.log('%s: automatic privileges set.' % login)
        return user, status

    def getUser(self, tool, login, password):
        '''Returns a local User instance corresponding to an LDAP user if
           p_login and p_password correspond to a valid LDAP user.'''
        # Check if LDAP is enabled
        if not self.enabled: return
        # Get a connector to the LDAP server and connect to the LDAP server
        serverUri = self.getServerUri()
        connector = LdapConnector(serverUri, tool=tool)
        success, msg = connector.connect(self.adminLogin, self.adminPassword)
        if not success: return
        # Check if the user corresponding to p_login exists in the LDAP
        filter = connector.getFilter(self.getUserFilterValues(login))
        params = self.getUserAttributes()
        ldapData = connector.search(self.baseDn, self.scope, filter, params)
        if not ldapData: return
        # The user exists. Try to connect to the LDAP with this user in order
        # to validate its password.
        userConnector = LdapConnector(serverUri, tool=tool)
        success, msg = userConnector.connect(ldapData[0][0], password)
        if not success: return
        # The password is correct. We can create/update our local user
        # corresponding to this LDAP user.
        userParams = self.getUserParams(ldapData[0][1])
        user, status = self.setLocalUser(tool, userParams, login, password)
        return user

    def synchronizeUsers(self, tool):
        '''Synchronizes the local User copies with this LDAP user base. Returns
           a 3-tuple containing the number of created, updated and untouched
           local copies.'''
        if not self.enabled: raise Exception('LDAP config not enabled.')
        # Get a connector to the LDAP server and connect to the LDAP server
        serverUri = self.getServerUri()
        tool.log('reading users from %s...' % serverUri)
        connector = LdapConnector(serverUri, tool=tool)
        success, msg = connector.connect(self.adminLogin, self.adminPassword)
        if not success: raise Exception('Could not connect to %s' % serverUri)
        # Query the LDAP for users. Perform several queries to avoid getting
        # error ldap.SIZELIMIT_EXCEEDED.
        params = self.getUserAttributes()
        # Count the number of created, updated and untouched users
        created = updated = untouched = 0
        for letter in string.ascii_lowercase:
            # Get all the users whose login starts with "letter"
            filter = connector.getFilter(self.getUserFilterValues('%s*'%letter))
            ldapData = connector.search(self.baseDn, self.scope, filter, params)
            if not ldapData: continue
            for userData in ldapData:
                # Get the user login
                login = userData[1][self.loginAttribute][0]
                # Get the other user parameters, as Appy wants them
                userParams = self.getUserParams(userData[1])
                # Create or update the user
                user, status = self.setLocalUser(tool, userParams, login)
                if status == 'created': created += 1
                elif status == 'updated': updated += 1
                else: untouched += 1
        tool.log('users synchronization: %d local user(s) created, ' \
                 '%d updated and %d untouched.'% (created, updated, untouched))
        return created, updated, untouched

# ------------------------------------------------------------------------------
class LdapConnector:
    '''This class manages the communication with an LDAP server.'''
    def __init__(self, serverUri, tentatives=5, ssl=False, timeout=5,
                 tool=None):
        # The URI of the LDAP server, ie ldap://some.ldap.server:389.
        self.serverUri = serverUri
        # The object that will represent the LDAP server
        self.server = None
        # The number of trials the connector will at most perform on the LDAP
        # server when executing a query.
        self.tentatives = tentatives
        self.ssl = ssl
        # The timeout for every query to the LDAP.
        self.timeout = timeout
        # A tool from an Appy application can be given and will be used, ie for
        # logging purposes.
        self.tool = tool

    def log(self, message, type='info'):
        '''Logs via the Appy tool if available.'''
        if self.tool:
            self.tool.log(message, type=type)
        else:
            print(message)

    def connect(self, login, password):
        '''Connects to the LDAP server using p_login and p_password as
           credentials. If the connection succeeds, a server object is created
           in self.server and tuple (True, None) is returned. Else, tuple
           (False, errorMessage) is returned.'''
        try:
            self.server = ldap.initialize(self.serverUri)
            self.server.simple_bind_s(login, password)
            return True, None
        except AttributeError as ae:
            # When the ldap module is not there, trying to catch ldap.LDAPError
            # will raise an error.
            message = str(ae)
            self.log('Ldap connect error with login %s (%s).' % \
                     (login, message))
            return False, message
        except ldap.LDAPError as le:
            message = str(le)
            self.log('%s: connect error with login %s (%s).' % \
                     (self.serverUri, login, message))
            return False, message

    def getFilter(self, values):
        '''Builds and returns an LDAP filter based on p_values, a tuple or list
           of tuples (name, value).'''
        return '(&%s)' % ''.join(['(%s=%s)' % (n, v) for n, v in values])
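
For example:

c = LdapConnector('ldap://localhost:389')
c.getFilter([('objectClass', 'person'), ('cn', 'bob')])
# -> '(&(objectClass=person)(cn=bob))'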

    def search(self, baseDn, scope, filter, attributes=None):
        '''Performs a query in the LDAP at node p_baseDn, with the given
           p_scope. p_filter is an LDAP filter that constrains the search. It
           can be computed from a list of tuples (value, name) by method
           m_getFilter. p_attributes is the list of attributes that we will
           retrieve from the LDAP. If None, all attributes will be retrieved.'''
        if self.ssl: self.server.start_tls_s()
        try:
            # Get the LDAP constant corresponding to p_scope.
            scope = getattr(ldap, 'SCOPE_%s' % scope)
            # Perform the query.
            for i in range(self.tentatives):
                try:
                    return self.server.search_st(\
                        baseDn, scope, filterstr=filter, attrlist=attributes,
                        timeout=self.timeout)
                except ldap.TIMEOUT:
                    pass
        except ldap.LDAPError as le:
            self.log('LDAP query error %s: %s' % \
                     (le.__class__.__name__, str(le)))
# ------------------------------------------------------------------------------

58
appy/shared/odf.py
Normal file

@ -0,0 +1,58 @@
'''This module contains some useful classes for constructing ODF documents
   programmatically.'''

# ------------------------------------------------------------------------------
class OdtTable:
    '''This class allows constructing an ODT table programmatically.'''
    # Some namespace definitions
    tns = 'table:'
    txns = 'text:'

    def __init__(self, tableName, paraStyle, cellStyle,
                 paraHeaderStyle, cellHeaderStyle, nbOfCols):
        self.tableName = tableName
        self.paraStyle = paraStyle
        self.cellStyle = cellStyle
        self.paraHeaderStyle = paraHeaderStyle
        self.cellHeaderStyle = cellHeaderStyle
        self.nbOfCols = nbOfCols
        self.res = ''

    def dumpCell(self, content, span=1, header=False):
        if header:
            paraStyleName = self.paraHeaderStyle
            cellStyleName = self.cellHeaderStyle
        else:
            paraStyleName = self.paraStyle
            cellStyleName = self.cellStyle
        self.res += '<%stable-cell %sstyle-name="%s" ' \
                    '%snumber-columns-spanned="%d">' % \
                    (self.tns, self.tns, cellStyleName, self.tns, span)
        self.res += '<%sp %sstyle-name="%s">%s</%sp>' % \
                    (self.txns, self.txns, paraStyleName, content, self.txns)
        self.res += '</%stable-cell>' % self.tns

    def startRow(self):
        self.res += '<%stable-row>' % self.tns

    def endRow(self):
        self.res += '</%stable-row>' % self.tns

    def startTable(self):
        self.res += '<%stable %sname="AnalysisTable">' % (self.tns, self.tns)
        self.res += '<%stable-column %snumber-columns-repeated="%d"/>' % \
                    (self.tns, self.tns, self.nbOfCols)

    def endTable(self):
        self.res += '</%stable>' % self.tns

    def dumpFloat(self, number):
        return str(round(number, 2))

    def get(self):
        '''Returns the whole table.'''
        self.startTable()
        self.getRows()
        self.endTable()
        # self.res is already a str: the "decode" performed by the Python 2
        # version of this module is not needed anymore.
        return self.res
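
Note that get() relies on a getRows method that subclasses must supply. A
minimal sketch (style names are invented):

class ScoreTable(OdtTable):
    def getRows(self):
        self.startRow()
        self.dumpCell('Score', header=True)
        self.dumpCell(self.dumpFloat(3.14159))
        self.endRow()

table = ScoreTable('scores', 'P1', 'C1', 'PH1', 'CH1', nbOfCols=2)
xml = table.get()  # The ODT XML for the table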
# ------------------------------------------------------------------------------

337
appy/shared/packaging.py
Normal file

@ -0,0 +1,337 @@
# ------------------------------------------------------------------------------
import os, os.path, subprocess, hashlib, shutil
from appy.shared.utils import getOsTempFolder, FolderDeleter, cleanFolder

# ------------------------------------------------------------------------------
debianInfo = '''Package: python-appy%s
Version: %s
Architecture: all
Maintainer: Gaetan Delannay <gaetan.delannay@geezteem.com>
Installed-Size: %d
Depends: python (>= %s)%s
Section: python
Priority: optional
Homepage: http://appyframework.org
Description: Appy builds simple but complex web Python apps.
'''
appCtl = '''#! /usr/lib/zope2.12/bin/python
import sys
from appy.bin.zopectl import ZopeRunner
args = ' '.join(sys.argv[1:])
sys.argv = [sys.argv[0], '-C', '/etc/%s.conf', args]
ZopeRunner().run()
'''
appRun = '''#! /bin/sh
exec "/usr/lib/zope2.12/bin/runzope" -C "/etc/%s.conf" "$@"
'''
ooStart = '#! /bin/sh\nsoffice -invisible -headless -nofirststartwizard ' \
          '"-accept=socket,host=localhost,port=2002;urp;"'
zopeConf = '''# Zope configuration.
%%define INSTANCE %s
%%define DATA %s
%%define LOG %s
%%define HTTPPORT %s
%%define ZOPE_USER zope

instancehome $INSTANCE
effective-user $ZOPE_USER
%s
<eventlog>
  level info
  <logfile>
    path $LOG/event.log
    level info
  </logfile>
</eventlog>
<logger access>
  level WARN
  <logfile>
    path $LOG/Z2.log
    format %%(message)s
  </logfile>
</logger>
<http-server>
  address $HTTPPORT
</http-server>
<zodb_db main>
  <filestorage>
    path $DATA/Data.fs
  </filestorage>
  mount-point /
</zodb_db>
<zodb_db temporary>
  <temporarystorage>
    name temporary storage for sessioning
  </temporarystorage>
  mount-point /temp_folder
  container-class Products.TemporaryFolder.TemporaryContainer
</zodb_db>
'''
# initScript below will be used to define the scripts that will run the
# app-powered Zope instance and OpenOffice in server mode at boot time.
initScript = '''#! /bin/sh
### BEGIN INIT INFO
# Provides:          %s
# Required-Start:    $syslog $remote_fs
# Required-Stop:     $syslog $remote_fs
# Should-Start:      $remote_fs
# Should-Stop:       $remote_fs
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: Start %s
# Description:       %s
### END INIT INFO

case "$1" in
  start)
    %s
    ;;
  restart|reload|force-reload)
    %s
    ;;
  stop)
    %s
    ;;
  *)
    echo "Usage: $0 start|restart|stop" >&2
    exit 3
    ;;
esac
exit 0
'''

class Debianizer:
    '''This class allows to produce a Debian package from a Python (Appy)
       package.'''

    def __init__(self, app, out, appVersion='0.1.0',
                 pythonVersions=('2.6',), zopePort=8080,
                 depends=('zope2.12', 'openoffice.org', 'imagemagick'),
                 sign=False):
        # app is the path to the Python package to Debianize.
        self.app = app
        self.appName = os.path.basename(app)
        self.appNameLower = self.appName.lower()
        # Must we sign the Debian package? If yes, we make the assumption that
        # the currently logged user has a public/private key pair in ~/.gnupg,
        # generated with command "gpg --gen-key".
        self.sign = sign
        # out is the folder where the Debian package will be generated.
        self.out = out
        # What is the version number for this app?
        self.appVersion = appVersion
        # On which Python versions will the Debian package depend?
        self.pythonVersions = pythonVersions
        # Port for Zope
        self.zopePort = zopePort
        # Debian package dependencies
        self.depends = depends
        # Zope 2.12 requires Python 2.6
        if 'zope2.12' in depends: self.pythonVersions = ('2.6',)

    def run(self):
        '''Generates the Debian package.'''
        curdir = os.getcwd()
        j = os.path.join
        tempFolder = getOsTempFolder()
        # Create, in the temp folder, the required sub-structure for the Debian
        # package.
        debFolder = j(tempFolder, 'debian')
        if os.path.exists(debFolder):
            FolderDeleter.delete(debFolder)
        # Copy the Python package into it
        srcFolder = j(debFolder, 'usr', 'lib')
        for version in self.pythonVersions:
            libFolder = j(srcFolder, 'python%s' % version)
            os.makedirs(libFolder)
            destFolder = j(libFolder, self.appName)
            shutil.copytree(self.app, destFolder)
            # Clean dest folder (.svn/.bzr files)
            cleanFolder(destFolder, folders=('.svn', '.bzr'))
        # When packaging Appy itself, everything is in /usr/lib/pythonX. When
        # packaging an Appy app, we will generate more files for creating a
        # running instance.
        if self.appName != 'appy':
            # Create the folders that will collectively represent the deployed
            # Zope instance.
            binFolder = j(debFolder, 'usr', 'bin')
            os.makedirs(binFolder)
            # <app>ctl
            name = '%s/%sctl' % (binFolder, self.appNameLower)
            f = open(name, 'w')
            f.write(appCtl % self.appNameLower)
            os.chmod(name, 0o744) # Make it executable by owner.
            f.close()
            # <app>run
            name = '%s/%srun' % (binFolder, self.appNameLower)
            f = open(name, 'w')
            f.write(appRun % self.appNameLower)
            os.chmod(name, 0o744) # Make it executable by owner.
            f.close()
            # startoo
            name = '%s/startoo' % binFolder
            f = open(name, 'w')
            f.write(ooStart)
            f.close()
            os.chmod(name, 0o744) # Make it executable by owner.
            # /var/lib/<app> (will store Data.fs, lock files, etc)
            varLibFolder = j(debFolder, 'var', 'lib', self.appNameLower)
            os.makedirs(varLibFolder)
            f = open('%s/README' % varLibFolder, 'w')
            f.write('This folder stores the %s database.\n' % self.appName)
            f.close()
            # /var/log/<app> (will store event.log and Z2.log)
            varLogFolder = j(debFolder, 'var', 'log', self.appNameLower)
            os.makedirs(varLogFolder)
            f = open('%s/README' % varLogFolder, 'w')
            f.write('This folder stores the log files for %s.\n' % self.appName)
            f.close()
            # /etc/<app>.conf (Zope configuration file)
            etcFolder = j(debFolder, 'etc')
            os.makedirs(etcFolder)
            name = '%s/%s.conf' % (etcFolder, self.appNameLower)
            n = self.appNameLower
            f = open(name, 'w')
            productsFolder = '/usr/lib/python%s/%s/zope' % \
                             (self.pythonVersions[0], self.appName)
            f.write(zopeConf % ('/var/lib/%s' % n, '/var/lib/%s' % n,
                                '/var/log/%s' % n, str(self.zopePort),
                                'products %s\n' % productsFolder))
            f.close()
            # /etc/init.d/<app> (start the app at boot time)
            initdFolder = j(etcFolder, 'init.d')
            os.makedirs(initdFolder)
            name = '%s/%s' % (initdFolder, self.appNameLower)
            f = open(name, 'w')
            n = self.appNameLower
            f.write(initScript % (n, n, 'Start Zope with the Appy-based %s ' \
                                  'application.' % n, '%sctl start' % n,
                                  '%sctl restart' % n, '%sctl stop' % n))
            f.close()
            os.chmod(name, 0o744) # Make it executable by owner.
            # /etc/init.d/oo (start OpenOffice at boot time)
            name = '%s/oo' % initdFolder
            f = open(name, 'w')
            f.write(initScript % ('oo', 'oo', 'Start OpenOffice in server mode',
                                  'startoo', 'startoo', "#Can't stop OO."))
            f.write('\n')
            f.close()
            os.chmod(name, 0o744) # Make it executable by owner.
        # Get the size of the app, in Kb.
        os.chdir(tempFolder)
        cmd = subprocess.Popen(['du', '-b', '-s', 'debian'],
                               stdout=subprocess.PIPE)
        size = int(int(cmd.stdout.read().split()[0]) / 1024.0)
        os.chdir(debFolder)
        # Create data.tar.gz based on it.
        os.system('tar czvf data.tar.gz *')
        # Create the control file
        f = open('control', 'w')
        nameSuffix = ''
        dependencies = []
        if self.appName != 'appy':
            nameSuffix = '-%s' % self.appNameLower
            dependencies.append('python-appy')
        if self.depends:
            for d in self.depends: dependencies.append(d)
        depends = ''
        if dependencies:
            depends = ', ' + ', '.join(dependencies)
        f.write(debianInfo % (nameSuffix, self.appVersion, size,
                              self.pythonVersions[0], depends))
        f.close()
        # Create the md5sums file
        f = open('md5sums', 'w')
        toWalk = ['usr']
        if self.appName != 'appy':
            toWalk += ['etc', 'var']
        for folderToWalk in toWalk:
            for dir, dirnames, filenames in os.walk(folderToWalk):
                for name in filenames:
                    # The standalone "md5" module is gone in Python 3.
                    m = hashlib.md5()
                    pathName = j(dir, name)
                    currentFile = open(pathName, 'rb')
                    while True:
                        data = currentFile.read(8096)
                        if not data:
                            break
                        m.update(data)
                    currentFile.close()
                    # Add the md5 sum to the file
                    f.write('%s %s\n' % (m.hexdigest(), pathName))
        f.close()
        # Create postinst, a script that will:
        # - bytecompile Python files after the Debian install
        # - change ownership of some files if required
        # - [in the case of an app-package] call update-rc.d for starting it at
        #   boot time.
        f = open('postinst', 'w')
        content = '#!/bin/sh\nset -e\n'
        for version in self.pythonVersions:
            bin = '/usr/bin/python%s' % version
            lib = '/usr/lib/python%s' % version
            cmds = ' %s -m compileall -q %s/%s 2> /dev/null\n' % (bin, lib,
                                                                  self.appName)
            content += 'if [ -e %s ]\nthen\n%sfi\n' % (bin, cmds)
        if self.appName != 'appy':
            # Allow user "zope", that runs the Zope instance, to write the
            # database and log files.
            content += 'chown -R zope:root /var/lib/%s\n' % self.appNameLower
            content += 'chown -R zope:root /var/log/%s\n' % self.appNameLower
            # Call update-rc.d for starting the app at boot time
            content += 'update-rc.d %s defaults\n' % self.appNameLower
            content += 'update-rc.d oo defaults\n'
            # (re-)start the app
            content += '%sctl restart\n' % self.appNameLower
            # (re-)start oo
            content += 'startoo\n'
        f.write(content)
        f.close()
        # Create prerm, a script that will remove all pyc files before removing
        # the Debian package.
        f = open('prerm', 'w')
        content = '#!/bin/sh\nset -e\n'
        for version in self.pythonVersions:
            content += 'find /usr/lib/python%s/%s -name "*.pyc" -delete\n' % \
                       (version, self.appName)
        f.write(content)
        f.close()
        # Create control.tar.gz
        os.system('tar czvf control.tar.gz ./control ./md5sums ./postinst ' \
                  './prerm')
        # Create debian-binary
        f = open('debian-binary', 'w')
        f.write('2.0\n')
        f.close()
        # Create the signature if required
        if self.sign:
            # Create the concatenated version of all files within the deb
            os.system('cat debian-binary control.tar.gz data.tar.gz > ' \
                      '/tmp/combined-contents')
            os.system('gpg -abs -o _gpgorigin /tmp/combined-contents')
            signFile = '_gpgorigin '
            os.remove('/tmp/combined-contents')
            # Export the public key and name it according to its ID as found by
            # analyzing the result of command "gpg --fingerprint".
            cmd = subprocess.Popen(['gpg', '--fingerprint'],
                                   stdout=subprocess.PIPE)
            # The pipe yields bytes under Python 3: decode before splitting.
            fingerprint = cmd.stdout.read().decode().split('\n')
            id = 'pubkey'
            for line in fingerprint:
                if '=' not in line: continue
                id = line.split('=')[1].strip()
                id = ''.join(id.split()[-4:])
                break
            os.system('gpg --export -a > %s/%s.asc' % (self.out, id))
        else:
            signFile = ''
        # Create the .deb package
        debName = 'python-appy%s-%s.deb' % (nameSuffix, self.appVersion)
        os.system('ar -r %s %sdebian-binary control.tar.gz data.tar.gz' % \
                  (debName, signFile))
        # Move it to self.out
        os.rename(j(debFolder, debName), j(self.out, debName))
        # Clean temp files
        FolderDeleter.delete(debFolder)
        os.chdir(curdir)
# ------------------------------------------------------------------------------
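A hedged usage sketch for the class above (paths and version number are
illustrative assumptions):

    from appy.shared.packaging import Debianizer
    # Produces python-appy-myapp-1.0.0.deb in /home/me/dist.
    Debianizer('/home/me/MyApp', '/home/me/dist', appVersion='1.0.0').run()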
502
appy/shared/rtf.py
Normal file
@@ -0,0 +1,502 @@
# -*- coding: iso-8859-15 -*-
# ------------------------------------------------------------------------------
# Appy is a framework for building applications in the Python language.
# Copyright (C) 2007 Gaetan Delannay

# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,USA.

'''RTF table parser.

   This parser reads RTF documents that conform to the following.
   - Each table must have a first row with only one cell: the table name.
   - The other rows must all have the same number of columns. This number must
     be strictly greater than 1.'''

# -----------------------------------------------------------------------------
import re, sys, collections
from io import StringIO

# -----------------------------------------------------------------------------
class ParserError(Exception): pass
# Note: this deliberately shadows the built-in TypeError within this module.
class TypeError(Exception): pass

# ParserError-related constants ------------------------------------------------
BAD_PARENT_ROW = 'For table "%s", you specified "%s" as parent ' \
                 'table, but you referred to row number "%s" ' \
                 'within the parent. This value must be a positive ' \
                 'integer or zero (we start counting rows at 0).'
PARENT_NOT_FOUND = 'I cannot find table "%s" that you defined as being ' \
                   'parent of "%s".'
TABLE_KEY_ERROR = 'Within a row of table "%s", you mention a column named ' \
                  '"%s" which exists neither in "%s" itself ' \
                  'nor in its parent row(s).'
PARENT_ROW_NOT_FOUND = 'You specified table "%s" as inheriting from table ' \
                       '"%s", row "%d", but this row does not exist (table ' \
                       '"%s" has a length of %d). Note that we start ' \
                       'counting rows at 0.'
PARENT_COLUMN_NOT_FOUND = 'You specified table "%s" as inheriting from table ' \
                          '"%s", column "%s", but this column does not exist ' \
                          'in table "%s" or its parents.'
PARENT_ROW_COL_NOT_FOUND = 'You specified table "%s" as inheriting from ' \
                           'table "%s", column "%s", value "%s", but it does ' \
                           'not correspond to any row in table "%s".'
NO_ROWS_IN_TABLE_YET = 'In the first row of table "%s", you use value \' " \' ' \
                       'for referencing the cell value in the previous row, ' \
                       'which does not exist.'
VALUE_ERROR = 'Value error for column "%s" of table "%s". %s'
TYPE_ERROR = 'Type error for column "%s" of table "%s". %s'

# TypeError-related constants -------------------------------------------------
LIST_TYPE_ERROR = 'Maximum number of nested lists is 4.'
BASIC_TYPE_ERROR = 'Letter "%s" does not correspond to any valid type. ' \
                   'Valid types are f (float), i (int), g (long) and b (bool).'
BASIC_VALUE_ERROR = 'Value "%s" can\'t be converted to type "%s".'
LIST_VALUE_ERROR = 'Value "%s" is malformed: within it, %s. You should check ' \
                   'the use of separators ( , : ; - ) to obtain a schema ' \
                   'conform to the type "%s".'

# -----------------------------------------------------------------------------
class Type:
    basicTypes = {'f': float, 'i': int, 'g': int, 'b': bool}
    separators = ['-', ';', ',', ':']

    def __init__(self, typeDecl):
        self.basicType = None # The Python basic type
        self.listNumber = 0
        # If = 1: it is a list. If = 2: it is a list of lists. If = 3...
        self.analyseTypeDecl(typeDecl)
        if self.listNumber > 4:
            raise TypeError(LIST_TYPE_ERROR)
        self.name = self.computeName()

    def analyseTypeDecl(self, typeDecl):
        for char in typeDecl:
            if char == 'l':
                self.listNumber += 1
            else:
                # Get the basic type
                if char not in Type.basicTypes:
                    raise TypeError(BASIC_TYPE_ERROR % char)
                self.basicType = Type.basicTypes[char]
                break
        if not self.basicType:
            self.basicType = str

    def convertBasicValue(self, value):
        try:
            return self.basicType(value.strip())
        except ValueError:
            raise TypeError(BASIC_VALUE_ERROR % (value,
                                                 self.basicType.__name__))

    def convertValue(self, value):
        '''Converts a p_value which is a string into a value conform
           to self.'''
        if self.listNumber == 0:
            res = self.convertBasicValue(value)
        else:
            # Get separators in their order of appearance
            separators = []
            for char in value:
                if (char in Type.separators) and (char not in separators):
                    separators.append(char)
            # Remove surplus separators
            if len(separators) > self.listNumber:
                nbOfSurplusSeps = len(separators) - self.listNumber
                separators = separators[nbOfSurplusSeps:]
            # If not enough separators, create corresponding empty lists.
            res = None
            innerList = None
            resIsComplete = False
            if len(separators) < self.listNumber:
                if not value:
                    res = []
                    resIsComplete = True
                else:
                    # Begin with empty list(s)
                    nbOfMissingSeps = self.listNumber - len(separators)
                    res = []
                    innerList = res
                    for i in range(nbOfMissingSeps - 1):
                        newInnerList = []
                        innerList.append(newInnerList)
                        innerList = newInnerList
            # We can now convert the value
            separators.reverse()
            if innerList != None:
                innerList.append(self.convertListItem(value, separators))
            elif not resIsComplete:
                try:
                    res = self.convertListItem(value, separators)
                except TypeError as te:
                    raise TypeError(LIST_VALUE_ERROR % (value, te, self.name))
        return res

    def convertListItem(self, stringItem, remainingSeps):
        if not remainingSeps:
            res = self.convertBasicValue(stringItem)
        else:
            curSep = remainingSeps[0]
            tempRes = stringItem.split(curSep)
            if (len(tempRes) == 1) and (not tempRes[0]):
                # There was no value within value, so we produce an empty list.
                res = []
            else:
                res = []
                for tempItem in tempRes:
                    res.append(self.convertListItem(tempItem,
                                                    remainingSeps[1:]))
        return res

    def computeName(self):
        prefix = 'list of ' * self.listNumber
        return '<%s%s>' % (prefix, self.basicType.__name__)

    def __repr__(self):
        return self.name

# -----------------------------------------------------------------------------
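# A quick illustration of the type-declaration mini-language implemented by
# class Type above (a sketch; the values are hypothetical):
#   Type('i').convertValue('42')         -> 42
#   Type('lf').convertValue('1;2;3')     -> [1.0, 2.0, 3.0]
#   Type('lli').convertValue('1,2;3,4')  -> [[1, 2], [3, 4]]
# Separators are recognized in their order of appearance within the value,
# among '-', ';', ',' and ':'.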
class Table(collections.UserList):
    def __init__(self):
        collections.UserList.__init__(self)
        self.name = None
        self.parent = None
        self.parentRow = None
        # Either ~i~ (the ith row in table self.parent, index starts at 0) or
        # ~(s_columnName:s_columnValue)~ (identifies the 1st row that has
        # s_columnValue for the column named s_columnName)

    def dump(self, withContent=True):
        res = 'Table "%s"' % self.name
        if self.parent:
            res += ' extends table "%s"' % self.parent.name
            if isinstance(self.parentRow, int):
                res += '(%d)' % self.parentRow
            else:
                res += '(%s=%s)' % self.parentRow
        if withContent:
            res += '\n'
            for line in self:
                res += str(line)
        return res

    def instanceOf(self, tableName):
        res = False
        if self.parent:
            if self.parent.name == tableName:
                res = True
            else:
                res = self.parent.instanceOf(tableName)
        return res

    def asDict(self):
        '''If this table has only 2 columns named "key" and "value", it can be
           represented as a Python dict. This method produces this dict.'''
        infoDict = {}
        if self.parent:
            for info in self.parent:
                infoDict[info["key"]] = info["value"]
        for info in self:
            infoDict[info["key"]] = info["value"]
        return infoDict

# -----------------------------------------------------------------------------
class TableRow(collections.UserDict):
    # Under Python 3, UserDict lives in module "collections", not in the
    # removed standalone "UserDict" module.
    def __init__(self, table):
        collections.UserDict.__init__(self)
        self.table = table

    def __getitem__(self, key):
        '''This method "implements" row inheritance: if the current row does
           not have an element with p_key, it looks in the parent row of this
           row, via the parent table self.table.'''
        keyError = False
        t = self.table
        if key in self:
            res = collections.UserDict.__getitem__(self, key)
        else:
            # Get the parent row
            if t.parent:
                if isinstance(t.parentRow, int):
                    if t.parentRow < len(t.parent):
                        try:
                            res = t.parent[t.parentRow][key]
                        except KeyError:
                            keyError = True
                    else:
                        raise ParserError(PARENT_ROW_NOT_FOUND %
                                          (t.name, t.parent.name, t.parentRow,
                                           t.parent.name, len(t.parent)))
                else:
                    tColumn, tValue = t.parentRow
                    # Get the 1st row having tColumn = tValue
                    rowFound = False
                    for row in t.parent:
                        try:
                            curVal = row[tColumn]
                        except KeyError:
                            raise ParserError(PARENT_COLUMN_NOT_FOUND %
                                              (t.name, t.parent.name, tColumn,
                                               t.parent.name))
                        if curVal == tValue:
                            rowFound = True
                            try:
                                res = row[key]
                            except KeyError:
                                keyError = True
                            break
                    if not rowFound:
                        raise ParserError(PARENT_ROW_COL_NOT_FOUND %
                                          (t.name, t.parent.name, tColumn,
                                           tValue, t.parent.name))
            else:
                keyError = True
        if keyError:
            raise KeyError(TABLE_KEY_ERROR % (t.name, key, t.name))
        return res

# -----------------------------------------------------------------------------
class NameResolver:
    def resolveNames(self, tables):
        for tableName, table in tables.items():
            if table.parent:
                if table.parent not in tables:
                    raise ParserError(PARENT_NOT_FOUND %
                                      (table.parent, table.name))
                table.parent = tables[table.parent]

# -----------------------------------------------------------------------------
class TableParser:
    # Parser possible states
    IGNORE = 0
    READING_CONTROL_WORD = 1
    READING_CONTENT = 2
    READING_SPECIAL_CHAR = 3

    def __init__(self, fileName):
        self.input = open(fileName)
        self.state = None
        # RTF character types
        self.alpha = re.compile(r'[a-zA-Z_\-\*]')
        self.numeric = re.compile('[0-9]')
        self.whiteSpaces = (' ', '\t', '\n', '\r', '\f', '\v')
        # The replacement char below stands for a character that was lost to
        # an encoding error in the original source.
        self.specialChars = {91: "'", 92: "'", 93: '"', 94: '"', 85: '...',
                             81: '\ufffd', 4: '', 5: ''}
        # Parser state
        self.state = TableParser.READING_CONTENT
        # Parser buffers
        self.controlWordBuffer = ''
        self.contentBuffer = StringIO()
        self.specialCharBuffer = ''
        # Resulting RTF output tables
        self.rtfTables = {}
        # Attributes needed by onRow and onColumn
        self.nbOfColumns = 0
        self.currentRow = []
        self.previousRow = []
        self.currentTable = Table()
        self.currentTableName = None
        self.currentColumnNames = None # ~[]~
        self.currentColumnTypes = None # ~[]~
        self.rowIsHeader = False
        # Table name regular expression
        self.tableNameRex = re.compile(r'([^\(]+)(?:\((.*)\))?')

    def isGroupDelimiter(self, char):
        return (char == '{') or (char == '}')

    def isControlWordStart(self, char):
        return (char == '\\')

    def isAlpha(self, char):
        return self.alpha.match(char)

    def isNumeric(self, char):
        return self.numeric.match(char)

    def isWhiteSpace(self, char):
        return (char in self.whiteSpaces)

    def isQuote(self, char):
        return char == "'"

    def manageControlWord(self):
        self.state = TableParser.READING_CONTENT
        cWord = self.controlWordBuffer
        if cWord == 'trowd':
            # Under Python 3, truncate(0) does not rewind the buffer: seek
            # first, or subsequent writes land beyond the truncation point.
            self.contentBuffer.seek(0)
            self.contentBuffer.truncate(0)
        elif cWord == 'row':
            self.onRow()
            self.contentBuffer.seek(0)
            self.contentBuffer.truncate(0)
        elif cWord == 'cell':
            self.onColumn(self.contentBuffer.getvalue().strip())
            self.contentBuffer.seek(0)
            self.contentBuffer.truncate(0)
        elif cWord in ('bkmkstart', 'bkmkend'):
            self.state = TableParser.IGNORE
        self.controlWordBuffer = ''

    def manageSpecialChar(self):
        specialChar = int(self.specialCharBuffer)
        self.specialCharBuffer = ''
        if specialChar in self.specialChars:
            self.contentBuffer.write(self.specialChars[specialChar])
        else:
            print('Warning: char %d not known.' % specialChar)
        self.state = TableParser.READING_CONTENT

    def bufferize(self, char):
        if self.state == TableParser.READING_CONTROL_WORD:
            self.controlWordBuffer += char
        elif self.state == TableParser.READING_CONTENT:
            self.contentBuffer.write(char)
        elif self.state == TableParser.READING_SPECIAL_CHAR:
            self.specialCharBuffer += char

    def parse(self):
        for line in self.input:
            for char in line:
                if self.isGroupDelimiter(char):
                    if self.state == TableParser.READING_SPECIAL_CHAR:
                        self.manageSpecialChar()
                    self.state = TableParser.READING_CONTENT
                elif self.isControlWordStart(char):
                    if self.state == TableParser.READING_CONTROL_WORD:
                        self.manageControlWord()
                    elif self.state == TableParser.READING_SPECIAL_CHAR:
                        self.manageSpecialChar()
                    self.controlWordBuffer = ''
                    self.state = TableParser.READING_CONTROL_WORD
                elif self.isAlpha(char):
                    if self.state == TableParser.READING_SPECIAL_CHAR:
                        self.manageSpecialChar()
                    self.bufferize(char)
                elif self.isNumeric(char):
                    self.bufferize(char)
                elif self.isWhiteSpace(char):
                    if self.state == TableParser.READING_CONTROL_WORD:
                        self.manageControlWord()
                    elif self.state == TableParser.READING_CONTENT:
                        if char not in ['\n', '\r']:
                            self.contentBuffer.write(char)
                    elif self.state == TableParser.READING_SPECIAL_CHAR:
                        self.manageSpecialChar()
                        if char not in ['\n', '\r']:
                            self.contentBuffer.write(char)
                elif self.isQuote(char):
                    if (self.state == TableParser.READING_CONTROL_WORD) and \
                       not self.controlWordBuffer:
                        self.state = TableParser.READING_SPECIAL_CHAR
                    elif self.state == TableParser.READING_SPECIAL_CHAR:
                        self.manageSpecialChar()
                        self.bufferize(char)
                    else:
                        self.bufferize(char)
                else:
                    if self.state == TableParser.READING_CONTENT:
                        self.contentBuffer.write(char)
                    elif self.state == TableParser.READING_SPECIAL_CHAR:
                        self.manageSpecialChar()
                        self.contentBuffer.write(char)
        if self.controlWordBuffer:
            self.manageControlWord()
        if self.currentTableName:
            self.addTable(self.currentTableName, self.currentTable)
        return self.rtfTables

    def getColumnInfos(self, columnHeaders):
        '''Gets, from the column headers, column names and types.'''
        columnNames = []
        columnTypes = []
        for header in columnHeaders:
            if header.find(':') != -1:
                # We have a type declaration
                name, typeDecl = header.split(':')
                columnNames.append(name.strip())
                try:
                    columnTypes.append(Type(typeDecl.strip()))
                except TypeError as te:
                    raise ParserError(TYPE_ERROR %
                                      (header, self.currentTableName, te))
            else:
                # No type declaration: implicitly it is a string
                columnNames.append(header)
                columnTypes.append(None)
        return columnNames, columnTypes

    def onRow(self):
        if (self.nbOfColumns == 0) or not self.currentRow:
            pass
        else:
            if self.rowIsHeader:
                self.currentColumnNames, self.currentColumnTypes = \
                    self.getColumnInfos(self.currentRow)
                self.rowIsHeader = False
            elif self.nbOfColumns == 1:
                self.rowIsHeader = True
                if self.currentTableName:
                    self.addTable(self.currentTableName, self.currentTable)
                    self.currentTable = Table()
                self.currentTableName = self.currentRow[0]
            else:
                self.addRow()
        del self.currentRow[:]
        self.nbOfColumns = 0

    def onColumn(self, content):
        self.currentRow.append(content)
        self.nbOfColumns += 1

    def addRow(self):
        i = 0
        row = TableRow(self.currentTable)
        for columnName in self.currentColumnNames:
            columnValue = self.currentRow[i]
            if columnValue == '"':
                if len(self.currentTable) == 0:
                    raise ParserError(
                        NO_ROWS_IN_TABLE_YET % self.currentTableName)
                else:
                    lastRow = self.currentTable[len(self.currentTable) - 1]
                    columnValue = lastRow[columnName]
            else:
                columnType = self.currentColumnTypes[i]
                if columnType:
                    try:
                        columnValue = columnType.convertValue(columnValue)
                    except TypeError as te:
                        raise ParserError(VALUE_ERROR %
                                          (columnName, self.currentTableName,
                                           te))
            row[columnName] = columnValue
            i += 1
        self.currentTable.append(row)

    def addTable(self, tableName, table):
        res = self.tableNameRex.search(tableName)
        tName, parentSpec = res.groups()
        table.name = tName
        if parentSpec:
            res = parentSpec.split(':')
            if len(res) == 1:
                table.parent = parentSpec.strip()
                table.parentRow = 0
            else:
                table.parent = res[0].strip()
                res = res[1].split('=')
                if len(res) == 1:
                    try:
                        table.parentRow = int(res[0])
                    except ValueError:
                        raise ParserError(BAD_PARENT_ROW %
                                          (table.name, table.parent,
                                           res[0]))
                    if table.parentRow < 0:
                        raise ParserError(BAD_PARENT_ROW %
                                          (table.name, table.parent,
                                           res[0]))
                else:
                    table.parentRow = (res[0].strip(), res[1].strip())
        self.rtfTables[table.name] = table

# -----------------------------------------------------------------------------
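# Examples of table names, as parsed by m_addTable above (the names are
# hypothetical):
#   'Config'                     a plain table
#   'ConfigDev(Config)'          inherits from row 0 of table 'Config'
#   'ConfigDev(Config:2)'        inherits from row 2 of table 'Config'
#   'ConfigDev(Config:env=dev)'  inherits from the first row of 'Config'
#                                whose column "env" holds value "dev"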
class RtfTablesParser:
    def __init__(self, fileName):
        self.tableParser = TableParser(fileName)
        self.nameResolver = NameResolver()

    def parse(self):
        tables = self.tableParser.parse()
        self.nameResolver.resolveNames(tables)
        return tables

# -----------------------------------------------------------------------------
if __name__ == '__main__':
    tables = RtfTablesParser("Tests.rtf").parse()
    for key, item in tables.items():
        print('Table %s' % key)
        print(item)
# -----------------------------------------------------------------------------
234
appy/shared/sap.py
Normal file
@@ -0,0 +1,234 @@
'''This module allows to call RFC functions exposed by a distant SAP system.
   It requires the "pysap" module available at http://pysaprfc.sourceforge.net
   and the library librfccm.so that one can download from the "SAP
   MarketPlace", a website by SAP requiring a login/password.'''

# ------------------------------------------------------------------------------
from appy.shared.utils import sequenceTypes

class SapError(Exception): pass
SAP_MODULE_ERROR = 'Module pysap was not found (you can get it at ' \
                   'http://pysaprfc.sourceforge.net)'
SAP_CONNECT_ERROR = 'Error while connecting to SAP (conn_string: %s). %s'
SAP_FUNCTION_ERROR = 'Error while calling function "%s". %s'
SAP_DISCONNECT_ERROR = 'Error while disconnecting from SAP. %s'
SAP_TABLE_PARAM_ERROR = 'Param "%s" does not correspond to a valid table ' \
                        'parameter for function "%s".'
SAP_STRUCT_ELEM_NOT_FOUND = 'Structure used by parameter "%s" does not ' \
                            'define an attribute named "%s".'
SAP_STRING_REQUIRED = 'Type mismatch for attribute "%s" used in parameter ' \
                      '"%s": a string value is expected (SAP type is %s).'
SAP_STRING_OVERFLOW = 'A string value for attribute "%s" used in parameter ' \
                      '"%s" is too long (SAP type is %s).'
SAP_FUNCTION_NOT_FOUND = 'Function "%s" does not exist.'
SAP_FUNCTION_INFO_ERROR = 'Error while asking information about function ' \
                          '"%s". %s'
SAP_GROUP_NOT_FOUND = 'Group of functions "%s" does not exist or is empty.'

# Is the pysap module present or not?
hasSap = True
try:
    import pysap
except ImportError:
    hasSap = False

# ------------------------------------------------------------------------------
class SapResult:
    '''Represents a result as returned by SAP. It defines a __getattr__ method
       that allows to retrieve SAP "output" parameters (export, tables) by
       their name (as if they were attributes of this class), in a Python
       format (list, dict, simple value).'''
    def __init__(self, function):
        # The pysap function obj that was called and that produced this result.
        self.function = function

    def __getattr__(self, name):
        '''Allows a smart access to self.function's results.'''
        if name.startswith('__'): raise AttributeError
        paramValue = self.function[name]
        paramType = paramValue.__class__.__name__
        if paramType == 'ItTable':
            return paramValue.to_list()
        elif paramType == 'STRUCT':
            return paramValue.to_dict()
        else:
            return paramValue

# ------------------------------------------------------------------------------
class Sap:
    '''Represents a remote SAP system. This class allows to connect to a
       distant SAP system and perform RFC calls.'''
    def __init__(self, host, sysnr, client, user, password):
        self.host = host # Hostname or IP address of SAP server
        self.sysnr = sysnr # The system number of SAP server/gateway
        self.client = client # The instance/client number
        self.user = user
        self.password = password
        self.sap = None # Will hold the handler to the SAP distant system.
        self.functionName = None # The name of the next function to call.
        if not hasSap: raise SapError(SAP_MODULE_ERROR)

    def connect(self):
        '''Connects to the SAP system.'''
        params = 'ASHOST=%s SYSNR=%s CLIENT=%s USER=%s PASSWD=%s' % (self.host,
                 self.sysnr, self.client, self.user, self.password)
        try:
            self.sap = pysap.Rfc_connection(conn_string=params)
            self.sap.open()
        except pysap.BaseSapRfcError as se:
            # Put in the error message the connection string without the
            # password.
            connNoPasswd = params[:params.index('PASSWD')] + 'PASSWD=********'
            raise SapError(SAP_CONNECT_ERROR % (connNoPasswd, str(se)))

    def createStructure(self, structDef, userData, paramName):
        '''Creates a struct corresponding to SAP/C structure definition
           p_structDef and fills it with dict p_userData.'''
        res = structDef()
        for name, value in userData.items():
            if name not in structDef._sfield_names_:
                raise SapError(SAP_STRUCT_ELEM_NOT_FOUND % (paramName, name))
            sapType = structDef._sfield_sap_types_[name]
            # Check if the value is valid according to the required type
            if sapType[0] == 'C':
                sType = '%s%d' % (sapType[0], sapType[1])
                # "None" value is tolerated.
                if value == None: value = ''
                if not isinstance(value, str):
                    raise SapError(
                        SAP_STRING_REQUIRED % (name, paramName, sType))
                if len(value) > sapType[1]:
                    raise SapError(
                        SAP_STRING_OVERFLOW % (name, paramName, sType))
                # Right-pad the string with blanks, up to the SAP field length.
                v = value.ljust(sapType[1])
            else:
                v = value
            res[name.lower()] = v
        return res

    def call(self, functionName=None, **params):
        '''Calls a function on the SAP server.'''
        try:
            if not functionName:
                functionName = self.functionName
            function = self.sap.get_interface(functionName)
            # Specify the parameters
            for name, value in params.items():
                if type(value) == dict:
                    # The param corresponds to a SAP/C "struct"
                    v = self.createStructure(
                        self.sap.get_structure(name), value, name)
                elif type(value) in sequenceTypes:
                    # The param must be a SAP/C "table" (a list of structs).
                    # Retrieve the name of the struct type related to this
                    # table.
                    fDesc = self.sap.get_interface_desc(functionName)
                    tableTypeName = ''
                    for tDesc in fDesc.tables:
                        if tDesc.name == name:
                            # We have found the correct table param
                            tableTypeName = tDesc.field_def
                            break
                    if not tableTypeName:
                        raise SapError(
                            SAP_TABLE_PARAM_ERROR % (name, functionName))
                    v = self.sap.get_table(tableTypeName)
                    for dValue in value:
                        v.append(self.createStructure(v.struc, dValue, name))
                else:
                    v = value
                function[name] = v
            # Call the function
            function()
        except pysap.BaseSapRfcError as se:
            raise SapError(SAP_FUNCTION_ERROR % (functionName, str(se)))
        return SapResult(function)

    def __getattr__(self, name):
        '''The user can directly call self.<sapFunctionName>(params) instead
           of calling self.call(<sapFunctionName>, params).'''
        if name.startswith('__'): raise AttributeError
        self.functionName = name
        return self.call

    def getTypeInfo(self, typeName):
        '''Returns information about the type (structure) named p_typeName.'''
        res = ''
        tInfo = self.sap.get_structure(typeName)
        for fName, fieldType in tInfo._fields_:
            res += '  %s: %s (%s)\n' % (fName, tInfo.sap_def(fName),
                                        tInfo.sap_type(fName))
        return res

    def getFunctionInfo(self, functionName):
        '''Returns information about the RFC function named p_functionName.'''
        try:
            res = ''
            usedTypes = set() # Names of type definitions used in parameters.
            fDesc = self.sap.get_interface_desc(functionName)
            functionDescr = str(fDesc).strip()
            if functionDescr: res += functionDescr
            # Import parameters
            if fDesc.imports:
                res += '\nIMPORTS\n'
                for iDesc in fDesc.imports:
                    res += '  %s\n' % str(iDesc)
                    usedTypes.add(iDesc.field_def)
            # Export parameters
            if fDesc.exports:
                res += '\nEXPORTS\n'
                for eDesc in fDesc.exports:
                    res += '  %s\n' % str(eDesc)
                    usedTypes.add(eDesc.field_def)
            if fDesc.tables:
                res += '\nTABLES\n'
                for tDesc in fDesc.tables:
                    res += '  %s\n' % str(tDesc)
                    usedTypes.add(tDesc.field_def)
            if fDesc.exceptions:
                res += '\nEXCEPTIONS\n'
                for eDesc in fDesc.exceptions:
                    res += '  %s\n' % str(eDesc)
            # Add information about used types
            if usedTypes:
                res += '\nTypes used by the parameters:\n'
                for typeName in usedTypes:
                    # Dump info only if it is a structure, not a simple type
                    try:
                        self.sap.get_structure(typeName)
                        res += '%s\n%s\n\n' % \
                               (typeName, self.getTypeInfo(typeName))
                    except pysap.BaseSapRfcError:
                        pass
            return res
        except pysap.BaseSapRfcError as se:
            if se.value == 'FU_NOT_FOUND':
                raise SapError(SAP_FUNCTION_NOT_FOUND % (functionName))
            else:
                raise SapError(SAP_FUNCTION_INFO_ERROR % (functionName,
                                                          str(se)))

    def getGroupInfo(self, groupName):
        '''Gets information about the functions that are available in group
           of functions p_groupName.'''
        if groupName == '_all_':
            # Search everything.
            functions = self.sap.search_functions('*')
        else:
            functions = self.sap.search_functions('*', grpname=groupName)
        if not functions:
            raise SapError(SAP_GROUP_NOT_FOUND % (groupName))
        res = 'Available functions:\n'
        for f in functions:
            res += '  %s' % f.funcname
            if groupName == '_all_':
                res += ' (group: %s)' % f.groupname
            res += '\n'
        return res

    def disconnect(self):
        '''Disconnects from SAP.'''
        try:
            self.sap.close()
        except pysap.BaseSapRfcError as se:
            raise SapError(SAP_DISCONNECT_ERROR % str(se))
# ------------------------------------------------------------------------------
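A hedged usage sketch (host, credentials and the RFC function are illustrative;
STFC_CONNECTION is a standard SAP test function, assumed to be available):

    sap = Sap('sap.example.com', '00', '100', 'myUser', 'myPassword')
    sap.connect()
    # Attribute-style access: same as sap.call('STFC_CONNECTION', ...).
    res = sap.STFC_CONNECTION(REQUTEXT='ping')
    print(res.ECHOTEXT) # Output parameters are reachable as attributes.
    sap.disconnect()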
289
appy/shared/test.py
Normal file
@@ -0,0 +1,289 @@
# ------------------------------------------------------------------------------
# Appy is a framework for building applications in the Python language.
# Copyright (C) 2007 Gaetan Delannay

# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,USA.

# ------------------------------------------------------------------------------
import os, os.path, sys, time
from optparse import OptionParser
from appy.shared.utils import FolderDeleter, Traceback
from appy.shared.errors import InternalError
from appy.shared.rtf import RtfTablesParser
from appy.shared.xml_parser import XmlComparator

# ------------------------------------------------------------------------------
class TesterError(Exception): pass

# TesterError-related constants
WRONG_TEST_PLAN = 'The test plan you specified does not correspond to an ' \
                  'existing RTF file.'
_FLAVOUR = 'A flavour represents a test configuration.'
FLAVOURS_NOT_LIST = 'The flavours specified must be a list or tuple of ' \
                    'strings. ' + _FLAVOUR
FLAVOUR_NOT_STRING = 'Each specified flavour must be a string. ' + _FLAVOUR
WRONG_TEST_FACTORY = 'You must give a test factory that inherits from the ' \
                     'abstract "appy.shared.test.TestFactory" class.'
CREATE_TEST_NOT_OVERRIDDEN = 'The appy.shared.test.TestFactory.createTest ' \
                             'method must be overridden in your concrete ' \
                             'TestFactory.'
MAIN_TABLE_NOT_FOUND = 'No table "TestSuites" found in test plan "%s".'
MAIN_TABLE_MALFORMED = 'The "TestSuites" table must have at least two ' \
                       'columns, named "Name" and "Description".'
TEST_SUITE_NOT_FOUND = 'Table "%s.descriptions" and/or "%s.data" were not ' \
                       'found.'
TEST_SUITE_MALFORMED = 'Tables "%s.descriptions" and "%s.data" do not have ' \
                       'the same length. For each test in "%s.data", you ' \
                       'should have one line in "%s.descriptions" describing ' \
                       'the test.'
FILE_NOT_FOUND = 'File to compare "%s" was not found.'
WRONG_ARGS = 'You must specify as unique argument the configuration flavour ' \
             'you want, which may be one of %s.'
WRONG_FLAVOUR = 'Wrong flavour "%s". Flavour must be one of %s.'

# InternalError-related constants
TEST_REPORT_SINGLETON_ERROR = 'You can only use the TestReport constructor ' \
                              'once. After that you can access the single ' \
                              'TestReport instance via the ' \
                              'TestReport.instance static member.'

# ------------------------------------------------------------------------------
class TestReport:
    instance = None

    def __init__(self, testReportFileName, verbose):
        if TestReport.instance == None:
            self.report = open(testReportFileName, 'w')
            self.verbose = verbose
            TestReport.instance = self
        else:
            raise InternalError(TEST_REPORT_SINGLETON_ERROR)

    def say(self, msg, force=False, encoding=None):
        if self.verbose or force:
            print(msg)
        # Under Python 3 the report file handles unicode natively; p_encoding
        # is kept for backward compatibility, but encoding to bytes here (as
        # the Python 2 version did) would fail on a text-mode file.
        self.report.write(msg)
        self.report.write('\n')

    def close(self):
        self.report.close()

# ------------------------------------------------------------------------------
class Test:
    '''Abstract test class.'''
    def __init__(self, testData, testDescription, testFolder, config, flavour):
        self.data = testData
        self.description = testDescription
        self.testFolder = testFolder
        self.tempFolder = None
        self.report = TestReport.instance
        self.errorDump = None
        self.config = config
        self.flavour = flavour

    def compareFiles(self, expected, actual, areXml=False, xmlTagsToIgnore=(),
                     xmlAttrsToIgnore=(), encoding=None):
        '''Compares 2 files. r_ is True if files are different. The
           differences are written in the test report.'''
        for f in expected, actual:
            assert os.path.exists(f), TesterError(FILE_NOT_FOUND % f)
        # Expected result (may be different according to flavour)
        if self.flavour:
            expectedFlavourSpecific = '%s.%s' % (expected, self.flavour)
            if os.path.exists(expectedFlavourSpecific):
                expected = expectedFlavourSpecific
        # Perform the comparison
        comparator = XmlComparator(actual, expected, areXml, xmlTagsToIgnore,
                                   xmlAttrsToIgnore)
        return not comparator.filesAreIdentical(
            report=self.report, encoding=encoding)

    def run(self):
        self.report.say('-' * 79)
        self.report.say('- Test %s.' % self.data['Name'])
        self.report.say('- %s\n' % self.description)
        # Prepare test data
        self.tempFolder = os.path.join(self.testFolder, 'temp')
        if os.path.exists(self.tempFolder):
            time.sleep(0.3) # Sometimes I can't remove it, so I wait
            FolderDeleter.delete(self.tempFolder)
        os.mkdir(self.tempFolder)
        try:
            self.do()
            self.report.say('Checking result...')
            testFailed = self.checkResult()
        except:
            testFailed = self.onError()
        self.finalize()
        return testFailed

    def do(self):
        '''Concrete part of the test. Must be overridden.'''

    def checkResult(self):
        '''r_ is False if the test succeeded.'''
        return True

    def onError(self):
        '''What must happen when an exception is raised during test
           execution? Returns True if the test failed.'''
        self.errorDump = Traceback.get()
        self.report.say('Exception occurred:')
        self.report.say(self.errorDump)
        return True

    def finalize(self):
        '''Performs some cleaning actions after test execution.'''
        pass

    def isExpectedError(self, expectedMessage):
        '''An exception was thrown. So check if the actual error message
           (stored in self.errorDump) corresponds to the p_expectedMessage.'''
        res = True
        for line in expectedMessage:
            if self.errorDump.find(line) == -1:
                res = False
                self.report.say('"%s" not found among error dump.' % line)
                break
        return res

# ------------------------------------------------------------------------------
class TestFactory:
    @staticmethod
    def createTest(testData, testDescription, testFolder, config, flavour):
        '''This method allows you to create tests that are instances of
           classes that you create. Those classes must be children of
           appy.shared.test.Test. m_createTest must return a Test instance
           and is called every time a test definition is encountered in the
           test plan.'''
        raise TesterError(CREATE_TEST_NOT_OVERRIDDEN)

# ------------------------------------------------------------------------------
class Tester:
    def __init__(self, testPlan, flavours, testFactory):
        # Check test plan
        if (not os.path.exists(testPlan)) or (not os.path.isfile(testPlan)) \
           or (not testPlan.endswith('.rtf')):
            raise TesterError(WRONG_TEST_PLAN)
        self.testPlan = testPlan
        self.testFolder = os.path.abspath(os.path.dirname(testPlan))
        # Check flavours
        if (not isinstance(flavours, list)) and \
           (not isinstance(flavours, tuple)):
            raise TesterError(FLAVOURS_NOT_LIST)
        for flavour in flavours:
            if not isinstance(flavour, str):
                raise TesterError(FLAVOUR_NOT_STRING)
        self.flavours = flavours
        self.flavour = None
        # Check test factory
        if not issubclass(testFactory, TestFactory):
            raise TesterError(WRONG_TEST_FACTORY)
        self.testFactory = testFactory
        self.getOptions()
        self.report = TestReport('%s/Tester.report.txt' % self.testFolder,
                                 self.verbose)
        self.report.say('Parsing RTF file... ')
        t1 = time.time()
        self.tables = RtfTablesParser(testPlan).parse()
        t2 = time.time() - t1
        self.report.say('Done in %d seconds' % t2)
        self.config = None
        ext = ''
        if self.flavour:
            ext = '.%s' % self.flavour
        configTableName = 'Configuration%s' % ext
        if configTableName in self.tables:
            self.config = self.tables[configTableName].asDict()
        self.tempFolder = os.path.join(self.testFolder, 'temp')
        if os.path.exists(self.tempFolder):
            FolderDeleter.delete(self.tempFolder)
        self.nbOfTests = 0
        self.nbOfSuccesses = 0
        self.nbOfIgnoredTests = 0

    def getOptions(self):
        optParser = OptionParser()
        optParser.add_option("-v", "--verbose", action="store_true",
                             help="Dumps the whole test report on stdout")
        optParser.add_option("-k", "--keepTemp", action="store_true", help= \
                             "Keep the temp folder, in order to be able to " \
                             "copy some results and make them expected " \
                             "results when needed.")
        (options, args) = optParser.parse_args()
        if self.flavours:
            if len(args) != 1:
                raise TesterError(WRONG_ARGS % self.flavours)
            self.flavour = args[0]
            if not self.flavour in self.flavours:
                raise TesterError(WRONG_FLAVOUR % (self.flavour,
                                                   self.flavours))
        self.verbose = options.verbose == True
        self.keepTemp = options.keepTemp == True

    def runSuite(self, suite):
        self.report.say('*' * 79)
        self.report.say('* Suite %s.' % suite['Name'])
        self.report.say('* %s\n' % suite['Description'])
        i = -1
        for testData in self.tables['%s.data' % suite['Name']]:
            self.nbOfTests += 1
            i += 1
            if testData['Name'].startswith('_'):
                self.nbOfIgnoredTests += 1
            else:
                description = self.tables['%s.descriptions' % \
                                          suite['Name']][i]['Description']
                test = self.testFactory.createTest(
                    testData, description, self.testFolder, self.config,
                    self.flavour)
                testFailed = test.run()
                if not self.verbose:
                    sys.stdout.write('.')
                    sys.stdout.flush()
                if testFailed:
                    self.report.say('Test failed.\n')
                else:
                    self.report.say('Test successful.\n')
                    self.nbOfSuccesses += 1

    def run(self):
        assert 'TestSuites' in self.tables, \
               TesterError(MAIN_TABLE_NOT_FOUND % self.testPlan)
        for testSuite in self.tables['TestSuites']:
            if ('Name' not in testSuite) or \
               ('Description' not in testSuite):
                raise TesterError(MAIN_TABLE_MALFORMED)
            if testSuite['Name'].startswith('_'):
                tsName = testSuite['Name'][1:]
                tsIgnored = True
            else:
                tsName = testSuite['Name']
                tsIgnored = False
            assert '%s.descriptions' % tsName in self.tables \
                   and '%s.data' % tsName in self.tables, \
                   TesterError(TEST_SUITE_NOT_FOUND % (tsName, tsName))
            assert len(self.tables['%s.descriptions' % tsName]) == \
                   len(self.tables['%s.data' % tsName]), \
                   TesterError(TEST_SUITE_MALFORMED % ((tsName,) * 4))
            if tsIgnored:
                nbOfIgnoredTests = len(self.tables['%s.data' % tsName])
                self.nbOfIgnoredTests += nbOfIgnoredTests
                self.nbOfTests += nbOfIgnoredTests
            else:
                self.runSuite(testSuite)
        self.finalize()

    def finalize(self):
        msg = '%d/%d successful test(s)' % \
              (self.nbOfSuccesses, (self.nbOfTests - self.nbOfIgnoredTests))
        if self.nbOfIgnoredTests > 0:
            msg += ', but %d ignored test(s) not counted' % \
                   self.nbOfIgnoredTests
        msg += '.'
        self.report.say(msg, force=True)
        self.report.close()
        if not self.keepTemp:
            if os.path.exists(self.tempFolder):
                FolderDeleter.delete(self.tempFolder)
# ------------------------------------------------------------------------------
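A minimal sketch of how these pieces fit together (the class names and the
test-plan file name are illustrative assumptions):

    class MyTest(Test):
        def do(self):
            # Produce results in self.tempFolder, based on self.data...
            pass
        def checkResult(self):
            return False # False means the test succeeded.

    class MyFactory(TestFactory):
        @staticmethod
        def createTest(testData, testDescription, testFolder, config, flavour):
            return MyTest(testData, testDescription, testFolder, config,
                          flavour)

    if __name__ == '__main__':
        Tester('TestPlan.rtf', [], MyFactory).run()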
703
appy/shared/utils.py
Normal file
@@ -0,0 +1,703 @@
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------------------------
|
||||
# Appy is a framework for building applications in the Python language.
|
||||
# Copyright (C) 2007 Gaetan Delannay
|
||||
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,USA.
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
import os, os.path, re, time, sys, traceback, unicodedata, shutil, mimetypes
|
||||
sequenceTypes = (list, tuple)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
class FolderDeleter:
|
||||
@staticmethod
|
||||
def delete(dirName):
|
||||
'''Recursively deletes p_dirName.'''
|
||||
dirName = os.path.abspath(dirName)
|
||||
for root, dirs, files in os.walk(dirName, topdown=False):
|
||||
for name in files:
|
||||
os.remove(os.path.join(root, name))
|
||||
for name in dirs:
|
||||
os.rmdir(os.path.join(root, name))
|
||||
os.rmdir(dirName)
|
||||
|
||||
@staticmethod
|
||||
def deleteEmpty(dirName):
|
||||
'''Deletes p_dirName and its parent dirs if they are empty.'''
|
||||
while True:
|
||||
try:
|
||||
if not os.listdir(dirName):
|
||||
os.rmdir(dirName)
|
||||
dirName = os.path.dirname(dirName)
|
||||
else:
|
||||
break
|
||||
except OSError:
|
||||
break
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
extsToClean = ('.pyc', '.pyo', '.fsz', '.deltafsz', '.dat', '.log')
|
||||
def cleanFolder(folder, exts=extsToClean, folders=(), verbose=False):
|
||||
'''This function allows to remove, in p_folder and subfolders, any file
|
||||
whose extension is in p_exts, and any folder whose name is in
|
||||
p_folders.'''
|
||||
if verbose: print(('Cleaning folder %s...' % folder))
|
||||
# Remove files with an extension listed in p_exts
|
||||
if exts:
|
||||
for root, dirs, files in os.walk(folder):
|
||||
for fileName in files:
|
||||
ext = os.path.splitext(fileName)[1]
|
||||
if (ext in exts) or ext.endswith('~'):
|
||||
fileToRemove = os.path.join(root, fileName)
|
||||
if verbose: print(('Removing file %s...' % fileToRemove))
|
||||
os.remove(fileToRemove)
|
||||
# Remove folders whose names are in p_folders.
|
||||
if folders:
|
||||
for root, dirs, files in os.walk(folder):
|
||||
for folderName in dirs:
|
||||
if folderName in folders:
|
||||
toDelete = os.path.join(root, folderName)
|
||||
if verbose: print(('Removing folder %s...' % toDelete))
|
||||
FolderDeleter.delete(toDelete)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
def resolvePath(path):
|
||||
'''p_path is a file path that can contain occurences of "." and "..". This
|
||||
function resolves them and procuces a minimal path.'''
|
||||
res = []
|
||||
for elem in path.split(os.sep):
|
||||
if elem == '.': pass
|
||||
elif elem == '..': res.pop()
|
||||
else: res.append(elem)
|
||||
return os.sep.join(res)
|
||||
|
# ------------------------------------------------------------------------------
def copyFolder(source, dest, cleanDest=False):
    '''Copies the content of folder p_source to folder p_dest. p_dest is
       created, with intermediary subfolders if required. If p_cleanDest is
       True, p_dest is completely removed first if it existed. Else, the
       content of p_source is added to the possibly existing content of
       p_dest, except when file names correspond: in that case, the file from
       p_source overwrites the one in p_dest.'''
    dest = os.path.abspath(dest)
    # Delete the dest folder if required
    if os.path.exists(dest) and cleanDest:
        FolderDeleter.delete(dest)
    # Create the dest folder if it does not exist
    if not os.path.exists(dest):
        os.makedirs(dest)
    # Copy the content of p_source to p_dest
    for name in os.listdir(source):
        sourceName = os.path.join(source, name)
        destName = os.path.join(dest, name)
        if os.path.isfile(sourceName):
            # Copy a single file
            shutil.copy(sourceName, destName)
        elif os.path.isdir(sourceName):
            # Copy a subfolder (recursively)
            copyFolder(sourceName, destName)

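# Illustrative usage only (hypothetical paths): mirror a skeleton folder into a
# fresh destination, wiping any previous content.
#   copyFolder('/opt/myApp/skeleton', '/var/myApp/instance', cleanDest=True)
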
# ------------------------------------------------------------------------------
def encodeData(data, encoding=None):
    '''Applies some p_encoding to string p_data, but only if an encoding is
       specified.'''
    if not encoding: return data
    return data.encode(encoding)

# ------------------------------------------------------------------------------
def copyData(data, target, targetMethod, type='string', encoding=None,
             chunkSize=1024):
    '''Copies p_data to a p_target, using p_targetMethod. For example, it
       copies p_data, which is a string containing the binary content of a
       file, to p_target, which can be an HTTP connection or a file object.

       p_targetMethod can be "write" (files), "send" (HTTP connections), etc.
       p_type can be "string", "file" or "zope". In the latter case, p_data is
       an instance of OFS.Image.File. If p_type is "file", one may specify, in
       p_chunkSize, the number of bytes transmitted at a time.

       If a p_encoding is specified, it is applied to p_data before copying.

       Note that if p_target is a Python file, it must be opened in a way that
       is compatible with the content of p_data, ie open('myFile.doc', 'wb')
       if the content is binary.'''
    dump = getattr(target, targetMethod)
    if not type or (type == 'string'): dump(encodeData(data, encoding))
    elif type == 'file':
        while True:
            chunk = data.read(chunkSize)
            if not chunk: break
            dump(encodeData(chunk, encoding))
    elif type == 'zope':
        # An OFS.Image.File instance can be split into several chunks, chained
        # via the "next" attribute of Zope's Pdata objects.
        if isinstance(data.data, bytes): # One chunk
            dump(encodeData(data.data, encoding))
        else:
            # Several chunks
            data = data.data
            while data is not None:
                dump(encodeData(data.data, encoding))
                data = data.next

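# A minimal sketch of a "file"-mode copy (hypothetical file names): stream
# source.bin into target.bin, 1 KB at a time.
#   src = open('source.bin', 'rb')
#   dst = open('target.bin', 'wb')
#   copyData(src, dst, 'write', type='file')
#   src.close(); dst.close()
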
# ------------------------------------------------------------------------------
def splitList(l, sub):
    '''Returns a list that was built from list p_l, whose elements were
       re-grouped into sub-lists of p_sub elements.

       For example, if l = [1,2,3,4,5] and sub = 3, the method returns
       [ [1,2,3], [4,5] ].'''
    res = []
    i = -1
    for elem in l:
        i += 1
        if (i % sub) == 0:
            # A new sub-list must be created
            res.append([elem])
        else:
            res[-1].append(elem)
    return res

class IterSub:
    '''Iterator over a list of lists.'''
    def __init__(self, l):
        self.l = l
        self.i = 0 # The current index in the main list
        self.j = 0 # The current index in the current sub-list
    def __iter__(self): return self
    # Under Python 3, the iterator protocol requires method "__next__", not
    # "next".
    def __next__(self):
        # Get the next element in the ith sub-list
        if (self.i + 1) > len(self.l): raise StopIteration
        sub = self.l[self.i]
        if (self.j + 1) > len(sub):
            self.i += 1
            self.j = 0
            return self.__next__()
        else:
            elem = sub[self.j]
            self.j += 1
            return elem

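# For example (illustrative only):
#   list(IterSub([[1, 2], [], [3]])) returns [1, 2, 3]
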
# ------------------------------------------------------------------------------
def flipDict(d):
    '''Flips dict p_d: keys become values, values become keys. p_d is left
       untouched: a new, flipped, dict is returned.'''
    res = {}
    for k, v in d.items(): res[v] = k
    return res

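# For example (illustrative only):
#   flipDict({'odt': 'text', 'ods': 'spreadsheet'}) returns
#   {'text': 'odt', 'spreadsheet': 'ods'}
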
# ------------------------------------------------------------------------------
class Traceback:
    '''Dumps the last traceback into a string'''
    @staticmethod
    def get(last=None):
        '''Gets the traceback as a string. If p_last is given (must be an
           integer value), only the p_last lines of the traceback will be
           included. It can be useful for pod/px tracebacks: when an exception
           occurs while evaluating a complex tree of buffers, most of the
           traceback lines concern uninteresting buffer/action-related
           recursive calls.'''
        res = []
        excType, excValue, tb = sys.exc_info()
        tbLines = traceback.format_tb(tb)
        for tbLine in tbLines:
            res.append(' %s' % tbLine)
        res.append(' %s: %s' % (str(excType), str(excValue)))
        if last: res = res[-last:]
        return ''.join(res)

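# Typical usage sketch: log a compact traceback from an "except" block
# ("brokenCall" is hypothetical).
#   try:
#       brokenCall()
#   except Exception:
#       print(Traceback.get(last=5))
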
# ------------------------------------------------------------------------------
def getOsTempFolder():
    tmp = '/tmp'
    if os.path.exists(tmp) and os.path.isdir(tmp):
        res = tmp
    elif 'TMP' in os.environ:
        res = os.environ['TMP']
    elif 'TEMP' in os.environ:
        res = os.environ['TEMP']
    else:
        raise Exception("Sorry, I can't find a temp folder on your machine.")
    return res

def getTempFileName(prefix='', extension=''):
    '''Returns the absolute path to a unique file name in the OS temp folder.
       The caller will then be able to create a file with this name.

       A p_prefix to this file can be provided. If a p_extension is provided,
       it will be appended to the name. Both dotted and undotted versions of
       p_extension are allowed (ie, ".pdf" or "pdf").'''
    res = '%s/%s_%f' % (getOsTempFolder(), prefix, time.time())
    if extension:
        if extension.startswith('.'): res += extension
        else: res += '.' + extension
    return res

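# For example (illustrative only; the exact name contains a timestamp), on a
# machine whose temp folder is /tmp:
#   getTempFileName('export', 'pdf') returns something like
#   '/tmp/export_1387282559.939381.pdf'
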
# ------------------------------------------------------------------------------
def executeCommand(cmd):
    '''Executes command p_cmd and returns the content of its stderr.'''
    # os.popen3, used previously, does not exist anymore in recent Python
    # versions: use module "subprocess" instead.
    proc = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE)
    stderr = proc.communicate()[1]
    if isinstance(stderr, bytes): stderr = stderr.decode()
    return stderr

# ------------------------------------------------------------------------------
charsIgnore = u'.,:;*+=~?%^\'’"<>{}[]|\t\\°-'
fileNameIgnore = charsIgnore + u' $£€/\r\n'
extractIgnore = charsIgnore + '()'
alphaRex = re.compile('[a-zA-Z]')
alphanumRex = re.compile('[a-zA-Z0-9]')

def normalizeString(s, usage='fileName'):
    '''Returns a version of string p_s whose special chars (like accents) have
       been replaced with normal chars. Moreover, if p_usage is:
       * fileName: it removes any char that can't be part of a file name;
       * alphanum: it removes any non-alphanumeric char;
       * alpha: it removes any non-letter char.
    '''
    strNeeded = isinstance(s, bytes)
    # We work on a unicode string. Convert p_s if it is a bytes string.
    if isinstance(s, bytes):
        try:
            s = s.decode('utf-8')
        except UnicodeDecodeError:
            # Another encoding may be in use
            s = s.decode('latin-1')
    elif not isinstance(s, str): s = str(s)
    # For extracted text, replace any unwanted char with a blank
    if usage == 'extractedText':
        res = ''
        for char in s:
            if char not in extractIgnore: res += char
            else: res += ' '
        s = res
    # Standardize special chars like accents
    s = unicodedata.normalize('NFKD', s).encode('ascii', 'ignore').decode()
    # Remove any other char, depending on p_usage
    if usage == 'fileName':
        # Remove any char that can't be found within a file name under Windows
        # or that could lead to problems with LibreOffice.
        res = ''
        for char in s:
            if char not in fileNameIgnore: res += char
    elif usage.startswith('alpha'):
        # Under Python 3, "exec" cannot rebind a local name: select the
        # regular expression explicitly.
        rex = alphaRex if usage == 'alpha' else alphanumRex
        res = ''
        for char in s:
            if rex.match(char): res += char
    elif usage == 'noAccents':
        res = s
    else:
        res = s
    # Re-encode the result to bytes if a bytes string was given
    if strNeeded: res = res.encode('utf-8')
    return res

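# For example (illustrative only):
#   normalizeString('Résumé')                         returns 'Resume'
#   normalizeString('Résumé-2024!', usage='alphanum') returns 'Resume2024'
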
def normalizeText(s):
    '''Normalizes p_s: removes special chars, lowercases it, etc, for indexing
       purposes.'''
    return normalizeString(s, usage='extractedText').strip().lower()

def keepDigits(s):
    '''Returns string p_s whose non-digit chars have been removed.'''
    if s is None: return s
    res = ''
    for c in s:
        if c.isdigit(): res += c
    return res

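# For example (illustrative only):
#   keepDigits('+32 2 345.678') returns '322345678'
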
def getStringDict(d):
    '''Gets the string literal corresponding to dict p_d.'''
    res = []
    for k, v in d.items():
        if type(v) not in sequenceTypes:
            if not isinstance(k, str): k = str(k)
            if not isinstance(v, str): v = str(v)
            value = "'%s':'%s'" % (k, v.replace("'", "\\'"))
        else:
            value = "'%s':%s" % (k, v)
        res.append(value)
    return '{%s}' % ','.join(res)

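# For example (illustrative only):
#   getStringDict({'a': 1, 'b': [1, 2]}) returns "{'a':'1','b':[1, 2]}"
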
def stretchText(s, pattern, char=' '):
    '''Inserts occurrences of p_char within p_s according to p_pattern.
       Example: stretchText("475123456", (3,2,2,2)) returns '475 12 34 56'.'''
    res = ''
    i = 0
    for nb in pattern:
        # Insert the separator between groups, but not after the last one, as
        # shown in the docstring's example.
        if res: res += char
        res += s[i:i+nb]
        i += nb
    return res

# ------------------------------------------------------------------------------
def formatNumber(n, sep=',', precision=2, tsep=' '):
    '''Returns a string representation of number p_n, which can be a float or
       an integer. p_sep is the decimal separator to use. p_precision is the
       number of digits to keep in the decimal part for producing a nicely
       rounded string representation. p_tsep is the "thousands" separator.'''
    if n is None: return ''
    # Manage precision
    if precision is None:
        res = str(n)
    else:
        format = '%%.%df' % precision
        res = format % n
    # Use the correct decimal separator
    res = res.replace('.', sep)
    # Insert p_tsep every 3 chars in the integer part of the number
    parts = res.split(sep)
    res = ''
    if len(parts[0]) < 4: res = parts[0]
    else:
        i = len(parts[0]) - 1
        j = 0
        while i >= 0:
            j += 1
            res = parts[0][i] + res
            # Do not insert p_tsep in front of the number
            if ((j % 3) == 0) and (i != 0):
                res = tsep + res
            i -= 1
    # Add the decimal part if not 0
    if len(parts) > 1:
        try:
            decPart = int(parts[1])
            if decPart != 0:
                res += sep + parts[1]
        except ValueError:
            # This exception may occur when the float value has an "exp"
            # part, like in this example: 4.345e-05
            res += sep + parts[1]
    return res

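# For example (illustrative only):
#   formatNumber(1234567.8912)         returns '1 234 567,89'
#   formatNumber(1000, precision=None) returns '1 000'
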
# ------------------------------------------------------------------------------
def lower(s):
    '''French-accents-aware variant of string.lower.'''
    isBytes = isinstance(s, bytes)
    if isBytes: s = s.decode('utf-8')
    res = s.lower()
    if isBytes: res = res.encode('utf-8')
    return res

def upper(s):
    '''French-accents-aware variant of string.upper.'''
    isBytes = isinstance(s, bytes)
    if isBytes: s = s.decode('utf-8')
    res = s.upper()
    if isBytes: res = res.encode('utf-8')
    return res

# ------------------------------------------------------------------------------
typeLetters = {'b': bool, 'i': int, 'j': int, 'f': float, 's': str, 'u': str,
               'l': list, 'd': dict}
caExts = {'py': ('.py', '.vpy', '.cpy'), 'xml': ('.pt', '.cpt', '.xml')}

# ------------------------------------------------------------------------------
class CodeAnalysis:
    '''This class holds information about some code analysis (line counts)
       that spans some folder hierarchy.'''
    def __init__(self, name):
        self.name = name # Let's give a name for the analysis
        self.numberOfFiles = 0 # The total number of analysed files
        self.emptyLines = 0 # The number of empty lines within those files
        self.commentLines = 0 # The number of comment lines
        # A code line is defined as anything that is not an empty or comment
        # line.
        self.codeLines = 0

    def numberOfLines(self):
        '''Computes the total number of lines within analysed files.'''
        return self.emptyLines + self.commentLines + self.codeLines

    def analyseXmlFile(self, theFile):
        '''Analyses the XML file p_theFile.'''
        inDoc = False
        for line in theFile:
            stripped = line.strip()
            # Manage a comment
            if not inDoc and ((line.find('<!--') != -1) or \
               (line.find('<tal:comment ') != -1)):
                inDoc = True
            if inDoc:
                self.commentLines += 1
                if (line.find('-->') != -1) or \
                   (line.find('</tal:comment>') != -1):
                    inDoc = False
                continue
            # Manage an empty line
            if not stripped:
                self.emptyLines += 1
            else:
                self.codeLines += 1

    docSeps = ('"""', "'''")
    def isPythonDoc(self, line, start, isStart=False):
        '''Returns True if we find, in p_line, the start of a docstring (if
           p_start is True) or the end of a docstring (if p_start is False).
           p_isStart indicates if p_line is the start of the docstring.'''
        if start:
            res = line.startswith(self.docSeps[0]) or \
                  line.startswith(self.docSeps[1])
        else:
            sepOnly = (line == self.docSeps[0]) or (line == self.docSeps[1])
            if sepOnly:
                # If the line contains the separator only, is this the start
                # or the end of the docstring?
                res = not isStart
            else:
                res = line.endswith(self.docSeps[0]) or \
                      line.endswith(self.docSeps[1])
        return res

    def analysePythonFile(self, theFile):
        '''Analyses the Python file p_theFile.'''
        # Are we in a docstring ?
        inDoc = False
        for line in theFile:
            stripped = line.strip()
            # Manage a line that is within a docstring
            inDocStart = False
            if not inDoc and self.isPythonDoc(stripped, start=True):
                inDoc = True
                inDocStart = True
            if inDoc:
                self.commentLines += 1
                if self.isPythonDoc(stripped, start=False, isStart=inDocStart):
                    inDoc = False
                continue
            # Manage an empty line
            if not stripped:
                self.emptyLines += 1
                continue
            # Manage a comment line
            if line.startswith('#'):
                self.commentLines += 1
                continue
            # If we are here, we have a code line
            self.codeLines += 1

    def analyseFile(self, fileName):
        '''Analyses file named p_fileName.'''
        self.numberOfFiles += 1
        theFile = open(fileName)
        ext = os.path.splitext(fileName)[1]
        if ext in caExts['py']: self.analysePythonFile(theFile)
        elif ext in caExts['xml']: self.analyseXmlFile(theFile)
        theFile.close()

    def printReport(self):
        '''Prints the analysis report, only if there is at least one analysed
           line.'''
        lines = self.numberOfLines()
        if not lines: return
        commentRate = (self.commentLines / float(lines)) * 100.0
        blankRate = (self.emptyLines / float(lines)) * 100.0
        print('%s: %d files, %d lines (%.0f%% comments, %.0f%% blank)' % \
              (self.name, self.numberOfFiles, lines, commentRate, blankRate))

# ------------------------------------------------------------------------------
class LinesCounter:
    '''Counts and classifies the lines of code within a folder hierarchy.'''
    defaultExcludes = ('%s.svn' % os.sep, '%s.bzr' % os.sep, '%stmp' % os.sep,
                       '%stemp' % os.sep)

    def __init__(self, folderOrModule, excludes=None):
        if isinstance(folderOrModule, str):
            # It is the path of some folder
            self.folder = folderOrModule
        else:
            # It is a Python module
            self.folder = os.path.dirname(folderOrModule.__file__)
        # These dicts will hold information about analysed files
        self.python = {False: CodeAnalysis('Python'),
                       True:  CodeAnalysis('Python (test)')}
        self.xml = {False: CodeAnalysis('XML'),
                    True:  CodeAnalysis('XML (test)')}
        # Are we currently analysing real or test code?
        self.inTest = False
        # Which paths to exclude from the analysis?
        self.excludes = list(self.defaultExcludes)
        if excludes: self.excludes += excludes

    def printReport(self):
        '''Displays on stdout a small analysis report about self.folder.'''
        total = 0
        for type in ('python', 'xml'):
            for zone in (False, True):
                analyser = getattr(self, type)[zone]
                if analyser.numberOfFiles:
                    analyser.printReport()
                    total += analyser.numberOfLines()
        print('Total (including commented and blank): *** %d ***' % total)

    def isExcluded(self, path):
        '''Must p_path be excluded from the analysis?'''
        for excl in self.excludes:
            if excl in path: return True

    def run(self):
        '''Let's start the analysis of self.folder.'''
        # The test markers will allow us to know if we are analysing test code
        # or real code within a given part of self.folder's code hierarchy.
        testMarker1 = '%stest%s' % (os.sep, os.sep)
        testMarker2 = '%stest' % os.sep
        testMarker3 = '%stests%s' % (os.sep, os.sep)
        testMarker4 = '%stests' % os.sep
        j = os.path.join
        for root, folders, files in os.walk(self.folder):
            if self.isExcluded(root): continue
            # Are we in real code or in test code ?
            self.inTest = False
            if root.endswith(testMarker2) or (root.find(testMarker1) != -1) or \
               root.endswith(testMarker4) or (root.find(testMarker3) != -1):
                self.inTest = True
            # Scan the files in this folder
            for fileName in files:
                ext = os.path.splitext(fileName)[1]
                if ext in caExts['py']:
                    self.python[self.inTest].analyseFile(j(root, fileName))
                elif ext in caExts['xml']:
                    self.xml[self.inTest].analyseFile(j(root, fileName))
        self.printReport()

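# Typical usage sketch: count the lines of code of a Python module or of any
# folder hierarchy.
#   import appy
#   LinesCounter(appy).run()
#   LinesCounter('/home/me/myProject', excludes=['%sdoc' % os.sep]).run()
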
# ------------------------------------------------------------------------------
CONVERSION_ERROR = 'An error occurred. %s'
class FileWrapper:
    '''When you get, from an appy object, the value of a File attribute, you
       get an instance of this class.'''
    def __init__(self, zopeFile):
        '''This constructor is only used by Appy to create a nice File
           instance from the corresponding Zope instance (p_zopeFile). If you
           need to create a new file and assign it to a File attribute, use
           the attribute setter; do not create an instance of this class
           yourself.'''
        d = self.__dict__
        d['_zopeFile'] = zopeFile # Not for you!
        d['name'] = zopeFile.filename
        d['content'] = zopeFile.data
        d['mimeType'] = zopeFile.content_type
        d['size'] = zopeFile.size # In bytes

    def __setattr__(self, name, v):
        d = self.__dict__
        if name == 'name':
            self._zopeFile.filename = v
            d['name'] = v
        elif name == 'content':
            self._zopeFile.update_data(v, self.mimeType, len(v))
            d['content'] = v
            d['size'] = len(v)
        elif name == 'mimeType':
            # Update the dict directly: going through self.mimeType would
            # recursively trigger this very method.
            self._zopeFile.content_type = v
            d['mimeType'] = v
        else:
            # Raising a plain string is not allowed: raise a real exception
            raise AttributeError('Impossible to set attribute %s. "Settable" '
                                 'attributes are "name", "content" and '
                                 '"mimeType".' % name)

    def dump(self, filePath=None, format=None, tool=None):
        '''Writes the file on disk. If p_filePath is specified, it is the
           path name where the file will be dumped; folders mentioned in it
           must exist. If not, the file will be dumped in the OS temp folder.
           The absolute path name of the dumped file is returned.

           If an error occurs, the method returns None. If p_format is
           specified, LibreOffice will be called for converting the dumped
           file to the desired format. In this case, p_tool, an Appy tool,
           must be provided. Indeed, any Appy tool contains parameters for
           contacting LibreOffice in server mode.'''
        if not filePath:
            filePath = '%s/file%f.%s' % (getOsTempFolder(), time.time(),
                                         normalizeString(self.name))
        f = open(filePath, 'wb')
        if self.content.__class__.__name__ == 'Pdata':
            # The file content is split into several chunks, chained via the
            # "next" attribute of Zope's Pdata objects.
            f.write(self.content.data)
            nextPart = self.content.next
            while nextPart:
                f.write(nextPart.data)
                nextPart = nextPart.next
        else:
            # Only one chunk
            f.write(self.content)
        f.close()
        if format:
            if not tool: return
            # Convert the dumped file using LibreOffice
            errorMessage = tool.convert(filePath, format)
            # Even if we have an "error" message, it could be a simple warning.
            # So we will continue here and, as a subsequent check for knowing
            # if an error occurred or not, we will test the existence of the
            # converted file (see below).
            os.remove(filePath)
            # Return the name of the converted file
            baseName, ext = os.path.splitext(filePath)
            if ext == '.%s' % format:
                filePath = '%s.res.%s' % (baseName, format)
            else:
                filePath = '%s.%s' % (baseName, format)
            if not os.path.exists(filePath):
                tool.log(CONVERSION_ERROR % errorMessage, type='error')
                return
        return filePath

    def copy(self):
        '''Returns a copy of this file'''
        return FileWrapper(self._zopeFile._getCopy(self._zopeFile))

# ------------------------------------------------------------------------------
def getMimeType(fileName):
    '''Tries to guess the MIME type from p_fileName'''
    res, encoding = mimetypes.guess_type(fileName)
    if not res:
        if fileName.endswith('.po'):
            res = 'text/plain'
            encoding = 'utf-8'
    if not res: return ''
    if not encoding: return res
    return '%s;charset=%s' % (res, encoding)

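# For example (illustrative only; results for standard extensions come from
# module "mimetypes"):
#   getMimeType('readme.txt')  returns 'text/plain'
#   getMimeType('messages.po') returns 'text/plain;charset=utf-8'
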
# ------------------------------------------------------------------------------
class WhitespaceCruncher:
    '''Takes care of removing unnecessary whitespace in several contexts'''
    whitechars = u' \r\t\n' # Chars considered as whitespace
    allWhitechars = whitechars + u'\xa0' # Idem, plus the non-breaking space
    @staticmethod
    def crunch(s, previous=None):
        '''Returns a version of p_s (expected to be a unicode string) where
           all "whitechars" are:
           * converted to real whitespace;
           * reduced in such a way that there cannot be 2 consecutive
             whitespace chars.
           If p_previous is given, those rules must also apply globally to
           previous+s.'''
        res = ''
        # Initialise the previous char
        if previous:
            previousChar = previous[-1]
        else:
            previousChar = u''
        for char in s:
            if char in WhitespaceCruncher.whitechars:
                # Include the current whitechar in the result if the previous
                # char is not a whitespace or nbsp.
                if not previousChar or \
                   (previousChar not in WhitespaceCruncher.allWhitechars):
                    res += u' '
            else: res += char
            previousChar = char
        # "res" can be a single whitespace. It is up to the caller method to
        # identify when this single whitespace must be kept or crunched.
        return res
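
# For example (illustrative only):
#   WhitespaceCruncher.crunch(u'Some \r\n\t text') returns u'Some text'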
# ------------------------------------------------------------------------------
1195
appy/shared/xml_parser.py
Normal file
File diff suppressed because it is too large
94
appy/shared/zip.py
Normal file
@@ -0,0 +1,94 @@
'''Functions for (un)zipping files'''

# ------------------------------------------------------------------------------
import os, os.path, zipfile, time
from appy.shared import mimeTypes

# ------------------------------------------------------------------------------
def unzip(f, folder, odf=False):
    '''Unzips file p_f into p_folder. p_f can be anything accepted by the
       zipfile.ZipFile constructor. p_folder must exist.

       If p_odf is True, p_f is considered to be an odt or ods file and this
       function will return a dict containing the content of content.xml and
       styles.xml from the zipped file.'''
    zipFile = zipfile.ZipFile(f)
    if odf: res = {}
    else: res = None
    for zippedFile in zipFile.namelist():
        # Before writing the zippedFile into p_folder, create the intermediary
        # subfolder(s) if needed.
        fileName = None
        if zippedFile.endswith('/') or zippedFile.endswith(os.sep):
            # This is an empty folder. Create it nevertheless. If zippedFile
            # starts with a '/', os.path.join will consider it an absolute
            # path and will throw away p_folder.
            os.makedirs(os.path.join(folder, zippedFile.lstrip('/')))
        else:
            fileName = os.path.basename(zippedFile)
            folderName = os.path.dirname(zippedFile)
            fullFolderName = folder
            if folderName:
                fullFolderName = os.path.join(fullFolderName, folderName)
                if not os.path.exists(fullFolderName):
                    os.makedirs(fullFolderName)
        # Unzip the file in p_folder. Do not reuse name "f" here: it is the
        # zip file being read.
        if fileName:
            fullFileName = os.path.join(fullFolderName, fileName)
            out = open(fullFileName, 'wb')
            fileContent = zipFile.read(zippedFile)
            if odf and not folderName:
                # content.xml and others may reside in subfolders. Get only
                # the ones in the root folder.
                if fileName == 'content.xml':
                    res['content.xml'] = fileContent
                elif fileName == 'styles.xml':
                    res['styles.xml'] = fileContent
                elif fileName == 'mimetype':
                    res['mimetype'] = fileContent
            out.write(fileContent)
            out.close()
    zipFile.close()
    return res

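# Typical usage sketch (hypothetical file names): extract content.xml and
# styles.xml from a pod template.
#   info = unzip('/tmp/template.odt', '/tmp/template', odf=True)
#   content = info['content.xml']
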
# ------------------------------------------------------------------------------
def zip(f, folder, odf=False):
    '''Zips the content of p_folder into the zip file whose (preferably)
       absolute filename is p_f. If p_odf is True, p_folder is considered to
       contain the standard content of an ODF file (content.xml,...). In this
       case, some rules must be respected while building the zip (see
       below).'''
    # Remove p_f if it exists
    if os.path.exists(f): os.remove(f)
    try:
        zipFile = zipfile.ZipFile(f, 'w', zipfile.ZIP_DEFLATED)
    except RuntimeError:
        zipFile = zipfile.ZipFile(f, 'w')
    # If p_odf is True, insert first the file "mimetype" (uncompressed), in
    # order to be compliant with the OpenDocument Format specification,
    # section 17.4, that expresses this restriction. Else, libraries like
    # "magic", under Linux/Unix, are unable to detect the correct MIME type
    # for a pod result (it simply recognizes it as an "application/zip" and
    # not an "application/vnd.oasis.opendocument.text").
    if odf:
        mimetypeFile = os.path.join(folder, 'mimetype')
        # This file may not exist (presumably, ods files from Google Drive)
        if not os.path.exists(mimetypeFile):
            # Guess the MIME type from the extension of the zip to create. Do
            # not reuse name "f" here: it is the zip file being built.
            ext = os.path.splitext(f)[-1][1:]
            mt = open(mimetypeFile, 'w')
            mt.write(mimeTypes[ext])
            mt.close()
        zipFile.write(mimetypeFile, 'mimetype', zipfile.ZIP_STORED)
    for dir, dirnames, filenames in os.walk(folder):
        for name in filenames:
            folderName = dir[len(folder)+1:]
            # For p_odf files, ignore file "mimetype" that was already inserted
            if odf and (folderName == '') and (name == 'mimetype'): continue
            zipFile.write(os.path.join(dir, name),
                          os.path.join(folderName, name))
        if not dirnames and not filenames:
            # This is an empty leaf folder. We must create an entry in the
            # zip for it.
            folderName = dir[len(folder):]
            zInfo = zipfile.ZipInfo("%s/" % folderName, time.localtime()[:6])
            zInfo.external_attr = 48
            zipFile.writestr(zInfo, '')
    zipFile.close()
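
# Typical usage sketch (hypothetical file names): re-zip the template unzipped
# in the example above into a valid odt file.
#   zip('/tmp/result.odt', '/tmp/template', odf=True)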
# ------------------------------------------------------------------------------