2010-01-18 Miguel de Dios <miguel.dedios@artica.es>

* include/javascript/OpenLayers/*: clean up and reduce the size of the bundled
	OpenLayers distribution by removing its upstream packaging and deployment
	tools.



git-svn-id: https://svn.code.sf.net/p/pandora/code/trunk@2282 c3f86ba8-e40f-0410-aaad-9ba5e7f4b01f
mdtrooper 2010-01-18 15:42:08 +00:00
parent 9049891744
commit 072799f1e1
12 changed files with 0 additions and 3250 deletions

View File

@@ -1,14 +0,0 @@
This directory contains tools used in the packaging or deployment of OpenLayers.
Javascript minimizing tools:
* jsmin.c, jsmin.py:
jsmin.py is a direct translation of the jsmin.c code into Python. jsmin.py
will therefore run anywhere Python runs... but significantly more slowly.
* shrinksafe.py:
shrinksafe.py calls out to a third-party javascript shrinking service. It
produces output about 4% smaller (as of commit 501) for the OpenLayers
code. However, it also makes you dependent on the web service -- and since
that service sometimes goes dead, it is risky to depend on it.
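
For reference, a minimal sketch of how the Python port might be driven from
another script (illustration only, not part of the original tools; the file
names are made up, while jsmin() is the function defined in jsmin.py below):

    import jsmin                                     # the jsmin.py in this directory
    source = open("OpenLayers.js").read()            # any JavaScript source file
    compressed = jsmin.jsmin(source)                 # returns the minified string
    open("OpenLayers.min.js", "w").write(compressed)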

View File

@@ -1,251 +0,0 @@
#!/usr/bin/env python
import sys
import os
import re
import urllib2
import time
from xml.dom.minidom import Document
try:
import xml.etree.ElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
import elementtree.ElementTree as ElementTree
except ImportError:
import lxml.etree as ElementTree
missing_deps = False
try:
import simplejson
from BeautifulSoup import BeautifulSoup
except ImportError, E:
missing_deps = E
feedName = "example-list.xml"
feedPath = "http://openlayers.org/dev/examples/"
def getListOfOnlineExamples(baseUrl):
"""
useful if you want to get a list of examples from a url. not used by default.
"""
html = urllib2.urlopen(baseUrl)
soup = BeautifulSoup(html)
examples = soup.findAll('li')
examples = [example.find('a').get('href') for example in examples]
examples = [example for example in examples if example.endswith('.html')]
examples = [example for example in examples]
return examples
def getListOfExamples(relPath):
"""
returns list of .html filenames within a given path - excludes example-list.html
"""
examples = os.listdir(relPath)
examples = [example for example in examples if example.endswith('.html') and example != "example-list.html"]
return examples
def getExampleHtml(location):
"""
returns html of a specific example that is available online or locally
"""
print '.',
if location.startswith('http'):
return urllib2.urlopen(location).read()
else:
f = open(location)
html = f.read()
f.close()
return html
def extractById(soup, tagId, value=None):
"""
returns full contents of a particular tag id
"""
beautifulTag = soup.find(id=tagId)
if beautifulTag:
if beautifulTag.contents:
value = str(beautifulTag.renderContents()).strip()
value = value.replace('\t','')
value = value.replace('\n','')
return value
def getRelatedClasses(html):
"""
parses the html, and returns a list of all OpenLayers Classes
used within (i.e. which parts of OL the javascript uses).
"""
rawstr = r'''(?P<class>OpenLayers\..*?)\('''
return re.findall(rawstr, html)
def parseHtml(html,ids):
"""
returns dictionary of items of interest
"""
soup = BeautifulSoup(html)
d = {}
for tagId in ids:
d[tagId] = extractById(soup,tagId)
#classes should eventually be parsed from docs - not automatically created.
classes = getRelatedClasses(html)
d['classes'] = classes
return d
def getSvnInfo(path):
h = os.popen("svn info %s --xml" % path)
tree = ElementTree.fromstring(h.read())
h.close()
d = {
'url': tree.findtext('entry/url'),
'author': tree.findtext('entry/commit/author'),
'date': tree.findtext('entry/commit/date')
}
return d
def createFeed(examples):
doc = Document()
atomuri = "http://www.w3.org/2005/Atom"
feed = doc.createElementNS(atomuri, "feed")
feed.setAttribute("xmlns", atomuri)
title = doc.createElementNS(atomuri, "title")
title.appendChild(doc.createTextNode("OpenLayers Examples"))
feed.appendChild(title)
link = doc.createElementNS(atomuri, "link")
link.setAttribute("rel", "self")
link.setAttribute("href", feedPath + feedName)
modtime = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) # %H (24-hour clock), not %I, for a valid Atom timestamp
id = doc.createElementNS(atomuri, "id")
id.appendChild(doc.createTextNode("%s%s#%s" % (feedPath, feedName, modtime)))
feed.appendChild(id)
updated = doc.createElementNS(atomuri, "updated")
updated.appendChild(doc.createTextNode(modtime))
feed.appendChild(updated)
examples.sort(key=lambda x:x["modified"])
for example in sorted(examples, key=lambda x:x["modified"], reverse=True):
entry = doc.createElementNS(atomuri, "entry")
title = doc.createElementNS(atomuri, "title")
title.appendChild(doc.createTextNode(example["title"] or example["example"]))
entry.appendChild(title)
link = doc.createElementNS(atomuri, "link")
link.setAttribute("href", "%s%s" % (feedPath, example["example"]))
entry.appendChild(link)
summary = doc.createElementNS(atomuri, "summary")
summary.appendChild(doc.createTextNode(example["shortdesc"] or example["example"]))
entry.appendChild(summary)
updated = doc.createElementNS(atomuri, "updated")
updated.appendChild(doc.createTextNode(example["modified"]))
entry.appendChild(updated)
author = doc.createElementNS(atomuri, "author")
name = doc.createElementNS(atomuri, "name")
name.appendChild(doc.createTextNode(example["author"]))
author.appendChild(name)
entry.appendChild(author)
id = doc.createElementNS(atomuri, "id")
id.appendChild(doc.createTextNode("%s%s#%s" % (feedPath, example["example"], example["modified"])))
entry.appendChild(id)
feed.appendChild(entry)
doc.appendChild(feed)
return doc
def wordIndex(examples):
"""
Create an inverted index based on words in title and shortdesc. Keys are
lower cased words. Values are dictionaries with example index keys and
count values.
"""
index = {}
unword = re.compile("\\W+")
keys = ["shortdesc", "title"]
for i in range(len(examples)):
for key in keys:
text = examples[i][key]
if text:
words = unword.split(text)
for word in words:
if word:
word = word.lower()
if index.has_key(word):
if index[word].has_key(i):
index[word][i] += 1
else:
index[word][i] = 1
else:
index[word] = {i: 1}
return index
if __name__ == "__main__":
if missing_deps:
print "This script requires simplejson and BeautifulSoup. You don't have them. \n(%s)" % E
sys.exit()
if len(sys.argv) > 1:
outFile = open(sys.argv[1],'w')
else:
outFile = open('../examples/example-list.js','w')
examplesLocation = '../examples'
print 'Reading examples from %s and writing out to %s' % (examplesLocation, outFile.name)
exampleList = []
docIds = ['title','shortdesc']
#comment out option to create docs from online resource
#examplesLocation = 'http://svn.openlayers.org/sandbox/docs/examples/'
#examples = getListOfOnlineExamples(examplesLocation)
examples = getListOfExamples(examplesLocation)
modtime = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
for example in examples:
url = os.path.join(examplesLocation,example)
html = getExampleHtml(url)
tagvalues = parseHtml(html,docIds)
tagvalues['example'] = example
# add in svn info
d = getSvnInfo(url)
tagvalues["modified"] = d["date"] or modtime
tagvalues["author"] = d["author"] or "anonymous"
tagvalues['link'] = example
exampleList.append(tagvalues)
print
exampleList.sort(key=lambda x:x['example'].lower())
index = wordIndex(exampleList)
json = simplejson.dumps({"examples": exampleList, "index": index})
#give the json a global variable we can use in our js. This should be replaced or made optional.
json = 'var info=' + json
outFile.write(json)
outFile.close()
print "writing feed to ../examples/%s " % feedName
atom = open('../examples/%s' % feedName, 'w')
doc = createFeed(exampleList)
atom.write(doc.toxml())
atom.close()
print 'complete'

View File

@@ -1,272 +0,0 @@
/* jsmin.c
2006-05-04
Copyright (c) 2002 Douglas Crockford (www.crockford.com)
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
The Software shall be used for Good, not Evil.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include <stdlib.h>
#include <stdio.h>
static int theA;
static int theB;
static int theLookahead = EOF;
/* isAlphanum -- return true if the character is a letter, digit, underscore,
dollar sign, or non-ASCII character.
*/
static int
isAlphanum(int c)
{
return ((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') ||
(c >= 'A' && c <= 'Z') || c == '_' || c == '$' || c == '\\' ||
c > 126);
}
/* get -- return the next character from stdin. Watch out for lookahead. If
the character is a control character, translate it to a space or
linefeed.
*/
static int
get()
{
int c = theLookahead;
theLookahead = EOF;
if (c == EOF) {
c = getc(stdin);
}
if (c >= ' ' || c == '\n' || c == EOF) {
return c;
}
if (c == '\r') {
return '\n';
}
return ' ';
}
/* peek -- get the next character without getting it.
*/
static int
peek()
{
theLookahead = get();
return theLookahead;
}
/* next -- get the next character, excluding comments. peek() is used to see
if a '/' is followed by a '/' or '*'.
*/
static int
next()
{
int c = get();
if (c == '/') {
switch (peek()) {
case '/':
for (;;) {
c = get();
if (c <= '\n') {
return c;
}
}
case '*':
get();
for (;;) {
switch (get()) {
case '*':
if (peek() == '/') {
get();
return ' ';
}
break;
case EOF:
fprintf(stderr, "Error: JSMIN Unterminated comment.\n");
exit(1);
}
}
default:
return c;
}
}
return c;
}
/* action -- do something! What you do is determined by the argument:
1 Output A. Copy B to A. Get the next B.
2 Copy B to A. Get the next B. (Delete A).
3 Get the next B. (Delete B).
action treats a string as a single character. Wow!
action recognizes a regular expression if it is preceded by ( or , or =.
*/
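/* Note: the switch below relies on intentional fall-through -- case 1
   performs steps 1, 2 and 3; case 2 performs steps 2 and 3; case 3
   performs only step 3. The missing break statements are deliberate.
*/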
static void
action(int d)
{
switch (d) {
case 1:
putc(theA, stdout);
case 2:
theA = theB;
if (theA == '\'' || theA == '"') {
for (;;) {
putc(theA, stdout);
theA = get();
if (theA == theB) {
break;
}
if (theA <= '\n') {
fprintf(stderr,
"Error: JSMIN unterminated string literal: %c\n", theA);
exit(1);
}
if (theA == '\\') {
putc(theA, stdout);
theA = get();
}
}
}
case 3:
theB = next();
if (theB == '/' && (theA == '(' || theA == ',' || theA == '=' ||
theA == ':' || theA == '[' || theA == '!' || theA == '&' ||
theA == '|')) {
putc(theA, stdout);
putc(theB, stdout);
for (;;) {
theA = get();
if (theA == '/') {
break;
} else if (theA =='\\') {
putc(theA, stdout);
theA = get();
} else if (theA <= '\n') {
fprintf(stderr,
"Error: JSMIN unterminated Regular Expression literal.\n", theA);
exit(1);
}
putc(theA, stdout);
}
theB = next();
}
}
}
/* jsmin -- Copy the input to the output, deleting the characters which are
insignificant to JavaScript. Comments will be removed. Tabs will be
replaced with spaces. Carriage returns will be replaced with linefeeds.
Most spaces and linefeeds will be removed.
*/
static void
jsmin()
{
theA = '\n';
action(3);
while (theA != EOF) {
switch (theA) {
case ' ':
if (isAlphanum(theB)) {
action(1);
} else {
action(2);
}
break;
case '\n':
switch (theB) {
case '{':
case '[':
case '(':
case '+':
case '-':
action(1);
break;
case ' ':
action(3);
break;
default:
if (isAlphanum(theB)) {
action(1);
} else {
action(2);
}
}
break;
default:
switch (theB) {
case ' ':
if (isAlphanum(theA)) {
action(1);
break;
}
action(3);
break;
case '\n':
switch (theA) {
case '}':
case ']':
case ')':
case '+':
case '-':
case '"':
case '\'':
action(1);
break;
default:
if (isAlphanum(theA)) {
action(1);
} else {
action(3);
}
}
break;
default:
action(1);
break;
}
}
}
}
/* main -- Output any command line arguments as comments
and then minify the input.
*/
extern int
main(int argc, char* argv[])
{
int i;
for (i = 1; i < argc; i += 1) {
fprintf(stdout, "// %s\n", argv[i]);
}
jsmin();
return 0;
}

View File

@@ -1,216 +0,0 @@
#!/usr/bin/python
# This code originally comes from jsmin by Douglas Crockford; it was
# translated to Python by Baruch Even. The original code had the following
# copyright and license.
#
# /* jsmin.c
# 2007-01-08
#
# Copyright (c) 2002 Douglas Crockford (www.crockford.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# The Software shall be used for Good, not Evil.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# */
from StringIO import StringIO
def jsmin(js):
ins = StringIO(js)
outs = StringIO()
JavascriptMinify().minify(ins, outs)
str = outs.getvalue()
if len(str) > 0 and str[0] == '\n':
str = str[1:]
return str
def isAlphanum(c):
"""return true if the character is a letter, digit, underscore,
dollar sign, or non-ASCII character.
"""
return ((c >= 'a' and c <= 'z') or (c >= '0' and c <= '9') or
(c >= 'A' and c <= 'Z') or c == '_' or c == '$' or c == '\\' or (c is not None and ord(c) > 126));
class UnterminatedComment(Exception):
pass
class UnterminatedStringLiteral(Exception):
pass
class UnterminatedRegularExpression(Exception):
pass
class JavascriptMinify(object):
def _outA(self):
self.outstream.write(self.theA)
def _outB(self):
self.outstream.write(self.theB)
def _get(self):
"""return the next character from stdin. Watch out for lookahead. If
the character is a control character, translate it to a space or
linefeed.
"""
c = self.theLookahead
self.theLookahead = None
if c == None:
c = self.instream.read(1)
if c >= ' ' or c == '\n':
return c
if c == '': # EOF
return '\000'
if c == '\r':
return '\n'
return ' '
def _peek(self):
self.theLookahead = self._get()
return self.theLookahead
def _next(self):
"""get the next character, excluding comments. peek() is used to see
if a '/' is followed by a '/' or '*'.
"""
c = self._get()
if c == '/':
p = self._peek()
if p == '/':
c = self._get()
while c > '\n':
c = self._get()
return c
if p == '*':
c = self._get()
while 1:
c = self._get()
if c == '*':
if self._peek() == '/':
self._get()
return ' '
if c == '\000':
raise UnterminatedComment()
return c
def _action(self, action):
"""do something! What you do is determined by the argument:
1 Output A. Copy B to A. Get the next B.
2 Copy B to A. Get the next B. (Delete A).
3 Get the next B. (Delete B).
action treats a string as a single character. Wow!
action recognizes a regular expression if it is preceded by ( or , or =.
"""
if action <= 1:
self._outA()
if action <= 2:
self.theA = self.theB
if self.theA == "'" or self.theA == '"':
while 1:
self._outA()
self.theA = self._get()
if self.theA == self.theB:
break
if self.theA <= '\n':
raise UnterminatedStringLiteral()
if self.theA == '\\':
self._outA()
self.theA = self._get()
if action <= 3:
self.theB = self._next()
if self.theB == '/' and (self.theA == '(' or self.theA == ',' or
self.theA == '=' or self.theA == ':' or
self.theA == '[' or self.theA == '?' or
self.theA == '!' or self.theA == '&' or
self.theA == '|'):
self._outA()
self._outB()
while 1:
self.theA = self._get()
if self.theA == '/':
break
elif self.theA == '\\':
self._outA()
self.theA = self._get()
elif self.theA <= '\n':
raise UnterminatedRegularExpression()
self._outA()
self.theB = self._next()
def _jsmin(self):
"""Copy the input to the output, deleting the characters which are
insignificant to JavaScript. Comments will be removed. Tabs will be
replaced with spaces. Carriage returns will be replaced with linefeeds.
Most spaces and linefeeds will be removed.
"""
self.theA = '\n'
self._action(3)
while self.theA != '\000':
if self.theA == ' ':
if isAlphanum(self.theB):
self._action(1)
else:
self._action(2)
elif self.theA == '\n':
if self.theB in ['{', '[', '(', '+', '-']:
self._action(1)
elif self.theB == ' ':
self._action(3)
else:
if isAlphanum(self.theB):
self._action(1)
else:
self._action(2)
else:
if self.theB == ' ':
if isAlphanum(self.theA):
self._action(1)
else:
self._action(3)
elif self.theB == '\n':
if self.theA in ['}', ']', ')', '+', '-', '"', '\'']:
self._action(1)
else:
if isAlphanum(self.theA):
self._action(1)
else:
self._action(3)
else:
self._action(1)
def minify(self, instream, outstream):
self.instream = instream
self.outstream = outstream
self.theA = None
self.theB = None
self.theLookahead = None
self._jsmin()
self.instream.close()
if __name__ == '__main__':
import sys
jsm = JavascriptMinify()
jsm.minify(sys.stdin, sys.stdout)

View File

@@ -1,252 +0,0 @@
#!/usr/bin/env python
#
# Merge multiple JavaScript source code files into one.
#
# Usage:
# This script requires source files to have dependencies specified in them.
#
# Dependencies are specified with a comment of the form:
#
# // @requires <file path>
#
# e.g.
#
# // @requires Geo/DataSource.js
#
# This script should be executed like so:
#
# mergejs.py <output.js> <directory> [...]
#
# e.g.
#
# mergejs.py openlayers.js Geo/ CrossBrowser/
#
# This example will cause the script to walk the `Geo` and
# `CrossBrowser` directories--and subdirectories thereof--and import
# all `*.js` files encountered. The dependency declarations will be extracted
# and then the source code from imported files will be output to
# a file named `openlayers.js` in an order which fulfils the dependencies
# specified.
#
#
# Note: This is a very rough initial version of this code.
#
# -- Copyright 2005-2008 MetaCarta, Inc. / OpenLayers project --
#
# TODO: Allow files to be excluded. e.g. `Crossbrowser/DebugMode.js`?
# TODO: Report error when dependency can not be found rather than KeyError.
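#
# Illustrative invocation with a configuration file (the name "library.cfg"
# is made up; see the Config class below for the expected file format):
#
#   mergejs.py -c library.cfg OpenLayers.js ../lib/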
import re
import os
import sys
SUFFIX_JAVASCRIPT = ".js"
RE_REQUIRE = "@requires:? (.*)\n" # TODO: Ensure in comment?
class SourceFile:
"""
Represents a Javascript source code file.
"""
def __init__(self, filepath, source):
"""
"""
self.filepath = filepath
self.source = source
self.requiredBy = []
def _getRequirements(self):
"""
Extracts the dependencies specified in the source code and returns
a list of them.
"""
# TODO: Cache?
return re.findall(RE_REQUIRE, self.source)
requires = property(fget=_getRequirements, doc="")
def usage(filename):
"""
Displays a usage message.
"""
print "%s [-c <config file>] <output.js> <directory> [...]" % filename
class Config:
"""
Represents a parsed configuration file.
A configuration file should be of the following form:
[first]
3rd/prototype.js
core/application.js
core/params.js
# A comment
[last]
core/api.js # Another comment
[include]
[exclude]
3rd/logger.js
All four headings ([first], [last], [include], [exclude]) are required,
in that order, even when a section is empty.
The files listed in the `first` section will be forced to load
*before* all other files (in the order listed). The files in the `last`
section will be forced to load *after* all the other files (in the
order listed).
If the `include` section lists any files, only those files (plus the
files in `first`) are collected from the source directory.
The files listed in the `exclude` section will not be imported.
Any text appearing after a # symbol indicates a comment.
"""
def __init__(self, filename):
"""
Parses the content of the named file and stores the values.
"""
lines = [re.sub("#.*?$", "", line).strip() # Assumes end-of-line character is present
for line in open(filename)
if line.strip() and not line.strip().startswith("#")] # Skip blank lines and comments
self.forceFirst = lines[lines.index("[first]") + 1:lines.index("[last]")]
self.forceLast = lines[lines.index("[last]") + 1:lines.index("[include]")]
self.include = lines[lines.index("[include]") + 1:lines.index("[exclude]")]
self.exclude = lines[lines.index("[exclude]") + 1:]
def run (sourceDirectory, outputFilename = None, configFile = None):
cfg = None
if configFile:
cfg = Config(configFile)
allFiles = []
## Find all the Javascript source files
for root, dirs, files in os.walk(sourceDirectory):
for filename in files:
if filename.endswith(SUFFIX_JAVASCRIPT) and not filename.startswith("."):
filepath = os.path.join(root, filename)[len(sourceDirectory)+1:]
filepath = filepath.replace("\\", "/")
if cfg and cfg.include:
if filepath in cfg.include or filepath in cfg.forceFirst:
allFiles.append(filepath)
elif (not cfg) or (filepath not in cfg.exclude):
allFiles.append(filepath)
## Header inserted at the start of each file in the output
HEADER = "/* " + "=" * 70 + "\n %s\n" + " " + "=" * 70 + " */\n\n"
files = {}
order = [] # List of filepaths to output, in a dependency satisfying order
## Import file source code
## TODO: Do import when we walk the directories above?
for filepath in allFiles:
print "Importing: %s" % filepath
fullpath = os.path.join(sourceDirectory, filepath).strip()
content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
files[filepath] = SourceFile(filepath, content) # TODO: Chop path?
print
from toposort import toposort
complete = False
resolution_pass = 1
while not complete:
order = [] # List of filepaths to output, in a dependency satisfying order
nodes = []
routes = []
## Resolve the dependencies
print "Resolution pass %s... " % resolution_pass
resolution_pass += 1
for filepath, info in files.items():
nodes.append(filepath)
for neededFilePath in info.requires:
routes.append((neededFilePath, filepath))
for dependencyLevel in toposort(nodes, routes):
for filepath in dependencyLevel:
order.append(filepath)
if not files.has_key(filepath):
print "Importing: %s" % filepath
fullpath = os.path.join(sourceDirectory, filepath).strip()
content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
files[filepath] = SourceFile(filepath, content) # TODO: Chop path?
# Double check all dependencies have been met
complete = True
try:
for fp in order:
if max([order.index(rfp) for rfp in files[fp].requires] +
[order.index(fp)]) != order.index(fp):
complete = False
except:
complete = False
print
## Move forced first and last files to the required position
if cfg:
print "Re-ordering files..."
order = cfg.forceFirst + [item
for item in order
if ((item not in cfg.forceFirst) and
(item not in cfg.forceLast))] + cfg.forceLast
print
## Output the files in the determined order
result = []
for fp in order:
f = files[fp]
print "Exporting: ", f.filepath
result.append(HEADER % f.filepath)
source = f.source
result.append(source)
if not source.endswith("\n"):
result.append("\n")
print "\nTotal files merged: %d " % len(files)
if outputFilename:
print "\nGenerating: %s" % (outputFilename)
open(outputFilename, "w").write("".join(result))
return "".join(result)
if __name__ == "__main__":
import getopt
options, args = getopt.getopt(sys.argv[1:], "-c:")
try:
outputFilename = args[0]
except IndexError:
usage(sys.argv[0])
raise SystemExit
else:
sourceDirectory = args[1]
if not sourceDirectory:
usage(sys.argv[0])
raise SystemExit
configFile = None
if options and options[0][0] == "-c":
configFile = options[0][1]
print "Parsing configuration file: %s" % filename
run( sourceDirectory, outputFilename, configFile )

View File

@@ -1,47 +0,0 @@
# Minimal Python Minimizer
# Copyright 2008, Christopher Schmidt
# Released under the MIT License
#
# Taken from: http://svn.crschmidt.net/personal/python/minimize.py
# $Id: minimize.py 6 2008-01-03 06:33:35Z crschmidt $
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import re
def strip_comments_helper(data):
"""remove all /* */ format comments and surrounding whitespace."""
p = re.compile(r'[\s]*/\*.*?\*/[\s]*', re.DOTALL)
return p.sub('',data)
def minimize(data, exclude=None):
"""Central function call. This will call all other compression
functions. To add further compression algorithms, simply add
functions whose names end in _helper which take a string as input
and return a more compressed string as output."""
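# For illustration only (not part of the original module): an additional
# helper would look like
#   def collapse_blank_lines_helper(data):
#       return re.sub(r'\n\s*\n', '\n', data)
# and would be picked up automatically by the globals() scan below, unless
# "collapse_blank_lines" is listed in `exclude`.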
for key, item in globals().iteritems():
if key.endswith("_helper"):
func_key = key[:-7]
if not exclude or not func_key in exclude:
data = item(data)
return data
if __name__ == "__main__":
import sys
print minimize(open(sys.argv[1]).read())

View File

@@ -1,43 +0,0 @@
import re
import os
def run():
sourceDirectory = "../lib/OpenLayers"
allFiles = []
SUFFIX_JAVASCRIPT = ".js"
## Find all the Javascript source files
for root, dirs, files in os.walk(sourceDirectory):
for filename in files:
if filename.endswith(SUFFIX_JAVASCRIPT) and not filename.startswith("."):
filepath = os.path.join(root, filename)[len(sourceDirectory)+1:]
filepath = filepath.replace("\\", "/")
data = open(os.path.join(sourceDirectory, filepath)).read()
parents = re.search("OpenLayers.Class\((.*?){", data,
re.DOTALL)
if parents:
parents = [x.strip() for x in parents.group(1).strip().strip(",").split(",")]
else:
parents = []
cls = "OpenLayers.%s" % filepath.strip(".js").replace("/", ".")
allFiles.append([cls, parents])
return allFiles
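# What follows prints a Graphviz "dot" description of the OpenLayers class
# hierarchy to stdout: one node per class and an edge from each class to each
# of its parents. It is typically piped into dot, e.g. (output name made up):
#   python <this script> | dot -Tpng -o hierarchy.png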
print """
digraph name {
fontname = "Helvetica"
fontsize = 8
K = 0.6
node [
fontname = "Helvetica"
fontsize = 8
shape = "plaintext"
]
"""
for i in run():
print i[0].replace(".", "_")
for item in i[1]:
if not item: continue
print "%s -> %s" % (i[0].replace(".","_"), item.replace(".", "_"))
print "; "
print """}"""

View File

@@ -1,29 +0,0 @@
#!/bin/sh
VERSION=$1
svn export http://svn.openlayers.org/tags/openlayers/release-$VERSION OpenLayers-$VERSION
cd OpenLayers-$VERSION/build
./build.py full
cp OpenLayers.js ..
cd ..
mkdir doc/devdocs
mkdir doc/apidocs
rm tools/*.pyc
mkdir /www/openlayers/htdocs/api/$VERSION
cp OpenLayers.js /www/openlayers/htdocs/api/$VERSION
cp -a img/ /www/openlayers/htdocs/api/$VERSION
cp -a theme/ /www/openlayers/htdocs/api/$VERSION
cd ..
~/nd/NaturalDocs -i OpenLayers-$VERSION/lib -o HTML OpenLayers-$VERSION/doc/devdocs -p OpenLayers-$VERSION/doc_config -s Small OL
~/nd/NaturalDocs -i OpenLayers-$VERSION/lib -o HTML OpenLayers-$VERSION/doc/apidocs -p OpenLayers-$VERSION/apidoc_config -s Small OL
tar cvfz OpenLayers-$VERSION.tar.gz OpenLayers-$VERSION/
zip -9r OpenLayers-$VERSION.zip OpenLayers-$VERSION/
cp OpenLayers-$VERSION.* /www/openlayers/htdocs/download

View File

@@ -1,54 +0,0 @@
#!/usr/bin/env python
#
# Script to provide a wrapper around the ShrinkSafe "web service"
# <http://shrinksafe.dojotoolkit.org/>
#
#
# We use this script for two reasons:
#
# * This avoids having to install and configure Java and the standalone
# ShrinkSafe utility.
#
# * The current ShrinkSafe standalone utility was broken when we last
# used it.
#
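# Typical invocation (file names are illustrative):
#
#   python shrinksafe.py OpenLayers.js > OpenLayers.shrunk.js
#
# or, reading the source from stdin:
#
#   cat OpenLayers.js | python shrinksafe.py - > OpenLayers.shrunk.js
#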
import sys
import urllib
import urllib2
URL_SHRINK_SAFE = "http://shrinksafe.dojotoolkit.org/shrinksafe.php"
# This would normally be dynamically generated:
BOUNDARY_MARKER = "---------------------------72288400411964641492083565382"
if __name__ == "__main__":
## Grab the source code
try:
sourceFilename = sys.argv[1]
except:
print "Usage: %s (<source filename>|-)" % sys.argv[0]
raise SystemExit
if sourceFilename == "-":
sourceCode = sys.stdin.read()
sourceFilename = "stdin.js"
else:
sourceCode = open(sourceFilename).read()
## Create the request replicating posting of the form from the web page
request = urllib2.Request(url=URL_SHRINK_SAFE)
request.add_header("Content-Type",
"multipart/form-data; boundary=%s" % BOUNDARY_MARKER)
request.add_data("""
--%s
Content-Disposition: form-data; name="shrinkfile[]"; filename="%s"
Content-Type: application/x-javascript
%s
""" % (BOUNDARY_MARKER, sourceFilename, sourceCode))
## Deliver the result
print urllib2.urlopen(request).read(),

View File

@@ -1,260 +0,0 @@
#
# According to <http://www.vrplumber.com/programming/> this file
# is licensed under a BSD-style license. We only use the section
# originally by Tim Peters.
#
# TODO: The use of this code needs to be okayed by someone.
#
class RecursionError( OverflowError, ValueError ):
'''Unable to calculate result because of recursive structure'''
def sort(nodes, routes, noRecursion=1):
'''Passed a list of node IDs and a list of source,dest ID routes
attempt to create a list of stages where each sub list
is one stage in a process.
'''
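# Illustration (not from the original file):
#   sort(['a', 'b', 'c'], [('a', 'b'), ('a', 'c')])
# returns [['a'], ['b', 'c']]: 'a' has no incoming routes and forms the first
# stage; its children form the second.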
children, parents = _buildChildrenLists(routes)
# first stage is those nodes
# having no incoming routes...
stage = []
stages = [stage]
taken = []
for node in nodes:
if (not parents.get(node)):
stage.append (node)
if nodes and not stage:
# there is no element which does not depend on
# some other element!!!
stage.append( nodes[0])
taken.extend( stage )
nodes = filter ( lambda x, l=stage: x not in l, nodes )
while nodes:
previousStageChildren = []
nodelen = len(nodes)
# second stage are those nodes
# which are direct children of the first stage
for node in stage:
for child in children.get (node, []):
if child not in previousStageChildren and child not in taken:
previousStageChildren.append(child)
elif child in taken and noRecursion:
raise RecursionError( (child, node) )
# unless they are children of other direct children...
# TODO, actually do that...
stage = previousStageChildren
removes = []
for current in stage:
currentParents = parents.get( current, [] )
for parent in currentParents:
if parent in stage and parent != current:
# might wind up removing current...
if not current in parents.get(parent, []):
# is not mutually dependent...
removes.append( current )
for remove in removes:
while remove in stage:
stage.remove( remove )
stages.append( stage)
taken.extend( stage )
nodes = filter ( lambda x, l=stage: x not in l, nodes )
if nodelen == len(nodes):
if noRecursion:
raise RecursionError( nodes )
else:
stages.append( nodes[:] )
nodes = []
return stages
def _buildChildrenLists (routes):
childrenTable = {}
parentTable = {}
for sourceID,destinationID in routes:
currentChildren = childrenTable.get( sourceID, [])
currentParents = parentTable.get( destinationID, [])
if not destinationID in currentChildren:
currentChildren.append ( destinationID)
if not sourceID in currentParents:
currentParents.append ( sourceID)
childrenTable[sourceID] = currentChildren
parentTable[destinationID] = currentParents
return childrenTable, parentTable
def toposort (nodes, routes, noRecursion=1):
'''Topological sort from Tim Peters, fairly efficient
in comparison (it seems).'''
#first calculate the recursion depth
dependencies = {}
inversedependencies = {}
if not nodes:
return []
if not routes:
return [nodes]
for node in nodes:
dependencies[ node ] = (0, node)
inversedependencies[ node ] = []
for depended, depends in routes:
# is it a null rule
try:
newdependencylevel, object = dependencies.get ( depends, (0, depends))
except TypeError:
print depends
raise
dependencies[ depends ] = (newdependencylevel + 1, depends)
# "dependency (existence) of depended-on"
newdependencylevel,object = dependencies.get ( depended, (0, depended) )
dependencies[ depended ] = (newdependencylevel, depended)
# Inverse dependency set up
dependencieslist = inversedependencies.get ( depended, [])
dependencieslist.append (depends)
inversedependencies[depended] = dependencieslist
### Now we do the actual sorting
# The first task is to create the sortable
# list of dependency-levels
sortinglist = dependencies.values()
sortinglist.sort ()
output = []
while sortinglist:
deletelist = []
generation = []
output.append( generation)
while sortinglist and sortinglist[0][0] == 0:
number, object = sortinglist[0]
generation.append ( object )
deletelist.append( object )
for inverse in inversedependencies.get(object, () ):
try:
oldcount, inverse = dependencies [ inverse]
if oldcount > 0:
# will be dealt with on later pass
dependencies [ inverse] = (oldcount-1, inverse)
else:
# will be dealt with on this pass,
# so needs not to be in the sorting list next time
deletelist.append( inverse )
# just in case a loop comes through
inversedependencies[object] = []
except KeyError:
# dealing with a recursion-breaking run...
pass
del sortinglist [0]
# if no elements could be deleted, then
# there is something which depends upon itself
if not deletelist:
if noRecursion:
raise RecursionError( sortinglist )
else:
# hack so that something gets deleted...
## import pdb
## pdb.set_trace()
dependencies[sortinglist[0][1]] = (0,sortinglist[0][1])
# delete the items that were dealt with
for item in deletelist:
try:
del dependencies [ item ]
except KeyError:
pass
# need to recreate the sortinglist
sortinglist = dependencies.values()
if not generation:
output.remove( generation )
sortinglist.sort ()
return output
if __name__ == "__main__":
nodes = ['a', 'b', 'c', 'd', 'e', 'f']
route = [('a', 'b'), ('b', 'c'), ('b', 'd'), ('e','f')]
for x in toposort( nodes, route):
for a in x:
print a
raise SystemExit
import pprint, traceback
nodes= [ 0,1,2,3,4,5 ]
testingValues = [
[ (0,1),(1,2),(2,3),(3,4),(4,5)],
[ (0,1),(0,2),(1,2),(3,4),(4,5)],
[
(0,1),
(0,2),
(0,2),
(2,4),
(2,5),
(3,2),
(0,3)],
[
(0,1), # 3-element cycle test, no orphan nodes
(1,2),
(2,0),
(2,4),
(2,5),
(3,2),
(0,3)],
[
(0,1),
(1,1),
(1,1),
(1,4),
(1,5),
(1,2),
(3,1),
(2,1),
(2,0)],
[
(0,1),
(1,0),
(0,2),
(0,3),
],
[
(0,1),
(1,0),
(0,2),
(3,1),
],
]
print 'sort, no recursion allowed'
for index in range(len(testingValues)):
## print ' %s -- %s'%( index, testingValues[index])
try:
print ' ', sort( nodes, testingValues[index] )
except:
print 'exception raised'
print 'toposort, no recursion allowed'
for index in range(len(testingValues)):
## print ' %s -- %s'%( index, testingValues[index])
try:
print ' ', toposort( nodes, testingValues[index] )
except:
print 'exception raised'
print 'sort, recursion allowed'
for index in range(len(testingValues)):
## print ' %s -- %s'%( index, testingValues[index])
try:
print ' ', sort( nodes, testingValues[index],0 )
except:
print 'exception raised'
print 'toposort, recursion allowed'
for index in range(len(testingValues)):
## print ' %s -- %s'%( index, testingValues[index])
try:
print ' ', toposort( nodes, testingValues[index],0 )
except:
print 'exception raised'

View File

@@ -1,45 +0,0 @@
#!/bin/sh
# Used to update http://openlayers.org/dev/
svn up /www/openlayers/docs/dev;
# Get current 'Last Changed Rev'
REV=`svn info /www/openlayers/docs/dev/ | grep 'Last Changed Rev' | awk '{print $4}'`
# Get the last svn rev
touch /tmp/ol_svn_rev
OLD_REV="o`cat /tmp/ol_svn_rev`"
# If they're not equal, do some work.
if [ ! o$REV = $OLD_REV ]; then
cd /www/openlayers/docs/dev/tools/
python exampleparser.py
cd /www/openlayers/docs/dev/build
./build.py
cp OpenLayers.js ..
cd ..
sed -i -e 's!../lib/OpenLayers.js!../OpenLayers.js!' examples/*.html
perl /home/crschmidt/NaturalDocs -i /www/openlayers/docs/dev/lib -o HTML /www/openlayers/dev/apidocs -p /www/openlayers/docs/dev/apidoc_config -s Default OL >/dev/null
perl /home/crschmidt/NaturalDocs -i /www/openlayers/docs/dev/lib -o HTML /www/openlayers/dev/docs -p /www/openlayers/docs/dev/doc_config -s Default OL >/dev/null
# Record the revision
echo -n $REV > /tmp/ol_svn_rev
fi
svn up /www/openlayers/documentation-checkout
REV=`svn info /www/openlayers/documentation-checkout | grep 'Last Changed Rev' | awk '{print $4}'`
# Get the last svn rev
touch /tmp/ol_doc_rev
OLD_REV="o`cat /tmp/ol_doc_rev`"
# If they're not equal, do some work.
if [ ! o$REV = $OLD_REV ]; then
cd /www/openlayers/documentation-checkout
make html > /dev/null
cp -r _build/html/* /www/openlayers/documentation
echo -n $REV > /tmp/ol_doc_rev
fi