from common import *
from editorcommon import *
import weakref
class KPEditorObject(KPEditorItem):
SNAP_TO = (24,24)
def __init__(self, obj, layer):
KPEditorItem.__init__(self)
obj.qtItem = self
self._objRef = weakref.ref(obj)
self._layerRef = weakref.ref(layer)
self._updatePosition()
self._updateSize()
self.setAcceptHoverEvents(True)
self.resizing = None
if not hasattr(KPEditorObject, 'SELECTION_PEN'):
KPEditorObject.SELECTION_PEN = QtGui.QPen(Qt.green, 1, Qt.DotLine)
# I don't bother setting the ZValue because it doesn't quite matter:
# only one layer's objects are ever clickable, and drawBackground takes
# care of the layered drawing
def _updatePosition(self):
self.ignoreMovement = True
x,y = self._objRef().position
self.setPos(x*24, y*24)
self.ignoreMovement = False
def _updateSize(self):
self.prepareGeometryChange()
obj = self._objRef()
w,h = obj.size
self._boundingRect = QtCore.QRectF(0, 0, w*24, h*24)
self._selectionRect = QtCore.QRectF(0, 0, w*24-1, h*24-1)
self._resizerEndXY = (w*24-5, h*24-5)
def paint(self, painter, option, widget):
if self.isSelected():
painter.setPen(self.SELECTION_PEN)
painter.drawRect(self._selectionRect)
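# Resizer hit codes returned by resizerPortionAt (inherited from
# KPEditorItem); this layout is inferred from the cursor and side
# mappings below: 1 = top-left, 2 = top-right, 3 = bottom-left,
# 4 = bottom-right, 5 = top, 6 = bottom, 7 = left, 8 = right.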
def hoverMoveEvent(self, event):
if self._layerRef() != KP.mapScene.currentLayer:
self.setCursor(Qt.ArrowCursor)
return
pos = event.pos()
bit = self.resizerPortionAt(pos.x(), pos.y())
if bit == 1 or bit == 4:
self.setCursor(Qt.SizeFDiagCursor)
elif bit == 2 or bit == 3:
self.setCursor(Qt.SizeBDiagCursor)
elif bit == 7 or bit == 8:
self.setCursor(Qt.SizeHorCursor)
elif bit == 5 or bit == 6:
self.setCursor(Qt.SizeVerCursor)
else:
self.setCursor(Qt.ArrowCursor)
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton:
pos = event.pos()
bit = self.resizerPortionAt(pos.x(), pos.y())
if self._layerRef() == KP.mapScene.currentLayer and bit:
event.accept()
x, xSide, y, ySide = False, None, False, None
if bit == 1 or bit == 7 or bit == 3:
x, xSide = True, 1
elif bit == 2 or bit == 4 or bit == 8:
x, xSide = True, 0
if bit == 1 or bit == 2 or bit == 5:
y, ySide = True, 1
elif bit == 3 or bit == 4 or bit == 6:
y, ySide = True, 0
self.resizing = (x, xSide, y, ySide)
return
KPEditorItem.mousePressEvent(self, event)
def _tryAndResize(self, obj, axisIndex, mousePosition, stationarySide):
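# Worked example for the left/top case (stationarySide == 1): with
# objPosition = 4 and objSize = 3, the right side sits at 4 + 3 - 1 = 6;
# dragging the left edge to grid cell 2 yields newPosition = 2 and
# newSize = 6 - 2 + 1 = 5.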
objPosition = obj.position[axisIndex]
objSize = obj.size[axisIndex]
if stationarySide == 0:
# Resize the right/bottom side
relativeMousePosition = mousePosition - objPosition
newSize = relativeMousePosition + 1
if newSize == objSize or newSize < 1:
return False
if axisIndex == 1:
obj.size = (obj.size[0], newSize)
else:
obj.size = (newSize, obj.size[1])
else:
# Resize the left/top side
rightSide = objPosition + objSize - 1
newLeftSide = mousePosition
newPosition = newLeftSide
newSize = rightSide - newLeftSide + 1
if newSize < 1:
return False
if newPosition == objPosition and newSize == objSize:
return False
if axisIndex == 1:
obj.position = (obj.position[0], newPosition)
obj.size = (obj.size[0], newSize)
else:
obj.position = (newPosition, obj.position[1])
obj.size = (newSize, obj.size[1])
return True
def mouseMoveEvent(self, event):
if self.resizing:
obj = self._objRef()
scenePos = event.scenePos()
hasChanged = False
resizeX, xSide, resizeY, ySide = self.resizing
if resizeX:
hasChanged |= self._tryAndResize(obj, 0, int(scenePos.x() / 24), xSide)
if resizeY:
hasChanged |= self._tryAndResize(obj, 1, int(scenePos.y() / 24), ySide)
if hasChanged:
obj.updateCache()
self._layerRef().updateCache()
self._updatePosition()
self._updateSize()
else:
KPEditorItem.mouseMoveEvent(self, event)
def mouseReleaseEvent(self, event):
if self.resizing and event.button() == Qt.LeftButton:
self.resizing = None
else:
KPEditorItem.mouseReleaseEvent(self, event)
def _itemMoved(self, oldX, oldY, newX, newY):
obj = self._objRef()
obj.position = (newX/24, newY/24)
self._layerRef().updateCache()
def remove(self, withItem=False):
obj = self._objRef()
layer = self._layerRef()
layer.objects.remove(obj)
layer.updateCache()
if withItem:
self.scene().removeItem(self)
import cloud_detection_new as cloud_detection
from matplotlib import pyplot as plt
import views
from skimage import exposure
nir = cloud_detection.get_nir()[0:600,2000:2600]
red = cloud_detection.get_red()[0:600,2000:2600]
green = cloud_detection.get_green()[0:600,2000:2600]
blue = cloud_detection.get_blue()[0:600,2000:2600] # or use coastal
coastal = cloud_detection.get_coastal()[0:600,2000:2600]
marine_shadow_index = (green-blue)/(green+blue)
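# (green - blue) / (green + blue) is a normalized-difference band ratio,
# so values fall in [-1, 1]; note it divides by zero where green + blue == 0.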
img = views.create_composite(red, green, blue)
img_rescale = exposure.rescale_intensity(img, in_range=(0, 90))
plt.rcParams['savefig.facecolor'] = "0.8"
vmin, vmax = 0.0, 0.1
def example_plot(ax, data, fontsize=12):
ax.imshow(data, vmin=vmin, vmax=vmax)
ax.locator_params(nbins=3)
ax.set_xlabel('x-label', fontsize=fontsize)
ax.set_ylabel('y-label', fontsize=fontsize)
ax.set_title('Title', fontsize=fontsize)
plt.close('all')
fig = plt.figure()
ax1=plt.subplot(243)
ax2=plt.subplot(244)
ax3=plt.subplot(247)
ax4=plt.subplot(248)
ax5=plt.subplot(121)
a_coastal = coastal[500:600, 500:600]
a_blue = blue[500:600, 500:600]
a_green = green[500:600, 500:600]
a_red = red[500:600, 500:600]
a_nir = nir[500:600, 500:600]
a_img = img[500:600, 500:600]
spec1 = [a_coastal[60, 60], a_blue[60, 60], a_green[60, 60], a_red[60, 60], a_nir[60, 60]]
b_coastal = coastal[200:300, 100:200]
b_blue = blue[200:300, 100:200]
b_green = green[200:300, 100:200]
b_red = red[200:300, 100:200]
b_nir = nir[200:300, 100:200]
b_img = img[200:300, 100:200]
example_plot(ax1, coastal)
example_plot(ax2, blue)
example_plot(ax3, green)
example_plot(ax4, red)
ax5.imshow(img)
# plt.tight_layout()
plt.close('all')
spec = [b_coastal[60, 60], b_blue[60, 60], b_green[60, 60], b_red[60, 60], b_nir[60, 60]]
plt.plot(spec, 'k*-')
plt.plot(spec1, 'k.-')
plt.close('all')
cbg = (coastal+blue+green)/3
plt.imshow(cbg/red)
#!/usr/bin/env python
__author__ = 'saguinag' + '@' + 'nd.edu'
__version__ = "0.1.0"
##
## fname "b2CliqueTreeRules.py"
##
## TODO: some todo list
## VersionLog:
import net_metrics as metrics
import pandas as pd
import argparse, traceback
import os, sys
import networkx as nx
import re
from collections import deque, defaultdict, Counter
import tree_decomposition as td
import PHRG as phrg
import probabilistic_cfg as pcfg
import exact_phrg as xphrg
import a1_hrg_cliq_tree as nfld
from a1_hrg_cliq_tree import load_edgelist
DEBUG = False
def get_parser ():
parser = argparse.ArgumentParser(description='b2CliqueTreeRules.py: given a tree derive grammar rules')
parser.add_argument('-t', '--treedecomp', required=True, help='input tree decomposition (dimacs file format)')
parser.add_argument('--version', action='version', version=__version__)
return parser
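# Example invocation (hypothetical file names): the tree decomposition is
# expected to be named "<graph>.<heuristic>.dimacs" with the matching edge
# list at "datasets/out.<graph>", e.g.:
#   python b2CliqueTreeRules.py -t karate.mcs.dimacs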
def dimacs_td_ct (tdfname):
""" tree decomp to clique-tree """
print '... input file:', tdfname
fname = tdfname
graph_name = os.path.basename(fname)
gname = graph_name.split('.')[0]
gfname = "datasets/out." + gname
tdh = os.path.basename(fname).split('.')[1] # tree decomp heuristic
tfname = gname+"."+tdh
G = load_edgelist(gfname)
if DEBUG: print nx.info(G)
print
with open(fname, 'r') as f: # read tree decomp from inddgo
lines = f.readlines()
lines = [x.rstrip('\r\n') for x in lines]
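# Expected INDDGO-style records (an assumption based on the parsing below):
#   B <bag-id> <bag-size> <vertex> <vertex> ...   one line per bag
#   e <bag-id> <bag-id>                           one line per tree edge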
cbags = {}
bags = [x.split() for x in lines if x.startswith('B')]
for b in bags:
cbags[int(b[1])] = [int(x) for x in b[3:]] # what to do with bag size?
edges = [x.split()[1:] for x in lines if x.startswith('e')]
edges = [[int(k) for k in x] for x in edges]
tree = defaultdict(set)
for s, t in edges:
tree[frozenset(cbags[s])].add(frozenset(cbags[t]))
if DEBUG: print '.. # of keys in `tree`:', len(tree.keys())
if DEBUG: print tree.keys()
root = list(tree)[0]
if DEBUG: print '.. Root:', root
root = frozenset(cbags[1])
if DEBUG: print '.. Root:', root
T = td.make_rooted(tree, root)
if DEBUG: print '.. T rooted:', len(T)
# nfld.unfold_2wide_tuple(T) # lets me display the tree's frozen sets
T = phrg.binarize(T)
prod_rules = {}
td.new_visit(T, G, prod_rules)
if DEBUG: print "--------------------"
if DEBUG: print "- Production Rules -"
if DEBUG: print "--------------------"
for k in prod_rules.iterkeys():
if DEBUG: print k
s = 0
for d in prod_rules[k]:
s += prod_rules[k][d]
for d in prod_rules[k]:
prod_rules[k][d] = float(prod_rules[k][d]) / float(s) # normalization step: convert counts to probabilities.
if DEBUG: print '\t -> ', d, prod_rules[k][d]
rules = []
id = 0
for k, v in prod_rules.iteritems():
sid = 0
for x in prod_rules[k]:
rhs = re.findall("[^()]+", x)
rules.append(("r%d.%d" % (id, sid), "%s" % re.findall("[^()]+", k)[0], rhs, prod_rules[k][x]))
if DEBUG: print ("r%d.%d" % (id, sid), "%s" % re.findall("[^()]+", k)[0], rhs, prod_rules[k][x])
sid += 1
id += 1
df = pd.DataFrame(rules)
outdf_fname = "./ProdRules/"+tfname+".prules"
if not os.path.isfile(outdf_fname+".bz2"):
print '...',outdf_fname, "written"
df.to_csv(outdf_fname+".bz2", compression="bz2")
else:
print '...', outdf_fname, "file exists"
return
def main ():
parser = get_parser()
args = vars(parser.parse_args())
dimacs_td_ct(args['treedecomp']) # gen synth graph
if __name__ == '__main__':
try:
main()
except Exception, e:
print str(e)
traceback.print_exc()
sys.exit(1)
sys.exit(0)
from itertools import chain
from django.utils.itercompat import is_iterable
class Tags:
"""
Built-in tags for internal checks.
"""
admin = 'admin'
caches = 'caches'
compatibility = 'compatibility'
database = 'database'
models = 'models'
security = 'security'
signals = 'signals'
templates = 'templates'
urls = 'urls'
class CheckRegistry:
def __init__(self):
self.registered_checks = set()
self.deployment_checks = set()
def register(self, check=None, *tags, **kwargs):
"""
Can be used as a function or a decorator. Register given function
`f` labeled with given `tags`. The function should receive **kwargs
and return list of Errors and Warnings.
Example::
registry = CheckRegistry()
@registry.register('mytag', 'anothertag')
def my_check(apps, **kwargs):
# ... perform checks and collect `errors` ...
return errors
# or
registry.register(my_check, 'mytag', 'anothertag')
"""
kwargs.setdefault('deploy', False)
def inner(check):
check.tags = tags
checks = self.deployment_checks if kwargs['deploy'] else self.registered_checks
checks.add(check)
return check
if callable(check):
return inner(check)
else:
if check:
tags += (check, )
return inner
def run_checks(self, app_configs=None, tags=None, include_deployment_checks=False):
"""
Run all registered checks and return list of Errors and Warnings.
"""
errors = []
checks = self.get_checks(include_deployment_checks)
if tags is not None:
checks = [check for check in checks if not set(check.tags).isdisjoint(tags)]
else:
# By default, 'database'-tagged checks are not run as they do more
# than mere static code analysis.
checks = [check for check in checks if Tags.database not in check.tags]
for check in checks:
new_errors = check(app_configs=app_configs)
assert is_iterable(new_errors), (
"The function %r did not return a list. All functions registered "
"with the checks registry must return a list." % check)
errors.extend(new_errors)
return errors
def tag_exists(self, tag, include_deployment_checks=False):
return tag in self.tags_available(include_deployment_checks)
def tags_available(self, deployment_checks=False):
return set(chain.from_iterable(
check.tags for check in self.get_checks(deployment_checks)
))
def get_checks(self, include_deployment_checks=False):
checks = list(self.registered_checks)
if include_deployment_checks:
checks.extend(self.deployment_checks)
return checks
registry = CheckRegistry()
register = registry.register
run_checks = registry.run_checks
tag_exists = registry.tag_exists
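# Minimal usage sketch (the check function below is hypothetical, not part
# of Django):
#
#   @register(Tags.security, deploy=True)
#   def check_secret_key(app_configs, **kwargs):
#       return []  # a real check returns a list of Errors/Warnings
#
#   errors = run_checks(tags=[Tags.security], include_deployment_checks=True)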
#!/usr/bin/env python
"""
Get rid of lines containing duplicate copies of the same atom in the "Atoms"
section of a LAMMPS data file. Duplicate lines which occur later are
preserved and the earlier lines are erased.
The file is read from sys.stdin. This program does not parse the entire
data file. The text from the "Atoms" section of the LAMMPS file must
be extracted in advance before it is sent to this program.
"""
import sys
def main():
in_stream = sys.stdin
f = None
fname = None
if len(sys.argv) == 2:
fname = sys.argv[1]
f = open(fname, 'r')
in_stream = f
atom_ids_in_use = set([])
lines = in_stream.readlines()
# Start at the end of the file and read backwards.
# If duplicate lines exist, eliminate the ones that occur earlier in the file.
i = len(lines)
while i > 0:
i -= 1
line_orig = lines[i]
line = line_orig.rstrip('\n')
if '#' in line_orig:
ic = line.find('#')
line = line_orig[:ic]
tokens = line.strip().split()
if len(tokens) > 0:
atom_id = tokens[0]
if atom_id in atom_ids_in_use:
del lines[i]
else:
atom_ids_in_use.add(atom_id)
else:
del lines[i]
for line in lines:
sys.stdout.write(line)
if f is not None:
f.close()
return
if __name__ == '__main__':
main()
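# Example usage (hypothetical file names): extract the "Atoms" section from
# the data file first, then either pipe it in or pass it as an argument:
#   python remove_duplicate_atoms.py < atoms_section.txt > deduped.txt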
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''The 'grit build' tool along with integration for this tool with the
SCons build system.
'''
import filecmp
import getopt
import os
import shutil
import sys
from grit import grd_reader
from grit import util
from grit.tool import interface
from grit import shortcuts
# It would be cleaner to have each module register itself, but that would
# require importing all of them on every run of GRIT.
'''Map from <output> node types to modules under grit.format.'''
_format_modules = {
'android': 'android_xml',
'c_format': 'c_format',
'chrome_messages_json': 'chrome_messages_json',
'data_package': 'data_pack',
'js_map_format': 'js_map_format',
'rc_all': 'rc',
'rc_translateable': 'rc',
'rc_nontranslateable': 'rc',
'rc_header': 'rc_header',
'resource_map_header': 'resource_map',
'resource_map_source': 'resource_map',
'resource_file_map_source': 'resource_map',
}
_format_modules.update(
(type, 'policy_templates.template_formatter') for type in
[ 'adm', 'admx', 'adml', 'reg', 'doc', 'json',
'plist', 'plist_strings', 'ios_plist', 'android_policy' ])
def GetFormatter(type):
modulename = 'grit.format.' + _format_modules[type]
__import__(modulename)
module = sys.modules[modulename]
try:
return module.Format
except AttributeError:
return module.GetFormatter(type)
class RcBuilder(interface.Tool):
'''A tool that builds RC files and resource header files for compilation.
Usage: grit build [-o OUTPUTDIR] [-D NAME[=VAL]]*
All output options for this tool are specified in the input file (see
'grit help' for details on how to specify the input file - it is a global
option).
Options:
-a FILE Assert that the given file is an output. There can be
multiple "-a" flags listed for multiple outputs. If a "-a"
or "--assert-file-list" argument is present, then the list
of asserted files must match the output files or the tool
will fail. The use-case is for the build system to maintain
separate lists of output files and to catch errors if the
build system's list and the grit list are out-of-sync.
--assert-file-list Provide a file listing multiple asserted output files.
There is one file name per line. This acts like specifying
each file with "-a" on the command line, but without the
possibility of running into OS line-length limits for very
long lists.
-o OUTPUTDIR Specify what directory output paths are relative to.
Defaults to the current directory.
-D NAME[=VAL] Specify a C-preprocessor-like define NAME with optional
value VAL (defaults to 1) which will be used to control
conditional inclusion of resources.
-E NAME=VALUE Set environment variable NAME to VALUE (within grit).
-f FIRSTIDSFILE Path to a python file that specifies the first id of
value to use for resources. A non-empty value here will
override the value specified in the <grit> node's
first_ids_file.
-w WHITELISTFILE Path to a file containing the string names of the
resources to include. Anything not listed is dropped.
-t PLATFORM Specifies the platform the build is targeting; defaults
to the value of sys.platform. The value provided via this
flag should match what sys.platform would report for your
target platform; see grit.node.base.EvaluateCondition.
-h HEADERFORMAT Custom format string to use for generating rc header files.
The string should have two placeholders: {textual_id}
and {numeric_id}. E.g. "#define {textual_id} {numeric_id}"
Otherwise it will use the default "#define SYMBOL 1234"
--output-all-resource-defines
--no-output-all-resource-defines If specified, overrides the value of the
output_all_resource_defines attribute of the root <grit>
element of the input .grd file.
--write-only-new flag
If flag is non-0, write output files to a temporary file
first, and copy it to the real output only if the new file
is different from the old file. This allows some build
systems to realize that dependent build steps might be
unnecessary, at the cost of comparing the output data at
grit time.
--depend-on-stamp
If specified along with --depfile and --depdir, the depfile
generated will depend on a stampfile instead of the first
output in the input .grd file.
Conditional inclusion of resources only affects the output of files which
control which resources get linked into a binary, e.g. it affects .rc files
meant for compilation but it does not affect resource header files (that define
IDs). This helps ensure that values of IDs stay the same, that all messages
are exported to translation interchange files (e.g. XMB files), etc.
'''
def ShortDescription(self):
return 'A tool that builds RC files for compilation.'
def Run(self, opts, args):
self.output_directory = '.'
first_ids_file = None
whitelist_filenames = []
assert_output_files = []
target_platform = None
depfile = None
depdir = None
rc_header_format = None
output_all_resource_defines = None
write_only_new = False
depend_on_stamp = False
(own_opts, args) = getopt.getopt(args, 'a:o:D:E:f:w:t:h:',
('depdir=','depfile=','assert-file-list=',
'output-all-resource-defines',
'no-output-all-resource-defines',
'depend-on-stamp',
'write-only-new='))
for (key, val) in own_opts:
if key == '-a':
assert_output_files.append(val)
elif key == '--assert-file-list':
with open(val) as f:
assert_output_files += f.read().splitlines()
elif key == '-o':
self.output_directory = val
elif key == '-D':
name, val = util.ParseDefine(val)
self.defines[name] = val
elif key == '-E':
(env_name, env_value) = val.split('=', 1)
os.environ[env_name] = env_value
elif key == '-f':
# TODO([email protected]): Remove this override once change
# lands in WebKit.grd to specify the first_ids_file in the
# .grd itself.
first_ids_file = val
elif key == '-w':
whitelist_filenames.append(val)
elif key == '--output-all-resource-defines':
output_all_resource_defines = True
elif key == '--no-output-all-resource-defines':
output_all_resource_defines = False
elif key == '-t':
target_platform = val
elif key == '-h':
rc_header_format = val
elif key == '--depdir':
depdir = val
elif key == '--depfile':
depfile = val
elif key == '--write-only-new':
write_only_new = val != '0'
elif key == '--depend-on-stamp':
depend_on_stamp = True
if len(args):
print 'This tool takes no tool-specific arguments.'
return 2
self.SetOptions(opts)
if self.scons_targets:
self.VerboseOut('Using SCons targets to identify files to output.\n')
else:
self.VerboseOut('Output directory: %s (absolute path: %s)\n' %
(self.output_directory,
os.path.abspath(self.output_directory)))
if whitelist_filenames:
self.whitelist_names = set()
for whitelist_filename in whitelist_filenames:
self.VerboseOut('Using whitelist: %s\n' % whitelist_filename)
whitelist_contents = util.ReadFile(whitelist_filename, util.RAW_TEXT)
self.whitelist_names.update(whitelist_contents.strip().split('\n'))
self.write_only_new = write_only_new
self.res = grd_reader.Parse(opts.input,
debug=opts.extra_verbose,
first_ids_file=first_ids_file,
defines=self.defines,
target_platform=target_platform)
# If the output_all_resource_defines option is specified, override the value
# found in the grd file.
if output_all_resource_defines is not None:
self.res.SetShouldOutputAllResourceDefines(output_all_resource_defines)
# Set an output context so that conditionals can use defines during the
# gathering stage; we use a dummy language here since we are not outputting
# a specific language.
self.res.SetOutputLanguage('en')
if rc_header_format:
self.res.AssignRcHeaderFormat(rc_header_format)
self.res.RunGatherers()
self.Process()
if assert_output_files:
if not self.CheckAssertedOutputFiles(assert_output_files):
return 2
if depfile and depdir:
self.GenerateDepfile(depfile, depdir, first_ids_file, depend_on_stamp)
return 0
def __init__(self, defines=None):
# Default file-creation function is built-in open(). Only done to allow
# overriding by unit test.
self.fo_create = open
# key/value pairs of C-preprocessor like defines that are used for
# conditional output of resources
self.defines = defines or {}
# self.res is a fully-populated resource tree if Run()
# has been called, otherwise None.
self.res = None
# Set to a list of filenames for the output nodes that are relative
# to the current working directory. They are in the same order as the
# output nodes in the file.
self.scons_targets = None
# The set of names that are whitelisted to actually be included in the
# output.
self.whitelist_names = None
# Whether to compare outputs to their old contents before writing.
self.write_only_new = False
@staticmethod
def AddWhitelistTags(start_node, whitelist_names):
# Walk the tree of nodes added attributes for the nodes that shouldn't
# be written into the target files (skip markers).
from grit.node import include
from grit.node import message
from grit.node import structure
for node in start_node:
# Same trick data_pack.py uses to see what nodes actually result in
# real items.
if (isinstance(node, include.IncludeNode) or
isinstance(node, message.MessageNode) or
isinstance(node, structure.StructureNode)):
text_ids = node.GetTextualIds()
# Mark the item to be skipped if it wasn't in the whitelist.
if text_ids and text_ids[0] not in whitelist_names:
node.SetWhitelistMarkedAsSkip(True)
@staticmethod
def ProcessNode(node, output_node, outfile):
'''Processes a node in-order, calling its formatter before and after
recursing to its children.
Args:
node: grit.node.base.Node subclass
output_node: grit.node.io.OutputNode
outfile: open filehandle
'''
base_dir = util.dirname(output_node.GetOutputFilename())
formatter = GetFormatter(output_node.GetType())
formatted = formatter(node, output_node.GetLanguage(), output_dir=base_dir)
outfile.writelines(formatted)
def Process(self):
# Update filenames with those provided by SCons if we're being invoked
# from SCons. The list of SCons targets also includes all <structure>
# node outputs, but it starts with our output files, in the order they
# occur in the .grd
if self.scons_targets:
assert len(self.scons_targets) >= len(self.res.GetOutputFiles())
outfiles = self.res.GetOutputFiles()
for ix in range(len(outfiles)):
outfiles[ix].output_filename = os.path.abspath(
self.scons_targets[ix])
else:
for output in self.res.GetOutputFiles():
output.output_filename = os.path.abspath(os.path.join(
self.output_directory, output.GetFilename()))
# If there are whitelisted names, tag the tree once up front, this way
# while looping through the actual output, it is just an attribute check.
if self.whitelist_names:
self.AddWhitelistTags(self.res, self.whitelist_names)
for output in self.res.GetOutputFiles():
self.VerboseOut('Creating %s...' % output.GetFilename())
# Microsoft's RC compiler can only deal with single-byte or double-byte
# files (no UTF-8), so we make all RC files UTF-16 to support all
# character sets.
if output.GetType() in ('rc_header', 'resource_map_header',
'resource_map_source', 'resource_file_map_source'):
encoding = 'cp1252'
elif output.GetType() in ('android', 'c_format', 'js_map_format', 'plist',
'plist_strings', 'doc', 'json', 'android_policy'):
encoding = 'utf_8'
elif output.GetType() in ('chrome_messages_json',):
# Chrome Web Store currently expects BOM for UTF-8 files :-(
encoding = 'utf-8-sig'
else:
# TODO(gfeher) modify here to set utf-8 encoding for admx/adml
encoding = 'utf_16'
# Set the context, for conditional inclusion of resources
self.res.SetOutputLanguage(output.GetLanguage())
self.res.SetOutputContext(output.GetContext())
self.res.SetFallbackToDefaultLayout(output.GetFallbackToDefaultLayout())
self.res.SetDefines(self.defines)
# Make the output directory if it doesn't exist.
self.MakeDirectoriesTo(output.GetOutputFilename())
# Write the results to a temporary file and only overwrite the original
# if the file changed. This avoids unnecessary rebuilds.
outfile = self.fo_create(output.GetOutputFilename() + '.tmp', 'wb')
if output.GetType() != 'data_package':
outfile = util.WrapOutputStream(outfile, encoding)
# Iterate in-order through entire resource tree, calling formatters on
# the entry into a node and on exit out of it.
with outfile:
self.ProcessNode(self.res, output, outfile)
# Now copy from the temp file back to the real output, but on Windows,
# only if the real output doesn't exist or the contents of the file
# changed. This prevents identical headers from being written and .cc
# files from recompiling (which is painful on Windows).
if not os.path.exists(output.GetOutputFilename()):
os.rename(output.GetOutputFilename() + '.tmp',
output.GetOutputFilename())
else:
# CHROMIUM SPECIFIC CHANGE.
# This clashes with gyp + vstudio, which expect the output timestamp
# to change on a rebuild, even if nothing has changed, so only do
# it when opted in.
if not self.write_only_new:
write_file = True
else:
files_match = filecmp.cmp(output.GetOutputFilename(),
output.GetOutputFilename() + '.tmp')
write_file = not files_match
if write_file:
shutil.copy2(output.GetOutputFilename() + '.tmp',
output.GetOutputFilename())
os.remove(output.GetOutputFilename() + '.tmp')
self.VerboseOut(' done.\n')
# Print warnings if there are any duplicate shortcuts.
warnings = shortcuts.GenerateDuplicateShortcutsWarnings(
self.res.UberClique(), self.res.GetTcProject())
if warnings:
print '\n'.join(warnings)
# Print out any fallback warnings, and missing translation errors, and
# exit with an error code if there are missing translations in a non-pseudo
# and non-official build.
warnings = (self.res.UberClique().MissingTranslationsReport().
encode('ascii', 'replace'))
if warnings:
self.VerboseOut(warnings)
if self.res.UberClique().HasMissingTranslations():
print self.res.UberClique().missing_translations_
sys.exit(-1)
def CheckAssertedOutputFiles(self, assert_output_files):
'''Checks that the asserted output files are specified in the given list.
Returns true if the asserted files are present. If they are not, returns
False and prints the failure.
'''
# Compare the absolute path names, sorted.
asserted = sorted([os.path.abspath(i) for i in assert_output_files])
actual = sorted([
os.path.abspath(os.path.join(self.output_directory, i.GetFilename()))
for i in self.res.GetOutputFiles()])
if asserted != actual:
missing = list(set(actual) - set(asserted))
extra = list(set(asserted) - set(actual))
error = '''Asserted file list does not match.
Expected output files:
%s
Actual output files:
%s
Missing output files:
%s
Extra output files:
%s
'''
print error % ('\n'.join(asserted), '\n'.join(actual), '\n'.join(missing),
'\n'.join(extra))
return False
return True
def GenerateDepfile(self, depfile, depdir, first_ids_file, depend_on_stamp):
'''Generate a depfile that contains the implicit dependencies of the input
grd. The depfile will be in the same format as a makefile, and will contain
references to files relative to |depdir|. It will be put in |depfile|.
For example, supposing we have three files in a directory src/
src/
blah.grd <- depends on input{1,2}.xtb
input1.xtb
input2.xtb
and we run
grit -i blah.grd -o ../out/gen --depdir ../out --depfile ../out/gen/blah.grd.d
from the directory src/ we will generate a depfile ../out/gen/blah.grd.d
that has the contents
gen/blah.h: ../src/input1.xtb ../src/input2.xtb
Where "gen/blah.h" is the first output (Ninja expects the .d file to list
the first output in cases where there is more than one). If the flag
--depend-on-stamp is specified, "gen/blah.grd.d.stamp" will be used instead; it is
'touched' whenever a new depfile is generated.
Note that all paths in the depfile are relative to ../out, the depdir.
'''
depfile = os.path.abspath(depfile)
depdir = os.path.abspath(depdir)
infiles = self.res.GetInputFiles()
# We want to trigger a rebuild if the first ids change.
if first_ids_file is not None:
infiles.append(first_ids_file)
if depend_on_stamp:
output_file = depfile + ".stamp"
# Touch the stamp file before generating the depfile.
with open(output_file, 'a'):
os.utime(output_file, None)
else:
# Get the first output file relative to the depdir.
outputs = self.res.GetOutputFiles()
output_file = os.path.join(self.output_directory,
outputs[0].GetFilename())
output_file = os.path.relpath(output_file, depdir)
# The path prefix to prepend to dependencies in the depfile.
prefix = os.path.relpath(os.getcwd(), depdir)
deps_text = ' '.join([os.path.join(prefix, i) for i in infiles])
depfile_contents = output_file + ': ' + deps_text
self.MakeDirectoriesTo(depfile)
outfile = self.fo_create(depfile, 'wb')
outfile.writelines(depfile_contents)
@staticmethod
def MakeDirectoriesTo(file):
'''Creates directories necessary to contain |file|.'''
dir = os.path.split(file)[0]
if not os.path.exists(dir):
os.makedirs(dir)
from __future__ import print_function
import os, tempfile, shutil
from subprocess import Popen
from ase.io import write
from ase.units import Bohr
class Bader:
'''class for running bader analysis and extracting data from it.
The class runs bader, extracts the charge density and outputs it
to a cube file. Then you call different functions of the class to
extract the charges, volumes, etc...
ACF.dat contains the coordinates of each atom, the charge
associated with it according to Bader partitioning, percentage of
the whole according to Bader partitioning and the minimum distance
to the surface. This distance should be compared to maximum
cut-off radius for the core region if pseudo potentials have been
used.
BCF.dat contains the coordinates of each Bader maxima, the charge
within that volume, the nearest atom and the distance to that
atom.
AtomVolumes.dat contains the number of each volume that has been
assigned to each atom. These numbers correspond to the number of
the BvAtxxxx.dat files.
The options for the executable are::
bader [ -c bader | voronoi ]
[ -n bader | voronoi ]
[ -b neargrid | ongrid ]
[ -r refine_edge_iterations ]
[ -ref reference_charge ]
[ -p all_atom | all_bader ]
[ -p sel_atom | sel_bader ] [volume list]
[ -p atom_index | bader_index ]
[ -i cube | chgcar ]
[ -h ] [ -v ]
chargefile
References:
G. Henkelman, A. Arnaldsson, and H. Jonsson, A fast and robust
algorithm for Bader decomposition of charge density,
Comput. Mater. Sci. 36, 354-360 (2006).
E. Sanville, S. D. Kenny, R. Smith, and G. Henkelman, An improved
grid-based algorithm for Bader charge allocation,
J. Comp. Chem. 28 899-908 (2007).
W. Tang, E. Sanville, and G. Henkelman, A grid-based Bader analysis
algorithm without lattice bias, J. Phys.: Condens. Matter 21
084204 (2009).
'''
def __init__(self, atoms):
'''
'''
self.atoms = atoms
#get density and write cube file
calc = atoms.get_calculator()
ncfile = calc.get_nc()
base, ext = os.path.splitext(ncfile)
x, y, z, density = calc.get_charge_density()
cubefile = base + '_charge_density.cube'
self.densityfile = cubefile
if not os.path.exists(cubefile):
write(cubefile, atoms, data=density * Bohr ** 3)
#cmd to run for bader analysis. check if output exists so we
#don't run this too often.
acf_file = base + '_ACF.dat'
if not os.path.exists(acf_file):
#mk tempdir
tempdir = tempfile.mkdtemp()
cwd = os.getcwd()
abscubefile = os.path.abspath(cubefile)
os.chdir(tempdir)
# Run bader as an argument list (a plain string would be treated as the
# program name) and wait on the process instance, not the Popen class.
cmd = ['bader', abscubefile]
process = Popen(cmd)
status = process.wait()
if status != 0:
print('bader exited with status %d' % status)
shutil.copy2('ACF.dat', os.path.join(cwd, acf_file))
os.chdir(cwd)
shutil.rmtree(tempdir)
self.charges = []
self.volumes = []
#now parse the output
f = open(acf_file, 'r')
#skip 2 lines
f.readline()
f.readline()
for i, atom in enumerate(self.atoms):
line = f.readline()
fields = line.split()
n = int(fields[0])
x = float(fields[1])
y = float(fields[2])
z = float(fields[3])
chg = float(fields[4])
mindist = float(fields[5])
vol = float(fields[6])
self.charges.append(chg)
self.volumes.append(vol)
f.close()
def get_bader_charges(self):
return self.charges
def get_bader_volumes(self):
'return volumes in Ang**3'
return [x * Bohr ** 3 for x in self.volumes]
def write_atom_volume(self, atomlist):
'''write bader atom volumes to cube files.
atomlist = [0,2] #for example
-p sel_atom Write the selected atomic volumes, read from the
subsequent list of volumes.
'''
alist = ' '.join(str(x) for x in atomlist)
cmd = 'bader -p sel_atom %s %s' % (alist, self.densityfile)
print(cmd)
os.system(cmd)
def write_bader_volume(self, atomlist):
"""write bader atom volumes to cube files.
::
atomlist = [0,2] # for example
-p sel_bader Write the selected Bader volumes, read from the
subsequent list of volumes.
"""
alist = ' '.join(str(x) for x in atomlist)
cmd = 'bader -p sel_bader %s %s' % (alist, self.densityfile)
print(cmd)
os.system(cmd)
def write_atom_index(self):
''' -p atom_index Write the atomic volume index to a charge
density file.
'''
cmd = 'bader -p atom_index %s' % (self.densityfile)
print(cmd)
os.system(cmd)
def write_bader_index(self):
'''
-p bader_index Write the Bader volume index to a charge
density file.
'''
cmd = 'bader -p bader_index %s' % (self.densityfile)
print(cmd)
os.system(cmd)
def write_all_atom(self):
'''
-p all_atom Combine all volumes associated with an atom and
write to file. This is done for all atoms and written to files
named BvAtxxxx.dat. The volumes associated with atoms are
those for which the maximum in charge density within the
volume is closest to the atom.
'''
cmd = 'bader -p all_atom %s' % (self.densityfile)
print(cmd)
os.system(cmd)
def write_all_bader(self):
'''
-p all_bader Write all Bader volumes (containing charge above
threshold of 0.0001) to a file. The charge distribution in
each volume is written to a separate file, named
Bvolxxxx.dat. It will either be of a CHGCAR format or a CUBE
file format, depending on the format of the initial charge
density file. These files can be quite large, so this option
should be used with caution.
'''
cmd = 'bader -p all_bader %s' % (self.densityfile)
print(cmd)
os.system(cmd)
if __name__ == '__main__':
from ase.calculators.jacapo import Jacapo
atoms = Jacapo.read_atoms('ethylene.nc')
b = Bader(atoms)
print(b.get_bader_charges())
print(b.get_bader_volumes())
b.write_atom_volume([3, 4])
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Find the most recent tombstone file(s) on all connected devices
# and prints their stacks.
#
# Assumes tombstone file was created with current symbols.
import datetime
import logging
import multiprocessing
import os
import subprocess
import sys
import optparse
from pylib import android_commands
def _ListTombstones(adb):
"""List the tombstone files on the device.
Args:
adb: An instance of AndroidCommands.
Yields:
Tuples of (tombstone filename, date time of file on device).
"""
lines = adb.RunShellCommand('TZ=UTC su -c ls -a -l /data/tombstones')
for line in lines:
if 'tombstone' in line and not 'No such file or directory' in line:
details = line.split()
t = datetime.datetime.strptime(details[-3] + ' ' + details[-2],
'%Y-%m-%d %H:%M')
yield details[-1], t
def _GetDeviceDateTime(adb):
"""Determine the date time on the device.
Args:
adb: An instance of AndroidCommands.
Returns:
A datetime instance.
"""
device_now_string = adb.RunShellCommand('TZ=UTC date')
return datetime.datetime.strptime(
device_now_string[0], '%a %b %d %H:%M:%S %Z %Y')
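# The format string above matches the default `date` output, e.g.
# "Sat Jan 04 12:34:56 UTC 2014".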
def _GetTombstoneData(adb, tombstone_file):
"""Retrieve the tombstone data from the device
Args:
tombstone_file: the tombstone to retrieve
Returns:
A list of lines
"""
return adb.GetProtectedFileContents('/data/tombstones/' + tombstone_file)
def _EraseTombstone(adb, tombstone_file):
"""Deletes a tombstone from the device.
Args:
tombstone_file: the tombstone to delete.
"""
return adb.RunShellCommandWithSU('rm /data/tombstones/' + tombstone_file)
def _ResolveSymbols(tombstone_data, include_stack):
"""Run the stack tool for given tombstone input.
Args:
tombstone_data: a list of strings of tombstone data.
include_stack: boolean whether to include stack data in output.
Yields:
A string for each line of resolved stack output.
"""
stack_tool = os.path.join(os.path.dirname(__file__), '..', '..',
'third_party', 'android_platform', 'development',
'scripts', 'stack')
proc = subprocess.Popen(stack_tool, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
output = proc.communicate(input='\n'.join(tombstone_data))[0]
for line in output.split('\n'):
if not include_stack and 'Stack Data:' in line:
break
yield line
def _ResolveTombstone(tombstone):
lines = []
lines += [tombstone['file'] + ' created on ' + str(tombstone['time']) +
', about this long ago: ' +
(str(tombstone['device_now'] - tombstone['time']) +
' Device: ' + tombstone['serial'])]
print '\n'.join(lines)
print 'Resolving...'
lines += _ResolveSymbols(tombstone['data'], tombstone['stack'])
return lines
def _ResolveTombstones(jobs, tombstones):
"""Resolve a list of tombstones.
Args:
jobs: the number of jobs to use with multiprocess.
tombstones: a list of tombstones.
"""
if not tombstones:
print 'No device attached? Or no tombstones?'
return
if len(tombstones) == 1:
data = _ResolveTombstone(tombstones[0])
else:
pool = multiprocessing.Pool(processes=jobs)
data = pool.map(_ResolveTombstone, tombstones)
data = ['\n'.join(d) for d in data]
print '\n'.join(data)
def _GetTombstonesForDevice(adb, options):
"""Returns a list of tombstones on a given adb connection.
Args:
adb: An instance of Androidcommands.
options: command line arguments from OptParse
"""
ret = []
all_tombstones = list(_ListTombstones(adb))
if not all_tombstones:
print 'No device attached? Or no tombstones?'
return ret
# Sort the tombstones in date order, descending
all_tombstones.sort(key=lambda tombstone: tombstone[1], reverse=True)
# Only resolve the most recent unless --all-tombstones given.
tombstones = all_tombstones if options.all_tombstones else [all_tombstones[0]]
device_now = _GetDeviceDateTime(adb)
for tombstone_file, tombstone_time in tombstones:
ret += [{'serial': adb.Adb().GetSerialNumber(),
'device_now': device_now,
'time': tombstone_time,
'file': tombstone_file,
'stack': options.stack,
'data': _GetTombstoneData(adb, tombstone_file)}]
# Erase all the tombstones if desired.
if options.wipe_tombstones:
for tombstone_file, _ in all_tombstones:
_EraseTombstone(adb, tombstone_file)
return ret
def main():
parser = optparse.OptionParser()
parser.add_option('--device',
help='The serial number of the device. If not specified '
'will use all devices.')
parser.add_option('-a', '--all-tombstones', action='store_true',
help="""Resolve symbols for all tombstones, rather than just
the most recent""")
parser.add_option('-s', '--stack', action='store_true',
help='Also include symbols for stack data')
parser.add_option('-w', '--wipe-tombstones', action='store_true',
help='Erase all tombstones from device after processing')
parser.add_option('-j', '--jobs', type='int',
default=4,
help='Number of jobs to use when processing multiple '
'crash stacks.')
options, args = parser.parse_args()
if options.device:
devices = [options.device]
else:
devices = android_commands.GetAttachedDevices()
tombstones = []
for device in devices:
adb = android_commands.AndroidCommands(device)
tombstones += _GetTombstonesForDevice(adb, options)
_ResolveTombstones(options.jobs, tombstones)
if __name__ == '__main__':
sys.exit(main())
#!/usr/bin/env python
"""
Render Django templates.
Useful for generating fixtures for the JavaScript unit test suite.
Usage:
python render_templates.py path/to/templates.json
where "templates.json" is a JSON file of the form:
[
{
"template": "openassessmentblock/oa_base.html",
"context": {
"title": "Lorem",
"question": "Ipsum?"
},
"output": "oa_base.html"
},
...
]
The rendered templates are saved to "output" relative to the
templates.json file's directory.
"""
import sys
import os.path
import json
import re
import dateutil.parser
import pytz
# This is a bit of a hack to ensure that the root repo directory
# is in the Python path, so Django can find the settings module.
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from django.template.context import Context
from django.template.loader import get_template
USAGE = u"{prog} TEMPLATE_DESC"
DATETIME_REGEX = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}$")
def parse_dates(context):
"""
Transform datetime strings into Python datetime objects.
JSON does not provide a standard way to serialize datetime objects,
but some of the templates expect that the context contains
Python datetime objects.
This (somewhat hacky) solution recursively searches the context
for formatted datetime strings of the form "2014-01-02T12:34"
and converts them to Python datetime objects with the timezone
set to UTC.
Args:
context (JSON-serializable): The context (or part of the context)
that will be passed to the template. Dictionaries and lists
will be recursively searched and transformed.
Returns:
JSON-serializable of the same type as the `context` argument.
"""
if isinstance(context, dict):
return {
key: parse_dates(value)
for key, value in context.iteritems()
}
elif isinstance(context, list):
return [
parse_dates(item)
for item in context
]
elif isinstance(context, basestring):
if DATETIME_REGEX.match(context) is not None:
return dateutil.parser.parse(context).replace(tzinfo=pytz.utc)
return context
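# Example of the transformation (illustrative values):
#   parse_dates({"start": "2014-01-02T12:34", "title": "Lorem"})
#   -> {"start": datetime(2014, 1, 2, 12, 34, tzinfo=pytz.utc), "title": "Lorem"}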
def render_templates(root_dir, template_json):
"""
Create rendered templates.
Args:
root_dir (str): The directory in which to write the rendered templates.
template_json (dict): Description of which templates to render. Must be a list
of dicts, each containing keys "template" (str), "context" (dict), and "output" (str).
Returns:
None
"""
for template_dict in template_json:
template = get_template(template_dict['template'])
context = parse_dates(template_dict['context'])
rendered = template.render(Context(context))
output_path = os.path.join(root_dir, template_dict['output'])
try:
with open(output_path, 'w') as output_file:
output_file.write(rendered.encode('utf-8'))
except IOError:
print "Could not write rendered template to file: {}".format(output_path)
sys.exit(1)
def main():
"""
Main entry point for the script.
"""
if len(sys.argv) < 2:
print USAGE.format(prog=sys.argv[0])
sys.exit(1)
try:
with open(sys.argv[1]) as template_json:
root_dir = os.path.dirname(sys.argv[1])
render_templates(root_dir, json.load(template_json))
except IOError as ex:
print u"Could not open template description file: {}".format(sys.argv[1])
print(ex)
sys.exit(1)
except ValueError as ex:
print u"Could not parse template description as JSON: {}".format(sys.argv[1])
print(ex)
sys.exit(1)
if __name__ == '__main__':
main()
#!/usr/bin/env python
"""Node Server Example
This example demonstrates how to create a very simple node server
that supports bi-directional messaging between server and connected
clients forming a cluster of nodes.
"""
from __future__ import print_function
from os import getpid
from optparse import OptionParser
from circuits.node import Node
from circuits import Component, Debugger
__version__ = "0.0.1"
USAGE = "%prog [options]"
VERSION = "%prog v" + __version__
def parse_options():
parser = OptionParser(usage=USAGE, version=VERSION)
parser.add_option(
"-b", "--bind",
action="store", type="string",
default="0.0.0.0:8000", dest="bind",
help="Bind to address:[port]"
)
parser.add_option(
"-d", "--debug",
action="store_true",
default=False, dest="debug",
help="Enable debug mode"
)
opts, args = parser.parse_args()
return opts, args
class NodeServer(Component):
def init(self, args, opts):
"""Initialize our ``ChatServer`` Component.
This uses the convenience ``init`` method which is called after the
component is proeprly constructed and initialized and passed the
same args and kwargs that were passed during construction.
"""
self.args = args
self.opts = opts
self.clients = {}
if opts.debug:
Debugger().register(self)
if ":" in opts.bind:
address, port = opts.bind.split(":")
port = int(port)
else:
address, port = opts.bind, 8000
Node(port=port, server_ip=address).register(self)
def connect(self, sock, host, port):
"""Connect Event -- Triggered for new connecting clients"""
self.clients[sock] = {
"host": sock,
"port": port,
}
def disconnect(self, sock):
"""Disconnect Event -- Triggered for disconnecting clients"""
if sock not in self.clients:
return
del self.clients[sock]
def ready(self, server, bind):
print("Ready! Listening on {}:{}".format(*bind))
print("Waiting for remote events...")
def hello(self):
return "Hello World! ({0:d})".format(getpid())
def main():
opts, args = parse_options()
# Configure and "run" the System.
NodeServer(args, opts).run()
if __name__ == "__main__":
main()
# acl.py - changeset access control for mercurial
#
# Copyright 2006 Vadim Gelfer <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''hooks for controlling repository access
This hook makes it possible to allow or deny write access to given
branches and paths of a repository when receiving incoming changesets
via pretxnchangegroup and pretxncommit.
The authorization is matched based on the local user name on the
system where the hook runs, and not the committer of the original
changeset (since the latter is merely informative).
The acl hook is best used along with a restricted shell like hgsh,
preventing authenticating users from doing anything other than pushing
or pulling. The hook is not safe to use if users have interactive
shell access, as they can then disable the hook. Nor is it safe if
remote users share an account, because then there is no way to
distinguish them.
The order in which access checks are performed is:
1) Deny list for branches (section ``acl.deny.branches``)
2) Allow list for branches (section ``acl.allow.branches``)
3) Deny list for paths (section ``acl.deny``)
4) Allow list for paths (section ``acl.allow``)
The allow and deny sections take key-value pairs.
Branch-based Access Control
---------------------------
Use the ``acl.deny.branches`` and ``acl.allow.branches`` sections to
have branch-based access control. Keys in these sections can be
either:
- a branch name, or
- an asterisk, to match any branch;
The corresponding values can be either:
- a comma-separated list containing users and groups, or
- an asterisk, to match anyone;
You can add the "!" prefix to a user or group name to invert the sense
of the match.
Path-based Access Control
-------------------------
Use the ``acl.deny`` and ``acl.allow`` sections to have path-based
access control. Keys in these sections accept a subtree pattern (with
a glob syntax by default). The corresponding values follow the same
syntax as the other sections above.
Groups
------
Group names must be prefixed with an ``@`` symbol. Specifying a group
name has the same effect as specifying all the users in that group.
You can define group members in the ``acl.groups`` section.
If a group name is not defined there, and Mercurial is running under
a Unix-like system, the list of users will be taken from the OS.
Otherwise, an exception will be raised.
Example Configuration
---------------------
::
[hooks]
# Use this if you want to check access restrictions at commit time
pretxncommit.acl = python:hgext.acl.hook
# Use this if you want to check access restrictions for pull, push,
# bundle and serve.
pretxnchangegroup.acl = python:hgext.acl.hook
[acl]
# Allow or deny access for incoming changes only if their source is
# listed here, let them pass otherwise. Source is "serve" for all
# remote access (http or ssh), "push", "pull" or "bundle" when the
# related commands are run locally.
# Default: serve
sources = serve
[acl.deny.branches]
# Everyone is denied to the frozen branch:
frozen-branch = *
# A bad user is denied on all branches:
* = bad-user
[acl.allow.branches]
# A few users are allowed on branch-a:
branch-a = user-1, user-2, user-3
# Only one user is allowed on branch-b:
branch-b = user-1
# The super user is allowed on any branch:
* = super-user
# Everyone is allowed on branch-for-tests:
branch-for-tests = *
[acl.deny]
# This list is checked first. If a match is found, acl.allow is not
# checked. All users are granted access if acl.deny is not present.
# Format for both lists: glob pattern = user, ..., @group, ...
# To match everyone, use an asterisk for the user:
# my/glob/pattern = *
# user6 will not have write access to any file:
** = user6
# Group "hg-denied" will not have write access to any file:
** = @hg-denied
# Nobody will be able to change "DONT-TOUCH-THIS.txt", despite
# everyone being able to change all other files. See below.
src/main/resources/DONT-TOUCH-THIS.txt = *
[acl.allow]
# if acl.allow is not present, all users are allowed by default
# empty acl.allow = no users allowed
# User "doc_writer" has write access to any file under the "docs"
# folder:
docs/** = doc_writer
# User "jack" and group "designers" have write access to any file
# under the "images" folder:
images/** = jack, @designers
# Everyone (except for "user6" and "@hg-denied" - see acl.deny above)
# will have write access to any file under the "resources" folder
# (except for 1 file. See acl.deny):
src/main/resources/** = *
.hgtags = release_engineer
Examples using the "!" prefix
.............................
Suppose there's a branch that only a given user (or group) should be able to
push to, and you don't want to restrict access to any other branch that may
be created.
The "!" prefix allows you to prevent anyone except a given user or group to
push changesets in a given branch or path.
In the examples below, we will:
1) Deny access to branch "ring" to anyone but user "gollum"
2) Deny access to branch "lake" to anyone but members of the group "hobbit"
3) Deny access to a file to anyone but user "gollum"
::
[acl.allow.branches]
# Empty
[acl.deny.branches]
# 1) only 'gollum' can commit to branch 'ring';
# 'gollum' and anyone else can still commit to any other branch.
ring = !gollum
# 2) only members of the group 'hobbit' can commit to branch 'lake';
# 'hobbit' members and anyone else can still commit to any other branch.
lake = !@hobbit
# You can also deny access based on file paths:
[acl.allow]
# Empty
[acl.deny]
# 3) only 'gollum' can change the file below;
# 'gollum' and anyone else can still change any other file.
/misty/mountains/cave/ring = !gollum
'''
from mercurial.i18n import _
from mercurial import util, match
import getpass, urllib
testedwith = 'internal'
def _getusers(ui, group):
# First, try to use group definition from section [acl.groups]
hgrcusers = ui.configlist('acl.groups', group)
if hgrcusers:
return hgrcusers
ui.debug('acl: "%s" not defined in [acl.groups]\n' % group)
# If no users found in group definition, get users from OS-level group
try:
return util.groupmembers(group)
except KeyError:
raise util.Abort(_("group '%s' is undefined") % group)
def _usermatch(ui, user, usersorgroups):
if usersorgroups == '*':
return True
for ug in usersorgroups.replace(',', ' ').split():
if ug.startswith('!'):
# Test for excluded user or group. Format:
# if ug is a user name: !username
# if ug is a group name: !@groupname
ug = ug[1:]
if not ug.startswith('@') and user != ug \
or ug.startswith('@') and user not in _getusers(ui, ug[1:]):
return True
# Test for user or group. Format:
# if ug is a user name: username
# if ug is a group name: @groupname
elif user == ug \
or ug.startswith('@') and user in _getusers(ui, ug[1:]):
return True
return False
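# For example, _usermatch(ui, 'gollum', '!gollum') returns False (the "!"
# prefix inverts the match, so everyone except gollum matches the entry),
# while _usermatch(ui, 'smeagol', '!gollum') returns True.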
def buildmatch(ui, repo, user, key):
'''return tuple of (match function, list enabled).'''
if not ui.has_section(key):
ui.debug('acl: %s not enabled\n' % key)
return None
pats = [pat for pat, users in ui.configitems(key)
if _usermatch(ui, user, users)]
ui.debug('acl: %s enabled, %d entries for user %s\n' %
(key, len(pats), user))
# Branch-based ACL
if not repo:
if pats:
# If there's an asterisk (meaning "any branch"), always return True;
# Otherwise, test if b is in pats
if '*' in pats:
return util.always
return lambda b: b in pats
return util.never
# Path-based ACL
if pats:
return match.match(repo.root, '', pats)
return util.never
def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
if hooktype not in ['pretxnchangegroup', 'pretxncommit']:
raise util.Abort(_('config error - hook type "%s" cannot stop '
'incoming changesets nor commits') % hooktype)
if (hooktype == 'pretxnchangegroup' and
source not in ui.config('acl', 'sources', 'serve').split()):
ui.debug('acl: changes have source "%s" - skipping\n' % source)
return
user = None
if source == 'serve' and 'url' in kwargs:
url = kwargs['url'].split(':')
if url[0] == 'remote' and url[1].startswith('http'):
user = urllib.unquote(url[3])
if user is None:
user = getpass.getuser()
ui.debug('acl: checking access for user "%s"\n' % user)
cfg = ui.config('acl', 'config')
if cfg:
ui.readconfig(cfg, sections = ['acl.groups', 'acl.allow.branches',
'acl.deny.branches', 'acl.allow', 'acl.deny'])
allowbranches = buildmatch(ui, None, user, 'acl.allow.branches')
denybranches = buildmatch(ui, None, user, 'acl.deny.branches')
allow = buildmatch(ui, repo, user, 'acl.allow')
deny = buildmatch(ui, repo, user, 'acl.deny')
for rev in xrange(repo[node], len(repo)):
ctx = repo[rev]
branch = ctx.branch()
if denybranches and denybranches(branch):
raise util.Abort(_('acl: user "%s" denied on branch "%s"'
' (changeset "%s")')
% (user, branch, ctx))
if allowbranches and not allowbranches(branch):
raise util.Abort(_('acl: user "%s" not allowed on branch "%s"'
' (changeset "%s")')
% (user, branch, ctx))
ui.debug('acl: branch access granted: "%s" on branch "%s"\n'
% (ctx, branch))
for f in ctx.files():
if deny and deny(f):
raise util.Abort(_('acl: user "%s" denied on "%s"'
' (changeset "%s")') % (user, f, ctx))
if allow and not allow(f):
raise util.Abort(_('acl: user "%s" not allowed on "%s"'
' (changeset "%s")') % (user, f, ctx))
ui.debug('acl: path access granted: "%s"\n' % ctx)
import os
import shutil
import zipfile
import fnmatch
import uuid
def main():
kits = findAll(".")
for kit in kits:
print("* ", kit, " -> ", kits[kit])
print()
print()
print("Starting extraction:")
print("------------------------------------------")
extractKits(kits)
def findAll(dir):
print()
print("All zip files:")
print("---------------------------")
kits = {}
files = os.listdir(".")
for file in files:
if file.endswith(".zip"):
kits[file] = getType(file)
return kits
def getType(file):
if "-pp" in file:
return "paper"
if "-ap" in file:
return "alpha"
if "-ep" in file:
return "embellishment"
options = {1: "embellishment", 2: "alpha", 3: "paper", 4:"other"}
# DEBUG: short-circuit the interactive prompt below and always pick option 1.
return options[1]
goodInput = False
while not goodInput:
print()
print("File: ", file)
print(" 1) Embellishment")
print(" 2) Alpha")
print(" 3) Paper")
print(" 4) Other")
action = input("Please Enter the Number (default = 1):")
if action is "":
return options[1];
if action.isdigit():
actionNum = int(action)
if actionNum > 0 and actionNum < len(options)+1:
return options[actionNum]
def extractKits(kits):
tmpDir = "./tmp";
kitNames = {}
x = 0
for kit in kits:
x = x + 1
print()
print()
print()
print("Extracting: ", kit, " ( ", x, " of ", len(kits), ")")
kitStr = kit.rsplit("-", 1)[0]
print("Kit Name: ", kitStr)
if kitStr in kitNames:
name = input("Please Enter Kit Name (default = "+kitNames[kitStr]+"): ")
name = name or kitNames[kitStr]
else:
name = input("Please Enter Kit Name: ")
kitNames[kitStr] = name
# Always start from a fresh temp directory.
if os.path.exists(tmpDir):
shutil.rmtree(tmpDir)
os.makedirs(tmpDir)
if not os.path.exists("./" + name):
os.makedirs("./" + name)
kitzip = zipfile.ZipFile("./" + kit)
kitzip.extractall(tmpDir)
images = copyExtractedFiles("./" + name +"/")
createManifest(kit, name, images, kits[kit])
def copyExtractedFiles(dest):
matches = []
filenames = [".png", ".jpg"]
for rootpath, subdirs, files in os.walk("./tmp"):
for filename in files:
if os.path.splitext(filename)[1].lower() in filenames:
# print(os.path.join(rootpath, filename).replace('\\','/'))
shutil.move(os.path.join(rootpath, filename).replace('\\','/'), dest+filename)
matches.append(dest + filename)
return matches
def createManifest(kit, name, images, kitType):
manifest = []
manifest.append('<Manifest vendorid="0" vendorpackageid="0" maintaincopyright="True" dpi="300">')
manifest.append('<Groups />')
manifest.append('<Entries>')
for image in images:
        # use the kit type passed in instead of hardcoding "Embellishment"
        manifest.append('<Image ID="'+str(uuid.uuid4())+'" Name="'+image+'" Group="'+kitType.capitalize()+'" />')
manifest.append('</Entries>')
manifest.append('</Manifest>')
with open('./'+name+'/package.manifestx', 'w') as f:
for line in manifest:
            # text mode already translates '\n'; os.linesep would write '\r\r\n' on Windows
            f.write(line + '\n')
if __name__ == "__main__":
main()
| 0.005027 |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An optimizer that switches between several methods."""
import tensorflow as tf
from tensorflow.python.training import optimizer
class CompositeOptimizer(optimizer.Optimizer):
"""Optimizer that switches between several methods.
"""
def __init__(self,
optimizer1,
optimizer2,
switch,
use_locking=False,
name='Composite'):
"""Construct a new Composite optimizer.
Args:
optimizer1: A tf.python.training.optimizer.Optimizer object.
optimizer2: A tf.python.training.optimizer.Optimizer object.
switch: A tf.bool Tensor, selecting whether to use the first or the second
optimizer.
use_locking: Bool. If True apply use locks to prevent concurrent updates
to variables.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Composite".
"""
super(CompositeOptimizer, self).__init__(use_locking, name)
self._optimizer1 = optimizer1
self._optimizer2 = optimizer2
self._switch = switch
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
return tf.cond(
self._switch,
lambda: self._optimizer1.apply_gradients(grads_and_vars,
global_step, name),
lambda: self._optimizer2.apply_gradients(grads_and_vars,
global_step, name)
)
def get_slot(self, var, name):
slot1 = self._optimizer1.get_slot(var, name)
slot2 = self._optimizer2.get_slot(var, name)
if slot1 and slot2:
raise LookupError('Slot named %s for variable %s populated for both '
'optimizers' % (name, var.name))
return slot1 or slot2
def get_slot_names(self):
return sorted(self._optimizer1.get_slot_names() +
self._optimizer2.get_slot_names())
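# Minimal usage sketch (assumes TF1-style graph mode; the optimizers,
# learning rates and placeholder below are illustrative):
#
#   switch = tf.placeholder(tf.bool, shape=[], name='switch')
#   opt = CompositeOptimizer(tf.train.MomentumOptimizer(0.01, 0.9),
#                            tf.train.AdamOptimizer(0.001),
#                            switch)
#   # True selects the momentum branch, False the Adam branch (via tf.cond)
#   train_op = opt.minimize(loss)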
| 0.003072 |
"""
This module contains helper classes and methods
for the facebook integration module
.. module:: application.facebook.facebook
.. moduleauthor:: Devin Schwab <[email protected]>
"""
import facebooksdk as fb
import models
from flask import flash
class AlbumList(object):
def __init__(self, token):
"""
        Given an access token this class
will get all albums for the object associated with the token
(i.e. a page or a user)
It will lazily construct an Album instance for each of
the album ids returned
"""
self.graph = fb.GraphAPI(token.access_token)
albums_data = self.graph.get_connections('me', 'albums')['data']
self.album_ids = {}
self.album_names = {}
for data in albums_data:
self.album_ids[data['id']] = data
self.album_names[data['name']] = data
def get_albums_by_name(self, names):
"""
Given a list of names this method will
return album objects for each matching name.
If a name is not found then it is silently ignored.
This method returns a dictionary mapping name
to Album object.
"""
albums = {}
for name in names:
if name in self.album_names:
if isinstance(self.album_names[name], Album):
albums[name] = self.album_names[name]
else:
self.album_names[name] = Album(graph=self.graph,
album_data=self.album_names[name])
self.album_ids[self.album_names[name].me] = self.album_names[name]
albums[name] = self.album_names[name]
return albums
def get_albums_by_id(self, ids):
"""
Given a list of ids this method will
return album objects for each matching id.
If an id is not found then it is silently ignored.
This method returns a dictionary mapping id to
Album object
"""
albums = {}
for album_id in ids:
if album_id in self.album_ids:
if isinstance(self.album_ids[album_id], Album):
albums[album_id] = self.album_ids[album_id]
else:
self.album_ids[album_id] = Album(graph=self.graph,
album_data=self.album_ids[album_id])
self.album_names[self.album_ids[album_id].name] = self.album_ids[album_id]
albums[album_id] = self.album_ids[album_id]
return albums
def get_all_albums_by_id(self):
"""
This method returns a dictionary of all
albums with album ids as the keys
"""
for album_id in self.album_ids:
if not isinstance(self.album_ids[album_id], Album):
self.album_ids[album_id] = Album(graph=self.graph,
album_data=self.album_ids[album_id])
self.album_names[self.album_ids[album_id].name] = self.album_ids[album_id]
return self.album_ids
def get_all_albums_by_name(self):
"""
This method returns a dictionary of all
albums with album names as the keys
"""
for name in self.album_names:
if not isinstance(self.album_names[name], Album):
self.album_names[name] = Album(graph=self.graph,
album_data=self.album_names[name])
self.album_ids[self.album_names[name].me] = self.album_names[name]
return self.album_names
class Album(object):
def __init__(self, graph=None, token=None, album_id=None, album_data=None):
"""
Initializes a new Album object.
If graph is provided then the graph object is saved to this
instance.
If the token is provided then the graph object for this token
is created and saved to this instance.
If both are none then an error is raised.
If album_id is provided then the graph object is queried
for the id and the album object populates itself with this data
If album_data is provided then the graph object is populated
with the data in the json derived object
If both are None then an error is raised
"""
if graph is None and token is None:
raise TypeError("Either a graph object must be provided or a token must be provided")
if graph is not None:
self.graph = graph
query = models.AccessTokenModel.all()
query.filter('access_token =', graph.access_token)
try:
self.token = query.fetch(1)[0]
except IndexError:
                raise TypeError('No AccessTokenModel found for the provided access token')
else:
self.graph = fb.GraphAPI(token.access_token)
self.token = token
if album_id is None and album_data is None:
            raise TypeError("Either an album id or album data must be provided")
if album_id is not None:
album_data = self.graph.get_object(album_id)
self.me = album_data['id']
self.name = album_data['name']
self.desc = album_data.get('description', None)
self.count = album_data.get('count', 0)
if 'cover_photo' in album_data:
self.cover_photo = Photo(self.me, graph=self.graph, photo_id=album_data['cover_photo']).thumbnail
else:
self.cover_photo = None
def get_model(self):
query = models.AlbumModel.all()
query.filter('me =', self.me)
try:
return query.fetch(1)[0]
except IndexError:
            cover_thumb = self.cover_photo
entity = models.AlbumModel(me=self.me,
token=self.token,
name=self.name,
desc=self.desc,
cover_photo=cover_thumb)
entity.put()
return entity
def get_photos(self):
"""
Get a list of Photo objects
"""
photos_data = self.graph.get_connections(self.me, 'photos')['data']
photos = []
for photo_data in photos_data:
query = models.PhotoModel.all()
query.filter('me =', photo_data['id'])
try:
photos.append(query.fetch(1)[0])
except IndexError:
name = None
if 'name' in photo_data:
name = photo_data['name']
orig = photo_data['images'][0]['source']
entity = models.PhotoModel(me=photo_data['id'],
album_id=self.me,
name=name,
thumbnail=photo_data['picture'],
original=orig)
entity.put()
photos.append(entity)
return photos
class Photo(object):
def __init__(self, album_id, graph=None, token=None, photo_id=None, photo_data=None):
if graph is None and token is None:
raise TypeError("Either a graph object must be provided or a token must be provided")
if graph is not None:
self.graph = graph
else:
self.graph = fb.GraphAPI(token.access_token)
if photo_id is None and photo_data is None:
            raise TypeError("Either a photo id or photo data must be provided")
if photo_id is not None:
photo_data = self.graph.get_object(photo_id)
self.me = photo_data['id']
self.name = photo_data.get('name', None)
self.thumbnail = photo_data['picture']
self.original = photo_data['images'][0]['source']
self.album_id = album_id
def get_model(self):
query = models.PhotoModel.all()
query.filter('me =', self.me)
try:
return query.fetch(1)[0]
except IndexError:
entity = models.PhotoModel(me=self.me,
album_id=self.album_id,
name=self.name,
thumbnail=self.thumbnail,
original=self.original)
entity.put()
return entity
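# Usage sketch (assumes `token` is a stored AccessTokenModel; the album name
# is illustrative):
#
#   albums = AlbumList(token)
#   wanted = albums.get_albums_by_name(['Profile Pictures'])
#   for name, album in wanted.items():
#       for photo in album.get_photos():
#           print photo.thumbnail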
| 0.003436 |
"""
Views and functions for serving static files. These are only to be used
during development, and SHOULD NOT be used in a production setting.
"""
from __future__ import unicode_literals
import mimetypes
import os
import posixpath
import re
import stat
from django.http import (
FileResponse, Http404, HttpResponse, HttpResponseNotModified,
HttpResponseRedirect,
)
from django.template import Context, Engine, TemplateDoesNotExist, loader
from django.utils.http import http_date, parse_http_date
from django.utils.six.moves.urllib.parse import unquote
from django.utils.translation import ugettext as _, ugettext_lazy
def serve(request, path, document_root=None, show_indexes=False):
"""
Serve static files below a given point in the directory structure.
To use, put a URL pattern such as::
from django.views.static import serve
url(r'^(?P<path>.*)$', serve, {'document_root': '/path/to/my/files/'})
in your URLconf. You must provide the ``document_root`` param. You may
also set ``show_indexes`` to ``True`` if you'd like to serve a basic index
of the directory. This index view will use the template hardcoded below,
but if you'd like to override it, you can create a template called
``static/directory_index.html``.
"""
path = posixpath.normpath(unquote(path))
path = path.lstrip('/')
newpath = ''
for part in path.split('/'):
if not part:
# Strip empty path components.
continue
drive, part = os.path.splitdrive(part)
head, part = os.path.split(part)
if part in (os.curdir, os.pardir):
# Strip '.' and '..' in path.
continue
newpath = os.path.join(newpath, part).replace('\\', '/')
if newpath and path != newpath:
return HttpResponseRedirect(newpath)
fullpath = os.path.join(document_root, newpath)
if os.path.isdir(fullpath):
if show_indexes:
return directory_index(newpath, fullpath)
raise Http404(_("Directory indexes are not allowed here."))
if not os.path.exists(fullpath):
raise Http404(_('"%(path)s" does not exist') % {'path': fullpath})
# Respect the If-Modified-Since header.
statobj = os.stat(fullpath)
if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
statobj.st_mtime, statobj.st_size):
return HttpResponseNotModified()
content_type, encoding = mimetypes.guess_type(fullpath)
content_type = content_type or 'application/octet-stream'
response = FileResponse(open(fullpath, 'rb'), content_type=content_type)
response["Last-Modified"] = http_date(statobj.st_mtime)
if stat.S_ISREG(statobj.st_mode):
response["Content-Length"] = statobj.st_size
if encoding:
response["Content-Encoding"] = encoding
return response
DEFAULT_DIRECTORY_INDEX_TEMPLATE = """
{% load i18n %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8" />
<meta http-equiv="Content-Language" content="en-us" />
<meta name="robots" content="NONE,NOARCHIVE" />
<title>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</title>
</head>
<body>
<h1>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</h1>
<ul>
{% ifnotequal directory "/" %}
<li><a href="../">../</a></li>
{% endifnotequal %}
{% for f in file_list %}
<li><a href="{{ f|urlencode }}">{{ f }}</a></li>
{% endfor %}
</ul>
</body>
</html>
"""
template_translatable = ugettext_lazy("Index of %(directory)s")
def directory_index(path, fullpath):
try:
t = loader.select_template([
'static/directory_index.html',
'static/directory_index',
])
except TemplateDoesNotExist:
t = Engine().from_string(DEFAULT_DIRECTORY_INDEX_TEMPLATE)
files = []
for f in os.listdir(fullpath):
if not f.startswith('.'):
if os.path.isdir(os.path.join(fullpath, f)):
f += '/'
files.append(f)
c = Context({
'directory': path + '/',
'file_list': files,
})
return HttpResponse(t.render(c))
def was_modified_since(header=None, mtime=0, size=0):
"""
Was something modified since the user last downloaded it?
header
This is the value of the If-Modified-Since header. If this is None,
I'll just return True.
mtime
This is the modification time of the item we're talking about.
size
This is the size of the item we're talking about.
"""
try:
if header is None:
raise ValueError
matches = re.match(r"^([^;]+)(; length=([0-9]+))?$", header,
re.IGNORECASE)
header_mtime = parse_http_date(matches.group(1))
header_len = matches.group(3)
if header_len and int(header_len) != size:
raise ValueError
if int(mtime) > header_mtime:
raise ValueError
except (AttributeError, ValueError, OverflowError):
return True
return False
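# Example (illustrative values): for a request carrying
#   If-Modified-Since: Sat, 29 Oct 1994 19:43:31 GMT; length=124
# was_modified_since() returns False -- and serve() replies 304 Not
# Modified -- only if the file's mtime is not newer than that date and
# its size is exactly 124 bytes; any parse failure falls back to True.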
| 0 |
# Copyright (C) 2010 Matthew McGowan
#
# Authors:
# Matthew McGowan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk, Gdk
from gi.repository import GObject
from gi.repository import Pango
from softwarecenter.utils import normalize_package_description
from softwarecenter.ui.gtk3.drawing import color_to_hex
from softwarecenter.ui.gtk3.utils import point_in
_PS = Pango.SCALE
class _SpecialCasePreParsers(object):
def preparse(self, k, desc):
if k is None:
return desc
func_name = '_%s_preparser' % k.lower().replace('-', '_')
if not hasattr(self, func_name):
return desc
f = getattr(self, func_name)
return f(desc)
# special case pre-parsers
def _skype_preparser(self, desc):
return desc.replace('. *', '.\n*')
def _texlive_fonts_extra_preparser(self, desc):
return desc.replace(')\n', ').\n').replace('--\n', '--\n\n')
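    # Example: preparse('texlive-fonts-extra', desc) is dispatched to
    # _texlive_fonts_extra_preparser() via the '_%s_preparser' name lookup;
    # package names without a matching method fall through unchanged.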
class EventHelper(dict):
# FIXME: workaround for broken event.copy()
class ButtonEvent(object):
def __init__(self, event):
self.x = event.x
self.y = event.y
self.type = event.type
self.button = event.button
VALID_KEYS = (
'event',
'layout',
'index',
'within-selection',
'drag-active',
'drag-context')
def __init__(self):
dict.__init__(self)
self.new_press(None, None, None, False)
    def __setitem__(self, k, v):
        if k not in EventHelper.VALID_KEYS:
            raise KeyError('"%s" is not a valid key' % k)
        return dict.__setitem__(self, k, v)
def new_press(self, event, layout, index, within_sel):
if event is None:
self['event'] = None
else:
# this should be simply event.copy() but that appears broken
# currently(?)
self['event'] = EventHelper.ButtonEvent(event)
self['layout'] = layout
self['index'] = index
self['within-selection'] = within_sel
self['drag-active'] = False
self['drag-context'] = None
class PangoLayoutProxy(object):
""" Because i couldn't figure out how to inherit from
pygi's Pango.Layout... """
def __init__(self, context):
self._layout = Pango.Layout.new(context)
def xy_to_index(self, x, y):
return self._layout.xy_to_index(x, y)
def index_to_pos(self, *args):
return self._layout.index_to_pos(*args)
# setter proxies
def set_attributes(self, attrs):
return self._layout.set_attributes(attrs)
def set_markup(self, markup):
return self._layout.set_markup(markup, -1)
def set_font_description(self, font_desc):
return self._layout.set_font_description(font_desc)
def set_wrap(self, wrap_mode):
return self._layout.set_wrap(wrap_mode)
def set_width(self, width):
return self._layout.set_width(width)
# getter proxies
def get_text(self):
return self._layout.get_text()
def get_pixel_extents(self):
return self._layout.get_pixel_extents()[1]
def get_cursor_pos(self, index):
return self._layout.get_cursor_pos(index)
def get_iter(self):
return self._layout.get_iter()
def get_extents(self):
return self._layout.get_extents()
class Layout(PangoLayoutProxy):
def __init__(self, widget, text=""):
PangoLayoutProxy.__init__(self, widget.get_pango_context())
self.widget = widget
self.length = 0
self.indent = 0
self.vspacing = None
self.is_bullet = False
self.index = 0
self.allocation = Gdk.Rectangle()
self._default_attrs = True
self.set_markup(text)
def __len__(self):
return self.length
def set_text(self, text):
PangoLayoutProxy.set_markup(self, text)
self.length = len(self.get_text())
def set_allocation(self, x, y, w, h):
a = self.allocation
a.x = x
a.y = y
a.width = w
a.height = h
def get_position(self):
return self.allocation.x, self.allocation.y
def cursor_up(self, cursor, target_x=-1):
layout = self.widget.order[cursor.paragraph]
pos = layout.index_to_pos(cursor.index)
x, y = pos.x, pos.y
if target_x >= 0:
x = target_x
y -= _PS * self.widget.line_height
return layout.xy_to_index(x, y), (x, y)
def cursor_down(self, cursor, target_x=-1):
layout = self.widget.order[cursor.paragraph]
pos = layout.index_to_pos(cursor.index)
x, y = pos.x, pos.y
if target_x >= 0:
x = target_x
y += _PS * self.widget.line_height
return layout.xy_to_index(x, y), (x, y)
def index_at(self, px, py):
#wa = self.widget.get_allocation()
x, y = self.get_position() # layout allocation
(_, index, k) = self.xy_to_index((px - x) * _PS, (py - y) * _PS)
return point_in(self.allocation, px, py), index + k
def reset_attrs(self):
#~ self.set_attributes(Pango.AttrList())
self.set_markup(self.get_text())
self._default_attrs = True
def highlight(self, start, end, bg, fg):
# FIXME: AttrBackground doesnt seem to be expose by gi yet??
#~ attrs = Pango.AttrList()
#~ attrs.insert(Pango.AttrBackground(bg.red, bg.green, bg.blue, start,
#~ end))
#~ attrs.insert(Pango.AttrForeground(fg.red, fg.green, fg.blue, start,
#~ end))
#~ self.set_attributes(attrs)
# XXX: workaround
text = self.get_text()
new_text = (text[:start] + '<span background="%s" foreground="%s">' %
(bg, fg))
new_text += text[start:end]
new_text += '</span>' + text[end:]
self.set_markup(new_text)
self._default_attrs = False
def highlight_all(self, bg, fg):
# FIXME: AttrBackground doesnt seem to be expose by gi yet??
#~ attrs = Pango.AttrList()
#~ attrs.insert(Pango.AttrBackground(bg.red, bg.green, bg.blue, 0, -1))
#~ attrs.insert(Pango.AttrForeground(fg.red, fg.green, fg.blue, 0, -1))
#~ self.set_attributes(attrs)
# XXX: workaround
text = self.get_text()
self.set_markup('<span background="%s" foreground="%s">%s</span>' %
(bg, fg, text))
self._default_attrs = False
class Cursor(object):
WORD_TERMINATORS = (' ',) # empty space. suggestions recommended...
def __init__(self, parent):
self.parent = parent
self.index = 0
self.paragraph = 0
def is_min(self, cursor):
return self.get_position() <= cursor.get_position()
def is_max(self, cursor):
return self.get_position() >= cursor.get_position()
def switch(self, cursor):
this_pos = self.get_position()
other_pos = cursor.get_position()
self.set_position(*other_pos)
cursor.set_position(*this_pos)
def same_line(self, cursor):
return self.get_current_line()[0] == cursor.get_current_line()[0]
def get_current_line(self):
keep_going = True
i, it = self.index, self.parent.order[self.paragraph].get_iter()
ln = 0
while keep_going:
l = it.get_line()
ls = l.start_index
le = ls + l.length
if i >= ls and i <= le:
if not it.at_last_line():
le -= 1
                # also hand back the Pango line so callers can unpack three values
                return (self.paragraph, ln), (ls, le), l
ln += 1
keep_going = it.next_line()
return None, None, None
def get_current_word(self):
keep_going = True
layout = self.parent.order[self.paragraph]
text = layout.get_text()
i, it = self.index, layout.get_iter()
start = 0
while keep_going:
j = it.get_index()
if j >= i and text[j] in self.WORD_TERMINATORS:
return self.paragraph, (start, j)
elif text[j] in self.WORD_TERMINATORS:
start = j + 1
keep_going = it.next_char()
return self.paragraph, (start, len(layout))
def set_position(self, paragraph, index):
self.index = index
self.paragraph = paragraph
def get_position(self):
return self.paragraph, self.index
class PrimaryCursor(Cursor):
def __init__(self, parent):
Cursor.__init__(self, parent)
def __repr__(self):
return 'Cursor: ' + str((self.paragraph, self.index))
def get_rectangle(self, layout, a):
if self.index < len(layout):
pos = layout.get_cursor_pos(self.index)[1]
else:
pos = layout.get_cursor_pos(len(layout))[1]
x = layout.allocation.x + pos.x / _PS
y = layout.allocation.y + pos.y / _PS
return x, y, 1, pos.height / _PS
def draw(self, cr, layout, a):
cr.set_source_rgb(0, 0, 0)
cr.rectangle(*self.get_rectangle(layout, a))
cr.fill()
def zero(self):
self.index = 0
self.paragraph = 0
class SelectionCursor(Cursor):
def __init__(self, cursor):
Cursor.__init__(self, cursor.parent)
self.cursor = cursor
self.target_x = None
self.target_x_indent = 0
self.restore_point = None
def __repr__(self):
return 'Selection: ' + str(self.get_range())
def __nonzero__(self):
c = self.cursor
return (self.paragraph, self.index) != (c.paragraph, c.index)
@property
def min(self):
c = self.cursor
return min((self.paragraph, self.index), (c.paragraph, c.index))
@property
def max(self):
c = self.cursor
return max((self.paragraph, self.index), (c.paragraph, c.index))
def clear(self, key=None):
self.index = self.cursor.index
self.paragraph = self.cursor.paragraph
self.restore_point = None
if key not in (Gdk.KEY_uparrow, Gdk.KEY_downarrow):
self.target_x = None
self.target_x_indent = 0
def set_target_x(self, x, indent):
self.target_x = x
self.target_x_indent = indent
def get_range(self):
return self.min, self.max
def within_selection(self, pos):
l = list(self.get_range())
l.append(pos)
l.sort()
# sort the list, see if pos is in between the extents of the selection
# range, if it is, pos is within the selection
if pos in l:
return l.index(pos) == 1
return False
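    # Example (illustrative positions): with a selection spanning (0, 2) to
    # (1, 5), within_selection((0, 10)) sorts to [(0, 2), (0, 10), (1, 5)]
    # and returns True because the probe lands between the two extents.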
class TextBlock(Gtk.EventBox):
PAINT_PRIMARY_CURSOR = False
DEBUG_PAINT_BBOXES = False
BULLET_POINT = u' \u2022 '
def __init__(self):
Gtk.EventBox.__init__(self)
self.set_visible_window(False)
self.set_size_request(200, -1)
self.set_can_focus(True)
self.set_events(Gdk.EventMask.KEY_PRESS_MASK |
Gdk.EventMask.ENTER_NOTIFY_MASK |
Gdk.EventMask.LEAVE_NOTIFY_MASK |
Gdk.EventMask.BUTTON_RELEASE_MASK |
Gdk.EventMask.POINTER_MOTION_MASK)
self._is_new = False
self.order = []
self.cursor = cur = PrimaryCursor(self)
self.selection = sel = SelectionCursor(self.cursor)
self.clipboard = None
#~ event_helper = EventHelper()
self._update_cached_layouts()
self._test_layout = self.create_pango_layout('')
#self._xterm = Gdk.Cursor.new(Gdk.XTERM)
# popup menu and menuitem's
self.copy_menuitem = Gtk.ImageMenuItem.new_from_stock(
Gtk.STOCK_COPY, None)
self.select_all_menuitem = Gtk.ImageMenuItem.new_from_stock(
Gtk.STOCK_SELECT_ALL, None)
self.menu = Gtk.Menu()
self.menu.attach_to_widget(self, None)
self.menu.append(self.copy_menuitem)
self.menu.append(self.select_all_menuitem)
self.menu.show_all()
self.copy_menuitem.connect('select', self._menu_do_copy, sel)
self.select_all_menuitem.connect('select', self._menu_do_select_all,
cur, sel)
#~ Gtk.drag_source_set(self, Gdk.ModifierType.BUTTON1_MASK,
#~ None, Gdk.DragAction.COPY)
#~ Gtk.drag_source_add_text_targets(self)
#~ self.connect('drag-begin', self._on_drag_begin)
#~ self.connect('drag-data-get', self._on_drag_data_get, sel)
event_helper = EventHelper()
self.connect('button-press-event', self._on_press, event_helper, cur,
sel)
self.connect('button-release-event', self._on_release, event_helper,
cur, sel)
self.connect('motion-notify-event', self._on_motion, event_helper,
cur, sel)
self.connect('key-press-event', self._on_key_press, cur, sel)
self.connect('key-release-event', self._on_key_release, cur, sel)
self.connect('focus-in-event', self._on_focus_in)
self.connect('focus-out-event', self._on_focus_out)
self.connect("size-allocate", self.on_size_allocate)
self.connect('style-updated', self._on_style_updated)
def on_size_allocate(self, *args):
allocation = self.get_allocation()
width = allocation.width
x = y = 0
for layout in self.order:
layout.set_width(_PS * (width - layout.indent))
if layout.index > 0:
y += (layout.vspacing or self.line_height)
e = layout.get_pixel_extents()
if self.get_direction() != Gtk.TextDirection.RTL:
layout.set_allocation(e.x + layout.indent, y + e.y,
width - layout.indent, e.height)
else:
layout.set_allocation(x + width - e.x - e.width -
layout.indent - 1, y + e.y, width - layout.indent,
e.height)
y += e.y + e.height
# overrides
def do_get_request_mode(self):
return Gtk.SizeRequestMode.HEIGHT_FOR_WIDTH
def do_get_preferred_height_for_width(self, width):
height = 0
layout = self._test_layout
for l in self.order:
layout.set_text(l.get_text(), -1)
layout.set_width(_PS * (width - l.indent))
lh = layout.get_pixel_extents()[1].height
height += lh + (l.vspacing or self.line_height)
height = max(50, height)
return height, height
def do_draw(self, cr):
self.render(self, cr)
def _config_colors(self):
context = self.get_style_context()
context.save()
context.add_class(Gtk.STYLE_CLASS_HIGHLIGHT)
state = self.get_state_flags()
if self.has_focus():
state |= Gtk.StateFlags.FOCUSED
context.set_state(state)
self._bg = color_to_hex(context.get_background_color(state))
self._fg = color_to_hex(context.get_color(state))
context.restore()
def _on_style_updated(self, widget):
self._config_colors()
self._update_cached_layouts()
# def _on_drag_begin(self, widgets, context, event_helper):
# print 'drag: begin'
def _on_drag_data_get(self, widget, context, selection, info, timestamp,
sel):
# print 'drag: get data'
text = self.get_selected_text(sel)
selection.set_text(text, -1)
def _on_focus_in(self, widget, event):
self._config_colors()
def _on_focus_out(self, widget, event):
self._config_colors()
def _on_motion(self, widget, event, event_helper, cur, sel):
if not (event.state == Gdk.ModifierType.BUTTON1_MASK):
# or not self.has_focus():
return
# check if we have moved enough to count as a drag
press = event_helper['event']
# mvo: how can this be?
if not press:
return
start_x, start_y = int(press.x), int(press.y)
cur_x, cur_y = int(event.x), int(event.y)
if (not event_helper['drag-active'] and
self.drag_check_threshold(start_x, start_y, cur_x, cur_y)):
event_helper['drag-active'] = True
if not event_helper['drag-active']:
return
#~ if (event_helper['within-selection'] and
#~ not event_helper['drag-context']):
#~ target_list = Gtk.TargetList()
#~ target_list.add_text_targets(80)
#~ ctx = self.drag_begin(target_list, # target list
#~ Gdk.DragAction.COPY, # action
#~ 1, # initiating button
#~ event) # event
#~
#~ event_helper['drag-context'] = ctx
#~ return
for layout in self.order:
point_in, index = layout.index_at(cur_x, cur_y)
if point_in:
cur.set_position(layout.index, index)
self.queue_draw()
break
def _on_press(self, widget, event, event_helper, cur, sel):
if sel and not self.has_focus():
self.grab_focus()
return # spot the difference
if not self.has_focus():
self.grab_focus()
if event.button == 3:
self._button3_action(cur, sel, event)
return
elif event.button != 1:
return
for layout in self.order:
x, y = int(event.x), int(event.y)
point_in, index = layout.index_at(x, y)
if point_in:
within_sel = False
#~ within_sel = sel.within_selection((layout.index, index))
if not within_sel:
cur.set_position(layout.index, index)
sel.clear()
#~ event_helper.new_press(event.copy(), layout, index,
#~ within_sel)
event_helper.new_press(event, layout, index, within_sel)
break
def _on_release(self, widget, event, event_helper, cur, sel):
if not event_helper['event']:
return
# check if a drag occurred
if event_helper['drag-active']:
# if so, do not handle release
return
# else, handle release, do click
cur.set_position(event_helper['layout'].index,
event_helper['index'])
sel.clear()
press = event_helper['event']
if (press.type == Gdk.EventType._2BUTTON_PRESS):
self._2click_select(cur, sel)
elif (press.type == Gdk.EventType._3BUTTON_PRESS):
self._3click_select(cur, sel)
self.queue_draw()
def _menu_do_copy(self, item, sel):
self._copy_text(sel)
def _menu_do_select_all(self, item, cur, sel):
self._select_all(cur, sel)
def _button3_action(self, cur, sel, event):
start, end = sel.get_range()
self.copy_menuitem.set_sensitive(True)
self.select_all_menuitem.set_sensitive(True)
if not sel:
self.copy_menuitem.set_sensitive(False)
elif start == (0, 0) and \
end == (len(self.order) - 1, len(self.order[-1])):
self.select_all_menuitem.set_sensitive(False)
self.menu.popup(None, # parent_menu_shell,
None, # parent_menu_item,
None, # GtkMenuPositionFunc func,
None, # data,
event.button,
event.time)
def _on_key_press(self, widget, event, cur, sel):
kv = event.keyval
s, i = cur.paragraph, cur.index
handled_keys = True
ctrl = (event.state & Gdk.ModifierType.CONTROL_MASK) > 0
shift = (event.state & Gdk.ModifierType.SHIFT_MASK) > 0
if not self.PAINT_PRIMARY_CURSOR and \
kv in (Gdk.KEY_uparrow, Gdk.KEY_downarrow) and not sel:
return False
if kv == Gdk.KEY_Tab:
handled_keys = False
elif kv == Gdk.KEY_Left:
if ctrl:
self._select_left_word(cur, sel, s, i)
else:
self._select_left(cur, sel, s, i, shift)
if shift:
layout = self._get_cursor_layout()
pos = layout.index_to_pos(cur.index)
sel.set_target_x(pos.x, layout.indent)
elif kv == Gdk.KEY_Right:
if ctrl:
self._select_right_word(cur, sel, s, i)
else:
self._select_right(cur, sel, s, i, shift)
if shift:
layout = self._get_cursor_layout()
pos = layout.index_to_pos(cur.index)
sel.set_target_x(pos.x, layout.indent)
elif kv == Gdk.KEY_Up:
if ctrl:
if i == 0:
if s > 0:
cur.paragraph -= 1
cur.set_position(cur.paragraph, 0)
elif sel and not shift:
cur.set_position(*sel.min)
else:
self._select_up(cur, sel)
elif kv == Gdk.KEY_Down:
if ctrl:
if i == len(self._get_layout(cur)):
if s + 1 < len(self.order):
cur.paragraph += 1
i = len(self._get_layout(cur))
cur.set_position(cur.paragraph, i)
elif sel and not shift:
cur.set_position(*sel.max)
else:
self._select_down(cur, sel)
elif kv == Gdk.KEY_Home:
if shift:
self._select_home(cur, sel, self.order[cur.paragraph])
else:
cur.set_position(0, 0)
elif kv == Gdk.KEY_End:
if shift:
self._select_end(cur, sel, self.order[cur.paragraph])
else:
cur.paragraph = len(self.order) - 1
cur.index = len(self._get_layout(cur))
else:
handled_keys = False
if not shift and handled_keys:
sel.clear(kv)
self.queue_draw()
return handled_keys
def _on_key_release(self, widget, event, cur, sel):
ctrl = (event.state & Gdk.ModifierType.CONTROL_MASK) > 0
if ctrl:
if event.keyval == Gdk.KEY_a:
self._select_all(cur, sel)
elif event.keyval == Gdk.KEY_c:
self._copy_text(sel)
self.queue_draw()
def _select_up(self, cur, sel):
#~ if sel and not cur.is_min(sel) and cur.same_line(sel):
#~ cur.switch(sel)
s = cur.paragraph
layout = self._get_layout(cur)
if sel.target_x:
x = sel.target_x
if sel.target_x_indent:
x += (sel.target_x_indent - layout.indent) * _PS
(_, j, k), (x, y) = layout.cursor_up(cur, x)
j += k
else:
(_, j, k), (x, y) = layout.cursor_up(cur)
j += k
sel.set_target_x(x, layout.indent)
if (s, j) != cur.get_position():
cur.set_position(s, j)
elif s > 0:
cur.paragraph = s - 1
layout = self._get_layout(cur)
if sel.target_x_indent:
x += (sel.target_x_indent - layout.indent) * _PS
y = layout.get_extents()[0].height
(_, j, k) = layout.xy_to_index(x, y)
cur.set_position(s - 1, j + k)
else:
return False
return True
def _select_down(self, cur, sel):
#~ if sel and not cur.is_max(sel) and cur.same_line(sel):
#~ cur.switch(sel)
s = cur.paragraph
layout = self._get_layout(cur)
if sel.target_x:
x = sel.target_x
if sel.target_x_indent:
x += (sel.target_x_indent - layout.indent) * _PS
(_, j, k), (x, y) = layout.cursor_down(cur, x)
j += k
else:
(_, j, k), (x, y) = layout.cursor_down(cur)
j += k
sel.set_target_x(x, layout.indent)
if (s, j) != cur.get_position():
cur.set_position(s, j)
elif s < len(self.order) - 1:
cur.paragraph = s + 1
layout = self._get_layout(cur)
if sel.target_x_indent:
x += (sel.target_x_indent - layout.indent) * _PS
y = 0
(_, j, k) = layout.xy_to_index(x, y)
cur.set_position(s + 1, j + k)
else:
return False
return True
def _2click_select(self, cursor, sel):
self._select_word(cursor, sel)
def _3click_select(self, cursor, sel):
# XXX:
# _select_line seems to expose the following Pango issue:
# (description.py:3892): Pango-CRITICAL **:
# pango_layout_line_unref: assertion `private->ref_count > 0'
# failed
# ... which can result in a segfault
#~ self._select_line(cursor, sel)
self._select_all(cursor, sel)
def _copy_text(self, sel):
text = self.get_selected_text(sel)
if not self.clipboard:
display = Gdk.Display.get_default()
selection = Gdk.Atom.intern("CLIPBOARD", False)
self.clipboard = Gtk.Clipboard.get_for_display(display, selection)
self.clipboard.clear()
self.clipboard.set_text(text.strip(), -1)
def _select_end(self, cur, sel, layout):
if not cur.is_max(sel):
cur.switch(sel)
n, r, line = cur.get_current_line()
cur_pos = cur.get_position()
if cur_pos == (len(self.order) - 1, len(self.order[-1])): # abs end
if sel.restore_point:
# reinstate restore point
cur.set_position(*sel.restore_point)
else:
# reselect the line end
n, r, line = sel.get_current_line()
cur.set_position(n[0], r[1])
elif cur_pos[1] == len(self.order[n[0]]): # para end
# select abs end
cur.set_position(len(self.order) - 1, len(self.order[-1]))
elif cur_pos == (n[0], r[1]): # line end
# select para end
cur.set_position(n[0], len(self.order[n[0]]))
else: # not at any end, within line somewhere
# select line end
if sel:
sel.restore_point = cur_pos
cur.set_position(n[0], r[1])
def _select_home(self, cur, sel, layout):
if not cur.is_min(sel):
cur.switch(sel)
n, r, line = cur.get_current_line()
cur_pos = cur.get_position()
if cur_pos == (0, 0): # absolute home
if sel.restore_point:
cur.set_position(*sel.restore_point)
else:
n, r, line = sel.get_current_line()
cur.set_position(n[0], r[0])
elif cur_pos[1] == 0: # para home
cur.set_position(0, 0)
elif cur_pos == (n[0], r[0]): # line home
cur.set_position(n[0], 0)
else: # not at any home, within line somewhere
if sel:
sel.restore_point = cur_pos
cur.set_position(n[0], r[0])
def _select_left(self, cur, sel, s, i, shift):
if not shift and not cur.is_min(sel):
cur.switch(sel)
return
if i > 0:
cur.set_position(s, i - 1)
elif cur.paragraph > 0:
cur.paragraph -= 1
cur.set_position(s - 1, len(self._get_layout(cur)))
def _select_right(self, cur, sel, s, i, shift):
if not shift and not cur.is_max(sel):
cur.switch(sel)
return
if i < len(self._get_layout(cur)):
cur.set_position(s, i + 1)
elif s < len(self.order) - 1:
cur.set_position(s + 1, 0)
def _select_left_word(self, cur, sel, s, i):
if i > 0:
cur.index -= 1
elif s > 0:
cur.paragraph -= 1
cur.index = len(self._get_layout(cur))
paragraph, word = cur.get_current_word()
if not word:
return
cur.set_position(paragraph, max(0, word[0] - 1))
def _select_right_word(self, cur, sel, s, i):
ll = len(self._get_layout(cur))
if i < ll:
cur.index += 1
elif s + 1 < len(self.order):
cur.paragraph += 1
cur.index = 0
paragraph, word = cur.get_current_word()
if not word:
return
cur.set_position(paragraph, min(word[1] + 1, ll))
def _select_word(self, cursor, sel):
paragraph, word = cursor.get_current_word()
if word:
cursor.set_position(paragraph, word[1] + 1)
sel.set_position(paragraph, word[0])
if self.get_direction() == Gtk.TextDirection.RTL:
cursor.switch(sel)
def _select_line(self, cursor, sel):
        n, r, line = self.cursor.get_current_line()
sel.set_position(n[0], r[0])
cursor.set_position(n[0], r[1])
if self.get_direction() == Gtk.TextDirection.RTL:
cursor.switch(sel)
def _select_all(self, cursor, sel):
layout = self.order[-1]
sel.set_position(0, 0)
cursor.set_position(layout.index, len(layout))
if self.get_direction() == Gtk.TextDirection.RTL:
cursor.switch(sel)
def _selection_copy(self, layout, sel, new_para=True):
i = layout.index
start, end = sel.get_range()
if new_para:
text = '\n\n'
else:
text = ''
if sel and i >= start[0] and i <= end[0]:
if i == start[0]:
if end[0] > i:
return text + layout.get_text()[start[1]: len(layout)]
else:
return text + layout.get_text()[start[1]: end[1]]
elif i == end[0]:
if start[0] < i:
return text + layout.get_text()[0: end[1]]
else:
return text + layout.get_text()[start[1]: end[1]]
else:
return text + layout.get_text()
return ''
def _new_layout(self, text=''):
layout = Layout(self, text)
layout.set_wrap(Pango.WrapMode.WORD_CHAR)
return layout
def _update_cached_layouts(self):
self._bullet = self._new_layout()
self._bullet.set_markup(self.BULLET_POINT)
font_desc = Pango.FontDescription()
font_desc.set_weight(Pango.Weight.BOLD)
self._bullet.set_font_description(font_desc)
e = self._bullet.get_pixel_extents()
self.indent, self.line_height = e.width, e.height
def _selection_highlight(self, layout, sel, bg, fg):
i = layout.index
start, end = sel.get_range()
if sel and i >= start[0] and i <= end[0]:
if i == start[0]:
if end[0] > i:
layout.highlight(start[1], len(layout), bg, fg)
else:
layout.highlight(start[1], end[1], bg, fg)
elif i == end[0]:
if start[0] < i:
layout.highlight(0, end[1], bg, fg)
else:
layout.highlight(start[1], end[1], bg, fg)
else:
layout.highlight_all(bg, fg)
elif not layout._default_attrs:
layout.reset_attrs()
def _paint_bullet_point(self, cr, x, y):
# draw the layout
Gtk.render_layout(self.get_style_context(),
cr, # state
x, # x coord
y, # y coord
self._bullet._layout) # a Pango.Layout()
def _get_layout(self, cursor):
return self.order[cursor.paragraph]
def _get_cursor_layout(self):
return self.order[self.cursor.paragraph]
def _get_selection_layout(self):
return self.order[self.selection.paragraph]
def render(self, widget, cr):
if not self.order:
return
a = self.get_allocation()
for layout in self.order:
lx, ly = layout.get_position()
self._selection_highlight(layout,
self.selection,
self._bg, self._fg)
if layout.is_bullet:
if self.get_direction() != Gtk.TextDirection.RTL:
indent = layout.indent - self.indent
else:
indent = a.width - layout.indent
self._paint_bullet_point(cr, indent, ly)
if self.DEBUG_PAINT_BBOXES:
la = layout.allocation
cr.rectangle(la.x, la.y, la.width, la.height)
cr.set_source_rgb(1, 0, 0)
cr.stroke()
# draw the layout
Gtk.render_layout(self.get_style_context(),
cr,
lx, # x coord
ly, # y coord
layout._layout) # a Pango.Layout()
# draw the cursor
if self.PAINT_PRIMARY_CURSOR and self.has_focus():
self.cursor.draw(cr, self._get_layout(self.cursor), a)
def append_paragraph(self, p, vspacing=None):
l = self._new_layout()
l.index = len(self.order)
l.vspacing = vspacing
l.set_text(p)
self.order.append(l)
def append_bullet(self, point, indent_level, vspacing=None):
l = self._new_layout()
l.index = len(self.order)
l.indent = self.indent * (indent_level + 1)
l.vspacing = vspacing
l.is_bullet = True
l.set_text(point)
self.order.append(l)
def copy_clipboard(self):
self._copy_text(self.selection)
def get_selected_text(self, sel=None):
text = ''
if not sel:
sel = self.selection
for layout in self.order:
text += self._selection_copy(layout, sel, (layout.index > 0))
return text
def select_all(self):
self._select_all(self.cursor, self.selection)
self.queue_draw()
def finished(self):
self.queue_resize()
def clear(self, key=None):
self.cursor.zero()
self.selection.clear(key)
self.order = []
class AppDescription(Gtk.VBox):
TYPE_PARAGRAPH = 0
TYPE_BULLET = 1
_preparser = _SpecialCasePreParsers()
def __init__(self):
Gtk.VBox.__init__(self)
self.description = TextBlock()
self.pack_start(self.description, False, False, 0)
self._prev_type = None
def _part_is_bullet(self, part):
        # normalize_package_description() ensures that we only have "* " bullets
i = part.find("* ")
return i > -1, i
def _parse_desc(self, desc, pkgname):
""" Attempt to maintain original fixed width layout, while
reconstructing the description into text blocks
(either paragraphs or bullets) which are line-wrap friendly.
"""
        # pre-parse the description if a special case exists for the given pkgname
desc = self._preparser.preparse(pkgname, desc)
parts = normalize_package_description(desc).split('\n')
for part in parts:
if not part:
continue
is_bullet, indent = self._part_is_bullet(part)
if is_bullet:
self.append_bullet(part, indent)
else:
self.append_paragraph(part)
self.description.finished()
def clear(self):
self.description.clear()
def append_paragraph(self, p):
vspacing = self.description.line_height
self.description.append_paragraph(p.strip(), vspacing)
self._prev_type = self.TYPE_PARAGRAPH
def append_bullet(self, point, indent_level):
if self._prev_type == self.TYPE_BULLET:
vspacing = int(0.4 * self.description.line_height)
else:
vspacing = self.description.line_height
self.description.append_bullet(
point[indent_level + 2:], indent_level, vspacing)
self._prev_type = self.TYPE_BULLET
def set_description(self, raw_desc, pkgname):
self.clear()
if type(raw_desc) == str:
encoded_desc = unicode(raw_desc, 'utf8').encode('utf8')
else:
encoded_desc = raw_desc.encode('utf8')
self._text = GObject.markup_escape_text(encoded_desc)
self._parse_desc(self._text, pkgname)
self.show_all()
# easy access to some TextBlock methods
def copy_clipboard(self):
return TextBlock.copy_clipboard(self.description)
def get_selected_text(self):
return TextBlock.get_selected_text(self.description)
def select_all(self):
return TextBlock.select_all(self.description)
def get_test_description_window():
EXAMPLE0 = """p7zip is the Unix port of 7-Zip, a file archiver that \
archives with very high compression ratios.
p7zip-full provides:
- /usr/bin/7za a standalone version of the 7-zip tool that handles
7z archives (implementation of the LZMA compression algorithm) and some \
other formats.
- /usr/bin/7z not only does it handle 7z but also ZIP, Zip64, CAB, RAR, \
ARJ, GZIP,
BZIP2, TAR, CPIO, RPM, ISO and DEB archives. 7z compression is 30-50% \
better than ZIP compression.
p7zip provides 7zr, a light version of 7za, and p7zip a gzip like wrapper \
around 7zr."""
EXAMPLE1 = """Transmageddon supports almost any format as its input and \
can generate a very large host of output files. The goal of the application \
was to help people to create the files they need to be able to play on their \
mobile devices and for people not hugely experienced with multimedia to \
generate a multimedia file without having to resort to command line tools \
with ungainly syntaxes.
The currently supported codecs are:
* Containers:
- Ogg
- Matroska
- AVI
- MPEG TS
- flv
- QuickTime
- MPEG4
- 3GPP
- MXT
* Audio encoders:
- Vorbis
- FLAC
- MP3
- AAC
- AC3
- Speex
- Celt
* Video encoders:
- Theora
- Dirac
- H264
- MPEG2
- MPEG4/DivX5
- xvid
- DNxHD
It also provide the support for the GStreamer's plugins auto-search."""
EXAMPLE2 = """File-roller is an archive manager for the GNOME \
environment. It allows you to:
* Create and modify archives.
* View the content of an archive.
* View a file contained in an archive.
* Extract files from the archive.
File-roller supports the following formats:
* Tar (.tar) archives, including those compressed with
gzip (.tar.gz, .tgz), bzip (.tar.bz, .tbz), bzip2 (.tar.bz2, .tbz2),
compress (.tar.Z, .taz), lzip (.tar.lz, .tlz), lzop (.tar.lzo, .tzo),
lzma (.tar.lzma) and xz (.tar.xz)
* Zip archives (.zip)
* Jar archives (.jar, .ear, .war)
* 7z archives (.7z)
* iso9660 CD images (.iso)
* Lha archives (.lzh)
* Single files compressed with gzip (.gz), bzip (.bz), bzip2 (.bz2),
compress (.Z), lzip (.lz), lzop (.lzo), lzma (.lzma) and xz (.xz)
File-roller doesn't perform archive operations by itself, but relies on \
standard tools for this."""
EXAMPLE3 = """This package includes the following CTAN packages:
Asana-Math -- A font to typeset maths in Xe(La)TeX.
albertus --
allrunes -- Fonts and LaTeX package for almost all runes.
antiqua -- the URW Antiqua Condensed Font.
antp -- Antykwa Poltawskiego: a Type 1 family of Polish traditional type.
antt -- Antykwa Torunska: a Type 1 family of a Polish traditional type.
apl -- Fonts for typesetting APL programs.
ar -- Capital A and capital R ligature for Apsect Ratio.
archaic -- A collection of archaic fonts.
arev -- Fonts and LaTeX support files for Arev Sans.
ascii -- Support for IBM "standard ASCII" font.
astro -- Astronomical (planetary) symbols.
atqolive --
augie -- Calligraphic font for typesetting handwriting.
auncial-new -- Artificial Uncial font and LaTeX support macros.
aurical -- Calligraphic fonts for use with LaTeX in T1 encoding.
barcodes -- Fonts for making barcodes.
bayer -- Herbert Bayers Universal Font For Metafont.
bbding -- A symbol (dingbat) font and LaTeX macros for its use.
bbm -- "Blackboard-style" cm fonts.
bbm-macros -- LaTeX support for "blackboard-style" cm fonts.
bbold -- Sans serif blackboard bold.
belleek -- Free replacement for basic MathTime fonts.
bera -- Bera fonts.
blacklettert1 -- T1-encoded versions of Haralambous old German fonts.
boisik -- A font inspired by Baskerville design.
bookhands -- A collection of book-hand fonts.
braille -- Support for braille.
brushscr -- A handwriting script font.
calligra -- Calligraphic font.
carolmin-ps -- Adobe Type 1 format of Carolingian Minuscule fonts.
cherokee -- A font for the Cherokee script.
clarendo --
cm-lgc -- Type 1 CM-based fonts for Latin, Greek and Cyrillic.
cmbright -- Computer Modern Bright fonts.
cmll -- Symbols for linear logic.
cmpica -- A Computer Modern Pica variant.
coronet --
courier-scaled -- Provides a scaled Courier font.
cryst -- Font for graphical symbols used in crystallography.
cyklop -- The Cyclop typeface.
dancers -- Font for Conan Doyle's "The Dancing Men".
dice -- A font for die faces.
dictsym -- DictSym font and macro package
dingbat -- Two dingbat symbol fonts.
doublestroke -- Typeset mathematical double stroke symbols.
dozenal -- Typeset documents using base twelve numbering (also called
"dozenal")
duerer -- Computer Duerer fonts.
duerer-latex -- LaTeX support for the Duerer fonts.
ean -- Macros for making EAN barcodes.
ecc -- Sources for the European Concrete fonts.
eco -- Oldstyle numerals using EC fonts.
eiad -- Traditional style Irish fonts.
eiad-ltx -- LaTeX support for the eiad font.
elvish -- Fonts for typesetting Tolkien Elvish scripts.
epigrafica -- A Greek and Latin font.
epsdice -- A scalable dice "font".
esvect -- Vector arrows.
eulervm -- Euler virtual math fonts.
euxm --
feyn -- A font for in-text Feynman diagrams.
fge -- A font for Frege's Grundgesetze der Arithmetik.
foekfont -- The title font of the Mads Fok magazine.
fonetika -- Support for the danish "Dania" phonetic system.
fourier -- Using Utopia fonts in LaTeX documents.
fouriernc -- Use New Century Schoolbook text with Fourier maths fonts.
frcursive -- French cursive hand fonts.
garamond --
genealogy -- A compilation genealogy font.
gfsartemisia -- A modern Greek font design.
gfsbodoni -- A Greek and Latin font based on Bodoni.
gfscomplutum -- A Greek font with a long history.
gfsdidot -- A Greek font based on Didot's work.
gfsneohellenic -- A Greek font in the Neo-Hellenic style.
gfssolomos -- A Greek-alphabet font.
gothic -- A collection of old German-style fonts.
greenpoint -- The Green Point logo.
groff --
grotesq -- the URW Grotesk Bold Font.
hands -- Pointing hand font.
hfbright -- The hfbright fonts.
hfoldsty -- Old style numerals with EC fonts.
ifsym -- A collection of symbols.
inconsolata -- A monospaced font, with support files for use with TeX.
initials -- Adobe Type 1 decorative initial fonts.
iwona -- A two-element sans-serif font.
junicode -- A TrueType font for mediaevalists.
kixfont -- A font for KIX codes.
knuthotherfonts --
kpfonts -- A complete set of fonts for text and mathematics.
kurier -- A two-element sans-serif typeface.
lettrgth --
lfb -- A Greek font with normal and bold variants.
libertine -- Use the font Libertine with LaTeX.
libris -- Libris ADF fonts, with LaTeX support.
linearA -- Linear A script fonts.
logic -- A font for electronic logic design.
lxfonts -- Set of slide fonts based on CM.
ly1 -- Support for LY1 LaTeX encoding.
marigold --
mathabx -- Three series of mathematical symbols.
mathdesign -- Mathematical fonts to fit with particular text fonts.
mnsymbol -- Mathematical symbol font for Adobe MinionPro.
nkarta -- A "new" version of the karta cartographic fonts.
ocherokee -- LaTeX Support for the Cherokee language.
ogham -- Fonts for typesetting Ogham script.
oinuit -- LaTeX Support for the Inuktitut Language.
optima --
orkhun -- A font for orkhun script.
osmanian -- Osmanian font for writing Somali.
pacioli -- Fonts designed by Fra Luca de Pacioli in 1497.
pclnfss -- Font support for current PCL printers.
phaistos -- Disk of Phaistos font.
phonetic -- MetaFont Phonetic fonts, based on Computer Modern.
pigpen -- A font for the pigpen (or masonic) cipher.
psafm --
punk -- Donald Knuth's punk font.
recycle -- A font providing the "recyclable" logo.
sauter -- Wide range of design sizes for CM fonts.
sauterfonts -- Use sauter fonts in LaTeX.
semaphor -- Semaphore alphabet font.
simpsons -- MetaFont source for Simpsons characters.
skull -- A font to draw a skull.
staves -- Typeset Icelandic staves and runic letters.
tapir -- A simple geometrical font.
tengwarscript -- LaTeX support for using Tengwar fonts.
trajan -- Fonts from the Trajan column in Rome.
umtypewriter -- Fonts to typeset with the xgreek package.
univers --
universa -- Herbert Bayer's 'universal' font.
venturisadf -- Venturis ADF fonts collection.
wsuipa -- International Phonetic Alphabet fonts.
yfonts -- Support for old German fonts.
zefonts -- Virtual fonts to provide T1 encoding from existing fonts."""
EXAMPLE4 = """Arista is a simple multimedia transcoder, it focuses on \
being easy to use by making complex task of encoding for various devices \
simple.
Users should pick an input and a target device, choose a file to save to and \
go. Features:
* Presets for iPod, computer, DVD player, PSP, Playstation 3, and more.
* Live preview to see encoded quality.
* Automatically discover available DVD media and Video 4 Linux (v4l) devices.
* Rip straight from DVD media easily (requires libdvdcss).
* Rip straight from v4l devices.
* Simple terminal client for scripting.
* Automatic preset updating."""
def on_clicked(widget, desc_widget, descs):
widget.position += 1
if widget.position >= len(descs):
widget.position = 0
desc_widget.set_description(*descs[widget.position])
descs = ((EXAMPLE0, ''),
(EXAMPLE1, ''),
(EXAMPLE2, ''),
(EXAMPLE3, 'texlive-fonts-extra'),
(EXAMPLE4, ''))
win = Gtk.Window()
win.set_default_size(300, 400)
win.set_has_resize_grip(True)
vb = Gtk.VBox()
win.add(vb)
b = Gtk.Button('Next test description >>')
b.position = 0
vb.pack_start(b, False, False, 0)
scroll = Gtk.ScrolledWindow()
vb.add(scroll)
d = AppDescription()
#~ d.description.DEBUG_PAINT_BBOXES = True
d.set_description(EXAMPLE0, pkgname='')
scroll.add_with_viewport(d)
win.show_all()
b.connect("clicked", on_clicked, d, descs)
win.connect('destroy', lambda x: Gtk.main_quit())
return win
if __name__ == '__main__':
win = get_test_description_window()
win.show_all()
Gtk.main()
| 0.001587 |
# Wrapper module for waagent
#
# waagent is not written as a module. This wrapper module is created
# to use the waagent code as a module.
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
#
import imp
import os
import os.path
#
# The following code will search and load waagent code and expose
# it as a submodule of current module
#
def searchWAAgent():
agentPath = '/usr/sbin/waagent'
if(os.path.isfile(agentPath)):
return agentPath
    # PYTHONPATH may be unset; default to '' so the lookup never raises KeyError
    user_paths = os.environ.get('PYTHONPATH', '').split(os.pathsep)
for user_path in user_paths:
agentPath = os.path.join(user_path, 'waagent')
if(os.path.isfile(agentPath)):
return agentPath
return None
agentPath = searchWAAgent()
if(agentPath):
waagent = imp.load_source('waagent', agentPath)
else:
raise Exception("Can't load waagent.")
if not hasattr(waagent, "AddExtensionEvent"):
"""
If AddExtensionEvent is not defined, provide a dummy impl.
"""
def _AddExtensionEvent(*args, **kwargs):
pass
waagent.AddExtensionEvent = _AddExtensionEvent
if not hasattr(waagent, "WALAEventOperation"):
class _WALAEventOperation:
HeartBeat = "HeartBeat"
Provision = "Provision"
Install = "Install"
UnIsntall = "UnInstall"
Disable = "Disable"
Enable = "Enable"
Download = "Download"
Upgrade = "Upgrade"
Update = "Update"
waagent.WALAEventOperation = _WALAEventOperation
__ExtensionName__ = None
def InitExtensionEventLog(name):
    # without the global declaration this would only rebind a local variable
    global __ExtensionName__
    __ExtensionName__ = name
def AddExtensionEvent(name=None,
                      op=waagent.WALAEventOperation.Enable,
                      isSuccess=False,
                      message=None):
    # resolve the name at call time; a def-time default of __ExtensionName__
    # would permanently capture its initial value (None)
    if name is None:
        name = __ExtensionName__
    if name is not None:
        waagent.AddExtensionEvent(name=name,
                                  op=op,
                                  isSuccess=isSuccess,
                                  message=message)
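# Usage sketch (the extension name and message are illustrative; assumes
# this wrapper module is importable as WAAgentUtil):
#
#   import WAAgentUtil
#   WAAgentUtil.InitExtensionEventLog('MyExtension')
#   WAAgentUtil.AddExtensionEvent(op=WAAgentUtil.waagent.WALAEventOperation.Enable,
#                                 isSuccess=True,
#                                 message='extension enabled')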
| 0.003167 |
# ##### BEGIN MIT LICENSE BLOCK #####
# Copyright (C) 2011 by Lih-Hern Pang
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ##### END MIT LICENSE BLOCK #####
# ########################################################################
# See mesh_exporter.py for explanation.
# ########################################################################
import bpy, mathutils
from ogre_mesh_exporter.log_manager import LogManager, Message
from operator import attrgetter
# Mesh export settings class to define how we are going to export the mesh.
class MeshExportSettings():
def __init__(self, fixUpAxisToY = True, requireMaterials = True, applyModifiers = False, skeletonNameFollowMesh = True, runOgreXMLConverter = True):
self.fixUpAxisToY = fixUpAxisToY
self.requireMaterials = requireMaterials
self.applyModifiers = applyModifiers
self.skeletonNameFollowMesh = skeletonNameFollowMesh
self.runOgreXMLConverter = runOgreXMLConverter
@classmethod
def fromRNA(cls, meshObject):
globalSettings = bpy.context.scene.ogre_mesh_exporter
meshSettings = meshObject.data.ogre_mesh_exporter
return MeshExportSettings(
fixUpAxisToY = globalSettings.fixUpAxisToY,
requireMaterials = meshSettings.requireMaterials if (meshSettings.requireMaterials_override) else globalSettings.requireMaterials,
applyModifiers = meshSettings.applyModifiers if (meshSettings.applyModifiers_override) else globalSettings.applyModifiers,
skeletonNameFollowMesh = meshSettings.skeletonNameFollowMesh if (meshSettings.skeletonNameFollowMesh_override) else globalSettings.skeletonNameFollowMesh,
runOgreXMLConverter = globalSettings.runOgreXMLConverter)
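	# Example (illustrative): if requireMaterials_override is ticked on the
	# mesh, fromRNA() honours the per-mesh requireMaterials value; otherwise
	# it falls back to the scene-wide setting on ogre_mesh_exporter.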
class BoneWeight():
def __init__(self, boneIndex, boneWeight):
self.mBoneIndex = boneIndex
self.mBoneWeight = boneWeight
class Vertex():
	def __init__(self, pos, norm, uvs = None, colors = None, boneWeights = None):
		self.mPosition = pos
		self.mNormal = norm
		# allocate fresh lists per instance; mutable default arguments
		# would be shared across every Vertex created without them.
		self.mUVs = uvs if (uvs is not None) else list()
		self.mColors = colors if (colors is not None) else list()
		self.mBoneWeights = boneWeights if (boneWeights is not None) else list()
def match(self, norm, uvs, colors):
# Test normal.
		if (self.mNormal != norm): return False
# Test UVs.
		if (len(self.mUVs) != len(uvs)): return False
for uv1, uv2 in zip(self.mUVs, uvs):
if (uv1 != uv2): return False
# Test Colors.
		if (len(self.mColors) != len(colors)): return False
for color1, color2 in zip(self.mColors, colors):
if (color1 != color2): return False
return True
class VertexBuffer():
def __init__(self, uvLayers = 0, colorLayers = 0, hasBoneWeights = False):
# Vertex data.
self.mVertexData = list()
self.mUVLayers = uvLayers
self.mColorLayers = colorLayers
self.mHasBoneWeights = hasBoneWeights
# Blender mesh -> vertex index link.
# Only useful when exporting.
self.mMeshVertexIndexLink = dict()
def reset(self, uvLayers, colorLayers, hasBoneWeights = False):
self.mVertexData = list()
self.mUVLayers = uvLayers
self.mColorLayers = colorLayers
self.mHasBoneWeights = hasBoneWeights
def vertexCount(self):
return len(self.mVertexData)
# This method adds a vertex from the given blend mesh index into the buffer.
# If the uv information does not match the recorded vertex, it will automatically
# clone a new vertex for use.
def addVertex(self, index, pos, norm, uvs, colors, boneWeights = list(), fixUpAxisToY = True):
# Fix Up axis to Y (swap Y and Z and negate Z)
if (fixUpAxisToY):
pos = [pos[0], pos[2], -pos[1]]
norm = [norm[0], norm[2], -norm[1]]
		# make sure uv layer and color layer counts match what this buffer was defined with.
if (len(uvs) != self.mUVLayers or len(colors) != self.mColorLayers):
raise Exception("Invalid UV layer or Color layer count! Expecting uv(%d), color(%d). Got uv(%d), color(%d)" %
(self.mUVLayers, self.mColorLayers, len(uvs), len(colors)))
# try to find pre added vertex that matches criteria.
if (index in self.mMeshVertexIndexLink):
localIndexList = self.mMeshVertexIndexLink[index]
for localIndex in localIndexList:
if (self.mVertexData[localIndex].match(norm, uvs, colors)):
return localIndex
# nothing found. so we add a new vertex.
localIndex = len(self.mVertexData)
if (index not in self.mMeshVertexIndexLink): self.mMeshVertexIndexLink[index] = list()
self.mMeshVertexIndexLink[index].append(localIndex)
self.mVertexData.append(Vertex(pos, norm, uvs, colors, boneWeights))
return localIndex
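	# Behavior sketch: repeated addVertex() calls for the same blend-mesh index
	# with identical normal/uv/color data return the same local index, so faces
	# can share vertices instead of duplicating them in the output.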
def serialize(self, file, indent = ''):
extraAttributes = ''
uvLayerCount = 8 if (self.mUVLayers > 8) else self.mUVLayers
if (uvLayerCount > 0):
extraAttributes = ' texture_coords="%d"' % uvLayerCount
for i in range(uvLayerCount):
extraAttributes += ' texture_coord_dimensions_%d="float2"' % i
colorLayerCount = self.mColorLayers
if (colorLayerCount > 0): extraAttributes += ' colours_diffuse="true"'
if (colorLayerCount > 1): extraAttributes += ' colours_specular="true"'
file.write('%s<vertexbuffer positions="true" normals="true"%s>\n' % (indent, extraAttributes))
for vertex in self.mVertexData:
file.write('%s\t<vertex>\n' % indent)
# write position and normal.
file.write('%s\t\t<position x="%.6f" y="%.6f" z="%.6f" />\n' % (indent, vertex.mPosition[0], vertex.mPosition[1], vertex.mPosition[2]))
file.write('%s\t\t<normal x="%.6f" y="%.6f" z="%.6f" />\n' % (indent, vertex.mNormal[0], vertex.mNormal[1], vertex.mNormal[2]))
# write UV layers. (NOTE: Blender uses bottom left coord! Ogre uses top left! So we have to flip Y.)
for i in range(uvLayerCount):
uv = vertex.mUVs[i]
file.write('%s\t\t<texcoord u="%.6f" v="%.6f" />\n' % (indent, uv[0], (1.0 - uv[1])))
# write diffuse.
if (colorLayerCount > 0):
color = vertex.mColors[0]
file.write('%s\t\t<colour_diffuse value="%.6f %.6f %.6f" />\n' % (indent, color[0], color[1], color[2]))
			# write specular.
			if (colorLayerCount > 1):
				color = vertex.mColors[1]
				file.write('%s\t\t<colour_specular value="%.6f %.6f %.6f" />\n' % (indent, color[0], color[1], color[2]))
file.write('%s\t</vertex>\n' % indent)
file.write('%s</vertexbuffer>\n' % indent)
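	# The emitted XML has this rough shape (abridged):
	#   <vertexbuffer positions="true" normals="true" texture_coords="1" ...>
	#       <vertex><position .../><normal .../><texcoord .../></vertex>
	#   </vertexbuffer>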
def serializeBoneAssignments(self, file, indent = ''):
file.write('%s\t<boneassignments>\n' % indent)
		vertexWithNoBoneAssignments = 0
		for i, vertex in enumerate(self.mVertexData):
			if (len(vertex.mBoneWeights) == 0): vertexWithNoBoneAssignments += 1
			for boneWeight in vertex.mBoneWeights:
				file.write('%s\t\t<vertexboneassignment vertexindex="%d" boneindex="%d" weight="%.6f" />\n' %
					(indent, i, boneWeight.mBoneIndex, boneWeight.mBoneWeight))
		if (vertexWithNoBoneAssignments > 0):
			LogManager.logMessage("There are %d vertices with no bone assignments!" % vertexWithNoBoneAssignments, Message.LVL_WARNING)
file.write('%s\t</boneassignments>\n' % indent)
class SubMesh():
def __init__(self, vertexBuffer = None, meshVertexIndexLink = None, name = None):
# True if submesh is sharing vertex buffer.
self.mShareVertexBuffer = False
# Vertex buffer.
self.mVertexBuffer = vertexBuffer if (vertexBuffer) else VertexBuffer()
# Blender mesh -> local/shared vertex index link.
self.mMeshVertexIndexLink = meshVertexIndexLink if (meshVertexIndexLink) else dict()
# Face data.
self.mFaceData = list()
# Blender material.
self.mMaterial = None
# Name of submesh
self.mName = name
if ((vertexBuffer is not None) and (meshVertexIndexLink is not None)):
self.mShareVertexBuffer = True
def insertPolygon(self, blendMesh, polygon, blendVertexGroups = None, ogreSkeleton = None, fixUpAxisToY = True):
polygonVertices = polygon.vertices
polygonVertexCount = polygon.loop_total
# extract uv information.
# Here we convert blender uv data into our own
# uv information that lists uvs by vertices.
blendUVLoopLayers = blendMesh.uv_layers
# construct empty polygon vertex uv list.
polygonVertUVs = list()
for i in range(polygonVertexCount): polygonVertUVs.append(list())
for uvLoopLayer in blendUVLoopLayers:
for i, loopIndex in enumerate(polygon.loop_indices):
polygonVertUVs[i].append(uvLoopLayer.data[loopIndex].uv)
# extract color information.
# Here we convert blender color data into our own
# color information that lists colors by vertices.
blendColorLoopLayers = blendMesh.vertex_colors
# construct empty polygon vertex color list.
polygonVertColors = list()
for i in range(polygonVertexCount): polygonVertColors.append(list())
for colorLoopLayer in blendColorLoopLayers:
for i, loopIndex in enumerate(polygon.loop_indices):
polygonVertColors[i].append(colorLoopLayer.data[loopIndex].color)
# loop through the vertices and add to this submesh.
localIndices = list()
useSmooth = polygon.use_smooth
for index, uvs, colors in zip(polygonVertices, polygonVertUVs, polygonVertColors):
vertex = blendMesh.vertices[index]
norm = vertex.normal if (useSmooth) else polygon.normal
# grab bone weights.
boneWeights = list()
if (ogreSkeleton is not None):
for groupElement in vertex.groups:
groupName = blendVertexGroups[groupElement.group].name
boneIndex = ogreSkeleton.getBoneIndex(groupName)
if (boneIndex == -1 or abs(groupElement.weight) < 0.000001): continue
boneWeight = groupElement.weight
boneWeights.append(BoneWeight(boneIndex, boneWeight))
# trim bone weight count if too many defined.
if (len(boneWeights) > 4):
LogManager.logMessage("More than 4 bone weights are defined for a vertex! Best 4 will be used.", Message.LVL_WARNING)
boneWeights.sort(key=attrgetter('mBoneWeight'), reverse=True)
while (len(boneWeights) > 4): del boneWeights[-1]
localIndices.append(self.mVertexBuffer.addVertex(index, vertex.co, norm, uvs, colors, boneWeights, fixUpAxisToY))
# construct triangle index data.
		if (polygonVertexCount == 3):
			self.mFaceData.append(localIndices)
		else:
			# split quad into two triangles fanned around vertex 0.
			# (assumes quads; n-gons should be triangulated before export.)
			self.mFaceData.append(localIndices[:3])
			self.mFaceData.append([localIndices[0], localIndices[2], localIndices[3]])
def serialize(self, file):
vertexCount = self.mVertexBuffer.vertexCount()
materialAttribute = '' if (self.mMaterial is None) else ' material="%s"' % self.mMaterial.name
file.write('\t\t<submesh%s usesharedvertices="%s" use32bitindexes="%s">\n' %
(materialAttribute, 'true' if self.mShareVertexBuffer else 'false',
'true' if (vertexCount > 65536) else 'false'))
# write face data.
file.write('\t\t\t<faces count="%d">\n' % len(self.mFaceData))
for face in self.mFaceData:
file.write('\t\t\t\t<face v1="%d" v2="%d" v3="%d" />\n' % tuple(face))
file.write('\t\t\t</faces>\n')
# write submesh vertex buffer if not shared.
if (not self.mShareVertexBuffer):
file.write('\t\t\t<geometry vertexcount="%d">\n' % vertexCount)
self.mVertexBuffer.serialize(file, '\t\t\t\t')
file.write('\t\t\t</geometry>\n')
		# write bone assignments for this submesh's own (non-shared) buffer.
		if ((not self.mShareVertexBuffer) and self.mVertexBuffer.mHasBoneWeights):
			self.mVertexBuffer.serializeBoneAssignments(file, '\t\t\t')
file.write('\t\t</submesh>\n')
class Mesh():
def __init__(self, blendMesh = None, blendVertexGroups = None, ogreSkeleton = None, exportSettings = MeshExportSettings()):
# shared vertex buffer.
self.mSharedVertexBuffer = VertexBuffer()
# Blender mesh -> shared vertex index link.
self.mSharedMeshVertexIndexLink = dict()
# collection of submeshes.
self.mSubMeshDict = dict()
# skip blend mesh conversion if no blend mesh passed in.
if (blendMesh is None): return
self.mOgreSkeleton = ogreSkeleton
hasBoneWeights = ogreSkeleton is not None
		# Let's do some pre-checking to show warnings if needed.
uvLayerCount = len(blendMesh.uv_layers)
colorLayerCount = len(blendMesh.vertex_colors)
if (uvLayerCount > 8): LogManager.logMessage("More than 8 UV layers in this mesh. Only 8 will be exported.", Message.LVL_WARNING)
if (colorLayerCount > 2): LogManager.logMessage("More than 2 color layers in this mesh. Only 2 will be exported.", Message.LVL_WARNING)
# setup shared vertex buffer.
self.mSharedVertexBuffer.reset(uvLayerCount, colorLayerCount, hasBoneWeights)
# split up the mesh into submeshes by materials.
# we first get sub mesh shared vertices option.
materialList = blendMesh.materials
materialCount = len(materialList)
subMeshProperties = blendMesh.ogre_mesh_exporter.subMeshProperties
while (len(subMeshProperties) < materialCount): subMeshProperties.add() # add more items if needed.
while (len(subMeshProperties) > materialCount): subMeshProperties.remove(0) # remove items if needed.
LogManager.logMessage("Material Count: %d" % len(materialList), Message.LVL_INFO)
for polygon in blendMesh.polygons:
# get or create submesh.
if (polygon.material_index in self.mSubMeshDict):
subMesh = self.mSubMeshDict[polygon.material_index]
else:
				# instantiate submesh based on whether it shares vertices or not.
subMeshProperty = subMeshProperties[polygon.material_index]
if (subMeshProperty.useSharedVertices):
subMesh = SubMesh(self.mSharedVertexBuffer, self.mSharedMeshVertexIndexLink, subMeshProperty.name)
else:
subMesh = SubMesh(VertexBuffer(uvLayerCount, colorLayerCount, hasBoneWeights), name = subMeshProperty.name)
subMesh.mMaterial = None if (len(materialList) == 0) else materialList[polygon.material_index]
				if (exportSettings.requireMaterials and subMesh.mMaterial is None):
LogManager.logMessage("Some faces are not assigned with a material!", Message.LVL_WARNING)
LogManager.logMessage("To hide this warning, please uncheck the 'Require Materials' option.", Message.LVL_WARNING)
self.mSubMeshDict[polygon.material_index] = subMesh
# insert polygon.
subMesh.insertPolygon(blendMesh, polygon, blendVertexGroups, ogreSkeleton, exportSettings.fixUpAxisToY)
def serialize(self, file):
file.write('<mesh>\n')
# write shared vertex buffer if available.
sharedVertexCount = self.mSharedVertexBuffer.vertexCount()
if (sharedVertexCount > 0):
file.write('\t<sharedgeometry vertexcount="%d">\n' % sharedVertexCount)
self.mSharedVertexBuffer.serialize(file, '\t\t')
file.write('\t</sharedgeometry>\n')
# write bone assignments
if (self.mSharedVertexBuffer.mHasBoneWeights):
self.mSharedVertexBuffer.serializeBoneAssignments(file, '\t\t')
subMeshNames = list()
# write submeshes.
file.write('\t<submeshes>\n')
for subMesh in self.mSubMeshDict.values():
name = subMesh.mName
if (name):
if (not name in subMeshNames):
subMeshNames.append(name)
else:
LogManager.logMessage("Mulitple submesh with same name defined: %s" % name, Message.LVL_WARNING)
subMesh.serialize(file)
file.write('\t</submeshes>\n')
# write submesh names
if (len(subMeshNames)):
file.write('\t<submeshnames>\n')
for index, name in enumerate(subMeshNames):
file.write('\t\t<submeshname name="%s" index="%d" />\n' % (name, index))
file.write('\t</submeshnames>\n')
# write skeleton link
if (self.mOgreSkeleton is not None):
file.write('\t<skeletonlink name="%s.skeleton" />\n' % self.mOgreSkeleton.mName)
file.write('</mesh>\n')
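# End-to-end usage sketch (object names assumed; the real driver lives in
# mesh_exporter.py, and ogreSkeleton may be None for unskinned meshes):
#   settings = MeshExportSettings.fromRNA(blendObject)
#   mesh = Mesh(blendObject.data, blendObject.vertex_groups, ogreSkeleton, settings)
#   with open('out.mesh.xml', 'w') as f:
#       mesh.serialize(f)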
| 0.026057 |
"""
HTMLParser-based link extractor
"""
from HTMLParser import HTMLParser
from urlparse import urljoin
from w3lib.url import safe_url_string
from scrapy.link import Link
from scrapy.utils.python import unique as unique_list
class HtmlParserLinkExtractor(HTMLParser):
def __init__(self, tag="a", attr="href", process=None, unique=False):
HTMLParser.__init__(self)
self.scan_tag = tag if callable(tag) else lambda t: t == tag
self.scan_attr = attr if callable(attr) else lambda a: a == attr
self.process_attr = process if callable(process) else lambda v: v
self.unique = unique
def _extract_links(self, response_text, response_url, response_encoding):
self.reset()
self.feed(response_text)
self.close()
links = unique_list(self.links, key=lambda link: link.url) if self.unique else self.links
ret = []
base_url = urljoin(response_url, self.base_url) if self.base_url else response_url
for link in links:
if isinstance(link.url, unicode):
link.url = link.url.encode(response_encoding)
link.url = urljoin(base_url, link.url)
link.url = safe_url_string(link.url, response_encoding)
link.text = link.text.decode(response_encoding)
ret.append(link)
return ret
def extract_links(self, response):
        # wrapper needed to allow working directly with text
return self._extract_links(response.body, response.url, response.encoding)
def reset(self):
HTMLParser.reset(self)
self.base_url = None
self.current_link = None
self.links = []
def handle_starttag(self, tag, attrs):
if tag == 'base':
self.base_url = dict(attrs).get('href')
if self.scan_tag(tag):
for attr, value in attrs:
if self.scan_attr(attr):
url = self.process_attr(value)
link = Link(url=url)
self.links.append(link)
self.current_link = link
def handle_endtag(self, tag):
if self.scan_tag(tag):
self.current_link = None
def handle_data(self, data):
if self.current_link:
self.current_link.text = self.current_link.text + data
def matches(self, url):
"""This extractor matches with any url, since
it doesn't contain any patterns"""
return True
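# Usage sketch (response is assumed to be a scrapy HtmlResponse):
#   lx = HtmlParserLinkExtractor(unique=True)
#   for link in lx.extract_links(response):
#       print link.url, link.text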
| 0.001621 |
"""
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array, check_arrays, safe_asarray,
assert_all_finite, array2d, atleast2d_or_csc,
atleast2d_or_csr, warn_if_not_float,
check_random_state)
from .class_weight import compute_class_weight
__all__ = ["murmurhash3_32", "as_float_array", "check_arrays", "safe_asarray",
"assert_all_finite", "array2d", "atleast2d_or_csc",
"atleast2d_or_csr", "warn_if_not_float", "check_random_state",
"compute_class_weight"]
# Make sure that DeprecationWarning get printed
warnings.simplefilter("always", DeprecationWarning)
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
    in an empty pair of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask: array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asanyarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
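# Example (illustrative): a boolean mask is converted to integer indices for
# sparse inputs, since sparse matrices historically could not be indexed with
# boolean masks:
#   safe_mask(csr_matrix(np.eye(3)), np.array([True, False, True]))
#   -> array([0, 2])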
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
`*arrays` : sequence of arrays or scipy.sparse matrices with same shape[0]
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = [[1., 0.], [2., 1.], [0., 0.]]
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:class:`sklearn.cross_validation.Bootstrap`
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
if max_n_samples > n_samples:
raise ValueError("Cannot sample %d out of arrays with dim %d" % (
max_n_samples, n_samples))
arrays = check_arrays(*arrays, sparse_format='csr')
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
resampled_arrays = []
for array in arrays:
array = array[indices]
resampled_arrays.append(array)
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
`*arrays` : sequence of arrays or scipy.sparse matrices with same shape[0]
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = [[1., 0.], [2., 1.], [0., 0.]]
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
Returns
-------
X ** 2 : element wise square
"""
X = safe_asarray(X)
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
def gen_even_slices(n, n_packs):
"""Generator to create n_packs slices going up to n.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
yield slice(start, end, None)
start = end
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
class ConvergenceWarning(Warning):
"Custom warning to capture convergence problems"
| 0 |
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['FlashFileNaming']
import os, re
from pyasm.biz import FileNaming, Project, Snapshot, File
from pyasm.common import TacticException
class FlashFileNaming(FileNaming):
def add_ending(my, parts, auto_version=False):
context = my.snapshot.get_value("context")
version = my.snapshot.get_value("version")
version = "v%0.3d" % version
ext = my.get_ext()
# it is only unique if we use both context and version
parts.append(context)
parts.append(version)
filename = "_".join(parts)
filename = "%s%s" % (filename, ext)
# should I check if this filename is unique again?
return filename
# custom filename processing per sobject begins
def _get_unique_filename(my):
filename = my.file_object.get_full_file_name()
# find if this filename has been used for this project
file = File.get_by_filename(filename, skip_id=my.file_object.get_id())
if file:
root, ext = os.path.splitext(filename)
parts = [root]
filename = my.add_ending(parts, auto_version=True)
return filename
else:
return None
def flash_nat_pause(my):
return my._get_unique_filename()
def flash_final_wave(my):
return my._get_unique_filename()
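# Usage sketch (assumed Tactic checkin flow): the framework constructs the
# naming class with a snapshot/file object pair and calls the context-named
# hook (e.g. flash_nat_pause) to resolve name clashes, yielding names like
# "mymovie_animation_v003.fla".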
| 0.002384 |
import sys
from django import http
from django.core import signals
from django.utils.encoding import force_unicode
from django.utils.importlib import import_module
class BaseHandler(object):
# Changes that are always applied to a response (in this order).
response_fixes = [
http.fix_location_header,
http.conditional_content_removal,
http.fix_IE_for_attach,
http.fix_IE_for_vary,
]
def __init__(self):
self._request_middleware = self._view_middleware = self._response_middleware = self._exception_middleware = None
def load_middleware(self):
"""
Populate middleware lists from settings.MIDDLEWARE_CLASSES.
Must be called after the environment is fixed (see __call__).
"""
from django.conf import settings
from django.core import exceptions
self._view_middleware = []
self._response_middleware = []
self._exception_middleware = []
request_middleware = []
for middleware_path in settings.MIDDLEWARE_CLASSES:
try:
dot = middleware_path.rindex('.')
except ValueError:
raise exceptions.ImproperlyConfigured('%s isn\'t a middleware module' % middleware_path)
mw_module, mw_classname = middleware_path[:dot], middleware_path[dot+1:]
try:
mod = import_module(mw_module)
except ImportError, e:
raise exceptions.ImproperlyConfigured('Error importing middleware %s: "%s"' % (mw_module, e))
try:
mw_class = getattr(mod, mw_classname)
except AttributeError:
raise exceptions.ImproperlyConfigured('Middleware module "%s" does not define a "%s" class' % (mw_module, mw_classname))
try:
mw_instance = mw_class()
except exceptions.MiddlewareNotUsed:
continue
if hasattr(mw_instance, 'process_request'):
request_middleware.append(mw_instance.process_request)
if hasattr(mw_instance, 'process_view'):
self._view_middleware.append(mw_instance.process_view)
if hasattr(mw_instance, 'process_response'):
self._response_middleware.insert(0, mw_instance.process_response)
if hasattr(mw_instance, 'process_exception'):
self._exception_middleware.insert(0, mw_instance.process_exception)
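            # Response and exception middleware are inserted at index 0, so
            # they run in reverse order of MIDDLEWARE_CLASSES, mirroring the
            # request/view phases which run in forward order.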
# We only assign to this when initialization is complete as it is used
# as a flag for initialization being complete.
self._request_middleware = request_middleware
def get_response(self, request):
"Returns an HttpResponse object for the given HttpRequest"
from django.core import exceptions, urlresolvers
from django.conf import settings
try:
try:
# Setup default url resolver for this thread.
urlconf = settings.ROOT_URLCONF
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
# Apply request middleware
for middleware_method in self._request_middleware:
response = middleware_method(request)
if response:
return response
if hasattr(request, "urlconf"):
# Reset url resolver with a custom urlconf.
urlconf = request.urlconf
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
callback, callback_args, callback_kwargs = resolver.resolve(
request.path_info)
# Apply view middleware
for middleware_method in self._view_middleware:
response = middleware_method(request, callback, callback_args, callback_kwargs)
if response:
return response
try:
response = callback(request, *callback_args, **callback_kwargs)
except Exception, e:
# If the view raised an exception, run it through exception
# middleware, and if the exception middleware returns a
# response, use that. Otherwise, reraise the exception.
for middleware_method in self._exception_middleware:
response = middleware_method(request, e)
if response:
return response
raise
# Complain if the view returned None (a common error).
if response is None:
try:
view_name = callback.func_name # If it's a function
except AttributeError:
view_name = callback.__class__.__name__ + '.__call__' # If it's a class
raise ValueError("The view %s.%s didn't return an HttpResponse object." % (callback.__module__, view_name))
return response
except http.Http404, e:
if settings.DEBUG:
from django.views import debug
return debug.technical_404_response(request, e)
else:
try:
callback, param_dict = resolver.resolve404()
return callback(request, **param_dict)
except:
try:
return self.handle_uncaught_exception(request, resolver, sys.exc_info())
finally:
receivers = signals.got_request_exception.send(sender=self.__class__, request=request)
except exceptions.PermissionDenied:
return http.HttpResponseForbidden('<h1>Permission denied</h1>')
except SystemExit:
# Allow sys.exit() to actually exit. See tickets #1023 and #4701
raise
except: # Handle everything else, including SuspiciousOperation, etc.
# Get the exception info now, in case another exception is thrown later.
receivers = signals.got_request_exception.send(sender=self.__class__, request=request)
return self.handle_uncaught_exception(request, resolver, sys.exc_info())
finally:
# Reset URLconf for this thread on the way out for complete
# isolation of request.urlconf
urlresolvers.set_urlconf(None)
def handle_uncaught_exception(self, request, resolver, exc_info):
"""
Processing for any otherwise uncaught exceptions (those that will
generate HTTP 500 responses). Can be overridden by subclasses who want
customised 500 handling.
Be *very* careful when overriding this because the error could be
caused by anything, so assuming something like the database is always
available would be an error.
"""
from django.conf import settings
from django.core.mail import mail_admins
if settings.DEBUG_PROPAGATE_EXCEPTIONS:
raise
if settings.DEBUG:
from django.views import debug
return debug.technical_500_response(request, *exc_info)
# When DEBUG is False, send an error message to the admins.
subject = 'Error (%s IP): %s' % ((request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS and 'internal' or 'EXTERNAL'), request.path)
try:
request_repr = repr(request)
except:
request_repr = "Request repr() unavailable"
message = "%s\n\n%s" % (self._get_traceback(exc_info), request_repr)
mail_admins(subject, message, fail_silently=True)
# If Http500 handler is not installed, re-raise last exception
if resolver.urlconf_module is None:
raise exc_info[1], None, exc_info[2]
# Return an HttpResponse that displays a friendly error message.
callback, param_dict = resolver.resolve500()
return callback(request, **param_dict)
def _get_traceback(self, exc_info=None):
"Helper function to return the traceback as a string"
import traceback
return '\n'.join(traceback.format_exception(*(exc_info or sys.exc_info())))
def apply_response_fixes(self, request, response):
"""
Applies each of the functions in self.response_fixes to the request and
response, modifying the response in the process. Returns the new
response.
"""
for func in self.response_fixes:
response = func(request, response)
return response
def get_script_name(environ):
"""
Returns the equivalent of the HTTP request's SCRIPT_NAME environment
variable. If Apache mod_rewrite has been used, returns what would have been
the script name prior to any rewriting (so it's the script name as seen
from the client's perspective), unless DJANGO_USE_POST_REWRITE is set (to
anything).
"""
from django.conf import settings
if settings.FORCE_SCRIPT_NAME is not None:
return force_unicode(settings.FORCE_SCRIPT_NAME)
# If Apache's mod_rewrite had a whack at the URL, Apache set either
# SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
# rewrites. Unfortunately not every Web server (lighttpd!) passes this
# information through all the time, so FORCE_SCRIPT_NAME, above, is still
# needed.
script_url = environ.get('SCRIPT_URL', u'')
if not script_url:
script_url = environ.get('REDIRECT_URL', u'')
if script_url:
return force_unicode(script_url[:-len(environ.get('PATH_INFO', ''))])
return force_unicode(environ.get('SCRIPT_NAME', u''))
| 0.002922 |
#!/usr/bin/env python
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pyplot as plt
from zipline.algorithm import TradingAlgorithm
from zipline.utils.factory import load_from_yahoo
# Import exponential moving average from talib wrapper
from zipline.transforms.ta import EMA
from datetime import datetime
import pytz
class DualEMATaLib(TradingAlgorithm):
"""Dual Moving Average Crossover algorithm.
    This algorithm buys Apple once its short moving average crosses
    above its long moving average (indicating upwards momentum) and sells
    its shares once the averages cross back below (indicating downwards
    momentum).
"""
def initialize(self, short_window=20, long_window=40):
# Add 2 mavg transforms, one with a long window, one
# with a short window.
self.short_ema_trans = EMA(timeperiod=short_window)
self.long_ema_trans = EMA(timeperiod=long_window)
# To keep track of whether we invested in the stock or not
self.invested = False
def handle_data(self, data):
self.short_ema = self.short_ema_trans.handle_data(data)
self.long_ema = self.long_ema_trans.handle_data(data)
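        # The EMA transforms return None until enough bars have accumulated
        # to fill their window, so skip trading during the warm-up period.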
if self.short_ema is None or self.long_ema is None:
return
self.buy = False
self.sell = False
if (self.short_ema > self.long_ema).all() and not self.invested:
self.order('AAPL', 100)
self.invested = True
self.buy = True
elif (self.short_ema < self.long_ema).all() and self.invested:
self.order('AAPL', -100)
self.invested = False
self.sell = True
self.record(AAPL=data['AAPL'].price,
short_ema=self.short_ema['AAPL'],
long_ema=self.long_ema['AAPL'],
buy=self.buy,
sell=self.sell)
if __name__ == '__main__':
start = datetime(1990, 1, 1, 0, 0, 0, 0, pytz.utc)
end = datetime(1991, 1, 1, 0, 0, 0, 0, pytz.utc)
data = load_from_yahoo(stocks=['AAPL'], indexes={}, start=start,
end=end)
dma = DualEMATaLib()
results = dma.run(data).dropna()
fig = plt.figure()
ax1 = fig.add_subplot(211, ylabel='portfolio value')
results.portfolio_value.plot(ax=ax1)
ax2 = fig.add_subplot(212)
results[['AAPL', 'short_ema', 'long_ema']].plot(ax=ax2)
ax2.plot(results.ix[results.buy].index, results.short_ema[results.buy],
'^', markersize=10, color='m')
ax2.plot(results.ix[results.sell].index, results.short_ema[results.sell],
'v', markersize=10, color='k')
plt.legend(loc=0)
plt.gcf().set_size_inches(18, 8)
| 0.00031 |