"""distutils.msvccompiler
Contains MSVCCompiler, an implementation of the abstract CCompiler class
for Microsoft Visual Studio.
"""
# Written by Perry Stoll
# hacked by Robin Becker and Thomas Heller to do a better job of
# finding DevStudio (through the registry)
import sys, os
from distutils.errors import \
DistutilsExecError, DistutilsPlatformError, \
CompileError, LibError, LinkError
from distutils.ccompiler import \
CCompiler, gen_preprocess_options, gen_lib_options
from distutils import log
_can_read_reg = False
try:
import winreg
_can_read_reg = True
hkey_mod = winreg
RegOpenKeyEx = winreg.OpenKeyEx
RegEnumKey = winreg.EnumKey
RegEnumValue = winreg.EnumValue
RegError = winreg.error
except ImportError:
try:
import win32api
import win32con
_can_read_reg = True
hkey_mod = win32con
RegOpenKeyEx = win32api.RegOpenKeyEx
RegEnumKey = win32api.RegEnumKey
RegEnumValue = win32api.RegEnumValue
RegError = win32api.error
except ImportError:
log.info("Warning: Can't read registry to find the "
"necessary compiler setting\n"
"Make sure that Python modules winreg, "
"win32api or win32con are installed.")
pass
if _can_read_reg:
HKEYS = (hkey_mod.HKEY_USERS,
hkey_mod.HKEY_CURRENT_USER,
hkey_mod.HKEY_LOCAL_MACHINE,
hkey_mod.HKEY_CLASSES_ROOT)
def read_keys(base, key):
"""Return list of registry keys."""
try:
handle = RegOpenKeyEx(base, key)
except RegError:
return None
L = []
i = 0
while True:
try:
k = RegEnumKey(handle, i)
except RegError:
break
L.append(k)
i += 1
return L
def read_values(base, key):
"""Return dict of registry keys and values.
All names are converted to lowercase.
"""
try:
handle = RegOpenKeyEx(base, key)
except RegError:
return None
d = {}
i = 0
while True:
try:
name, value, type = RegEnumValue(handle, i)
except RegError:
break
name = name.lower()
d[convert_mbcs(name)] = convert_mbcs(value)
i += 1
return d
def convert_mbcs(s):
dec = getattr(s, "decode", None)
if dec is not None:
try:
s = dec("mbcs")
except UnicodeError:
pass
return s
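# A minimal usage sketch (not part of the original module; Windows-only, and
# the registry path below is only an example): this mirrors how
# MacroExpander.set_macro() further down walks HKEYS and reads one value from
# the first hive that actually has the key.
def _example_read_framework_root():
    if not _can_read_reg:
        return None
    for base in HKEYS:
        d = read_values(base, r"Software\Microsoft\.NETFramework")
        if d:
            # read_values() lower-cases every value name
            return d.get("installroot")
    return None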
class MacroExpander:
def __init__(self, version):
self.macros = {}
self.load_macros(version)
def set_macro(self, macro, path, key):
for base in HKEYS:
d = read_values(base, path)
if d:
self.macros["$(%s)" % macro] = d[key]
break
def load_macros(self, version):
vsbase = r"Software\Microsoft\VisualStudio\%0.1f" % version
self.set_macro("VCInstallDir", vsbase + r"\Setup\VC", "productdir")
self.set_macro("VSInstallDir", vsbase + r"\Setup\VS", "productdir")
net = r"Software\Microsoft\.NETFramework"
self.set_macro("FrameworkDir", net, "installroot")
try:
if version > 7.0:
self.set_macro("FrameworkSDKDir", net, "sdkinstallrootv1.1")
else:
self.set_macro("FrameworkSDKDir", net, "sdkinstallroot")
except KeyError as exc: #
raise DistutilsPlatformError(
"""Python was built with Visual Studio 2003;
extensions must be built with a compiler that can generate compatible binaries.
Visual Studio 2003 was not found on this system. If you have Cygwin installed,
you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""")
p = r"Software\Microsoft\NET Framework Setup\Product"
for base in HKEYS:
try:
h = RegOpenKeyEx(base, p)
except RegError:
continue
key = RegEnumKey(h, 0)
d = read_values(base, r"%s\%s" % (p, key))
self.macros["$(FrameworkVersion)"] = d["version"]
def sub(self, s):
for k, v in self.macros.items():
s = s.replace(k, v)
return s
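# For instance (hypothetical values), once MacroExpander.load_macros() has
# populated self.macros, expander.sub(r"$(FrameworkDir)$(FrameworkVersion)")
# replaces each "$(...)" placeholder with whatever was read from the registry.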
def get_build_version():
"""Return the version of MSVC that was used to build Python.
For Python 2.3 and up, the version number is included in
sys.version. For earlier versions, assume the compiler is MSVC 6.
"""
prefix = "MSC v."
i = sys.version.find(prefix)
if i == -1:
return 6
i = i + len(prefix)
s, rest = sys.version[i:].split(" ", 1)
majorVersion = int(s[:-2]) - 6
if majorVersion >= 13:
# v13 was skipped and should be v14
majorVersion += 1
minorVersion = int(s[2:3]) / 10.0
# I don't think paths are affected by minor version in version 6
if majorVersion == 6:
minorVersion = 0
if majorVersion >= 6:
return majorVersion + minorVersion
# else we don't know what version of the compiler this is
return None
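# Worked example (hypothetical interpreter, not part of the original module):
# for a Python whose sys.version ends in "[MSC v.1500 32 bit (Intel)]",
# s == "1500", so majorVersion = int("15") - 6 = 9 and
# minorVersion = int("0") / 10.0 = 0.0, and get_build_version() returns 9.0,
# i.e. the Visual Studio 2008 compiler.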
def get_build_architecture():
"""Return the processor architecture.
Possible results are "Intel", "Itanium", or "AMD64".
"""
prefix = " bit ("
i = sys.version.find(prefix)
if i == -1:
return "Intel"
j = sys.version.find(")", i)
return sys.version[i+len(prefix):j]
def normalize_and_reduce_paths(paths):
"""Return a list of normalized paths with duplicates removed.
The current order of paths is maintained.
"""
# Paths are normalized so things like: /a and /a/ aren't both preserved.
reduced_paths = []
for p in paths:
np = os.path.normpath(p)
# XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set.
if np not in reduced_paths:
reduced_paths.append(np)
return reduced_paths
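# For example (hypothetical directories), normalize_and_reduce_paths(
#     [r"C:\VC\bin", "C:\\VC\\bin\\", r"C:\SDK"])
# returns [r"C:\VC\bin", r"C:\SDK"], because normpath() maps the first two
# entries to the same string.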
class MSVCCompiler(CCompiler) :
"""Concrete class that implements an interface to Microsoft Visual C++,
as defined by the CCompiler abstract class."""
compiler_type = 'msvc'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
_rc_extensions = ['.rc']
_mc_extensions = ['.mc']
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = (_c_extensions + _cpp_extensions +
_rc_extensions + _mc_extensions)
res_extension = '.res'
obj_extension = '.obj'
static_lib_extension = '.lib'
shared_lib_extension = '.dll'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = '.exe'
def __init__(self, verbose=0, dry_run=0, force=0):
CCompiler.__init__ (self, verbose, dry_run, force)
self.__version = get_build_version()
self.__arch = get_build_architecture()
if self.__arch == "Intel":
# x86
if self.__version >= 7:
self.__root = r"Software\Microsoft\VisualStudio"
self.__macros = MacroExpander(self.__version)
else:
self.__root = r"Software\Microsoft\Devstudio"
self.__product = "Visual Studio version %s" % self.__version
else:
# Win64. Assume this was built with the platform SDK
self.__product = "Microsoft SDK compiler %s" % (self.__version + 6)
self.initialized = False
def initialize(self):
self.__paths = []
if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"):
# Assume that the SDK set up everything alright; don't try to be
# smarter
self.cc = "cl.exe"
self.linker = "link.exe"
self.lib = "lib.exe"
self.rc = "rc.exe"
self.mc = "mc.exe"
else:
self.__paths = self.get_msvc_paths("path")
if len(self.__paths) == 0:
raise DistutilsPlatformError("Python was built with %s, "
"and extensions need to be built with the same "
"version of the compiler, but it isn't installed."
% self.__product)
self.cc = self.find_exe("cl.exe")
self.linker = self.find_exe("link.exe")
self.lib = self.find_exe("lib.exe")
self.rc = self.find_exe("rc.exe") # resource compiler
self.mc = self.find_exe("mc.exe") # message compiler
self.set_path_env_var('lib')
self.set_path_env_var('include')
# extend the MSVC path with the current path
try:
for p in os.environ['path'].split(';'):
self.__paths.append(p)
except KeyError:
pass
self.__paths = normalize_and_reduce_paths(self.__paths)
os.environ['path'] = ";".join(self.__paths)
self.preprocess_options = None
if self.__arch == "Intel":
self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GX' ,
'/DNDEBUG']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GX',
'/Z7', '/D_DEBUG']
else:
# Win64
self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GS-' ,
'/DNDEBUG']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-',
'/Z7', '/D_DEBUG']
self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
if self.__version >= 7:
self.ldflags_shared_debug = [
'/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG'
]
else:
self.ldflags_shared_debug = [
'/DLL', '/nologo', '/INCREMENTAL:no', '/pdb:None', '/DEBUG'
]
self.ldflags_static = [ '/nologo']
self.initialized = True
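    # Sketch of the SDK escape hatch tested above (hypothetical shell
    # session): when both variables are set and cl.exe is already on PATH,
    # initialize() trusts the environment instead of consulting the registry.
    #
    #     set DISTUTILS_USE_SDK=1
    #     set MSSdk=1
    #     python setup.py build_ext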
# -- Worker methods ------------------------------------------------
def object_filenames(self,
source_filenames,
strip_dir=0,
output_dir=''):
# Copied from ccompiler.py, extended to return .res as 'object'-file
# for .rc input file
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
(base, ext) = os.path.splitext (src_name)
base = os.path.splitdrive(base)[1] # Chop off the drive
base = base[os.path.isabs(base):] # If abs, chop off leading /
if ext not in self.src_extensions:
# Better to raise an exception instead of silently continuing
# and later complain about sources and targets having
# different lengths
raise CompileError ("Don't know how to compile %s" % src_name)
if strip_dir:
base = os.path.basename (base)
if ext in self._rc_extensions:
obj_names.append (os.path.join (output_dir,
base + self.res_extension))
elif ext in self._mc_extensions:
obj_names.append (os.path.join (output_dir,
base + self.res_extension))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
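    # A small illustration (hypothetical file names): with strip_dir=0,
    # object_filenames(["foo.c", "bar.cpp", "msg.mc", "icons.rc"],
    # output_dir="build") returns ["build\\foo.obj", "build\\bar.obj",
    # "build\\msg.res", "build\\icons.res"]; .rc and .mc sources map to
    # .res outputs rather than .obj.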
def compile(self, sources,
output_dir=None, macros=None, include_dirs=None, debug=0,
extra_preargs=None, extra_postargs=None, depends=None):
if not self.initialized:
self.initialize()
compile_info = self._setup_compile(output_dir, macros, include_dirs,
sources, depends, extra_postargs)
macros, objects, extra_postargs, pp_opts, build = compile_info
compile_opts = extra_preargs or []
compile_opts.append ('/c')
if debug:
compile_opts.extend(self.compile_options_debug)
else:
compile_opts.extend(self.compile_options)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
if debug:
# pass the full pathname to MSVC in debug mode,
# this allows the debugger to find the source file
# without asking the user to browse for it
src = os.path.abspath(src)
if ext in self._c_extensions:
input_opt = "/Tc" + src
elif ext in self._cpp_extensions:
input_opt = "/Tp" + src
elif ext in self._rc_extensions:
# compile .RC to .RES file
input_opt = src
output_opt = "/fo" + obj
try:
self.spawn([self.rc] + pp_opts +
[output_opt] + [input_opt])
except DistutilsExecError as msg:
raise CompileError(msg)
continue
elif ext in self._mc_extensions:
# Compile .MC to .RC file to .RES file.
# * '-h dir' specifies the directory for the
# generated include file
# * '-r dir' specifies the target directory of the
# generated RC file and the binary message resource
# it includes
#
# For now (since there are no options to change this),
# we use the source-directory for the include file and
# the build directory for the RC file and message
# resources. This works at least for win32all.
h_dir = os.path.dirname(src)
rc_dir = os.path.dirname(obj)
try:
# first compile .MC to .RC and .H file
self.spawn([self.mc] +
['-h', h_dir, '-r', rc_dir] + [src])
base, _ = os.path.splitext (os.path.basename (src))
rc_file = os.path.join (rc_dir, base + '.rc')
# then compile .RC to .RES file
self.spawn([self.rc] +
["/fo" + obj] + [rc_file])
except DistutilsExecError as msg:
raise CompileError(msg)
continue
else:
# how to handle this file?
raise CompileError("Don't know how to compile %s to %s"
% (src, obj))
output_opt = "/Fo" + obj
try:
self.spawn([self.cc] + compile_opts + pp_opts +
[input_opt, output_opt] +
extra_postargs)
except DistutilsExecError as msg:
raise CompileError(msg)
return objects
def create_static_lib(self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
if not self.initialized:
self.initialize()
(objects, output_dir) = self._fix_object_args(objects, output_dir)
output_filename = self.library_filename(output_libname,
output_dir=output_dir)
if self._need_link(objects, output_filename):
lib_args = objects + ['/OUT:' + output_filename]
if debug:
pass # XXX what goes here?
try:
self.spawn([self.lib] + lib_args)
except DistutilsExecError as msg:
raise LibError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def link(self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
if not self.initialized:
self.initialize()
(objects, output_dir) = self._fix_object_args(objects, output_dir)
fixed_args = self._fix_lib_args(libraries, library_dirs,
runtime_library_dirs)
(libraries, library_dirs, runtime_library_dirs) = fixed_args
if runtime_library_dirs:
self.warn ("I don't know what to do with 'runtime_library_dirs': "
+ str (runtime_library_dirs))
lib_opts = gen_lib_options(self,
library_dirs, runtime_library_dirs,
libraries)
if output_dir is not None:
output_filename = os.path.join(output_dir, output_filename)
if self._need_link(objects, output_filename):
if target_desc == CCompiler.EXECUTABLE:
if debug:
ldflags = self.ldflags_shared_debug[1:]
else:
ldflags = self.ldflags_shared[1:]
else:
if debug:
ldflags = self.ldflags_shared_debug
else:
ldflags = self.ldflags_shared
export_opts = []
for sym in (export_symbols or []):
export_opts.append("/EXPORT:" + sym)
ld_args = (ldflags + lib_opts + export_opts +
objects + ['/OUT:' + output_filename])
# The MSVC linker generates .lib and .exp files, which cannot be
# suppressed by any linker switches. The .lib files may even be
# needed! Make sure they are generated in the temporary build
# directory. Since they have different names for debug and release
# builds, they can go into the same directory.
if export_symbols is not None:
(dll_name, dll_ext) = os.path.splitext(
os.path.basename(output_filename))
implib_file = os.path.join(
os.path.dirname(objects[0]),
self.library_filename(dll_name))
ld_args.append ('/IMPLIB:' + implib_file)
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath(os.path.dirname(output_filename))
try:
self.spawn([self.linker] + ld_args)
except DistutilsExecError as msg:
raise LinkError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
# -- Miscellaneous methods -----------------------------------------
    # These are all used by the 'gen_lib_options()' function, in
# ccompiler.py.
def library_dir_option(self, dir):
return "/LIBPATH:" + dir
def runtime_library_dir_option(self, dir):
raise DistutilsPlatformError(
"don't know how to set runtime library search path for MSVC++")
def library_option(self, lib):
return self.library_filename(lib)
def find_library_file(self, dirs, lib, debug=0):
# Prefer a debugging library if found (and requested), but deal
# with it if we don't have one.
if debug:
try_names = [lib + "_d", lib]
else:
try_names = [lib]
for dir in dirs:
for name in try_names:
libfile = os.path.join(dir, self.library_filename (name))
if os.path.exists(libfile):
return libfile
else:
# Oops, didn't find it in *any* of 'dirs'
return None
# Helper methods for using the MSVC registry settings
def find_exe(self, exe):
"""Return path to an MSVC executable program.
Tries to find the program in several places: first, one of the
MSVC program search paths from the registry; next, the directories
in the PATH environment variable. If any of those work, return an
absolute path that is known to exist. If none of them work, just
return the original program name, 'exe'.
"""
for p in self.__paths:
fn = os.path.join(os.path.abspath(p), exe)
if os.path.isfile(fn):
return fn
# didn't find it; try existing path
for p in os.environ['Path'].split(';'):
fn = os.path.join(os.path.abspath(p),exe)
if os.path.isfile(fn):
return fn
return exe
def get_msvc_paths(self, path, platform='x86'):
"""Get a list of devstudio directories (include, lib or path).
Return a list of strings. The list will be empty if unable to
access the registry or appropriate registry keys not found.
"""
if not _can_read_reg:
return []
path = path + " dirs"
if self.__version >= 7:
key = (r"%s\%0.1f\VC\VC_OBJECTS_PLATFORM_INFO\Win32\Directories"
% (self.__root, self.__version))
else:
key = (r"%s\6.0\Build System\Components\Platforms"
r"\Win32 (%s)\Directories" % (self.__root, platform))
for base in HKEYS:
d = read_values(base, key)
if d:
if self.__version >= 7:
return self.__macros.sub(d[path]).split(";")
else:
return d[path].split(";")
# MSVC 6 seems to create the registry entries we need only when
# the GUI is run.
if self.__version == 6:
for base in HKEYS:
if read_values(base, r"%s\6.0" % self.__root) is not None:
self.warn("It seems you have Visual Studio 6 installed, "
"but the expected registry settings are not present.\n"
"You must at least run the Visual Studio GUI once "
"so that these entries are created.")
break
return []
def set_path_env_var(self, name):
"""Set environment variable 'name' to an MSVC path type value.
This is equivalent to a SET command prior to execution of spawned
commands.
"""
if name == "lib":
p = self.get_msvc_paths("library")
else:
p = self.get_msvc_paths(name)
if p:
os.environ[name] = ';'.join(p)
if get_build_version() >= 8.0:
log.debug("Importing new compiler from distutils.msvc9compiler")
OldMSVCCompiler = MSVCCompiler
from distutils.msvc9compiler import MSVCCompiler
    # get_build_architecture not really relevant now that we support cross-compilation
from distutils.msvc9compiler import MacroExpander
# PyVision License
#
# Copyright (c) 2006-2010 David S. Bolme
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither name of copyright holders nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Created on Sep 6, 2010
@author: bolme
'''
import pyvision as pv
import numpy as np
import scipy as sp
import cv
import PIL.Image
import os.path
# Unlike many other graphics libraries PyVision does not have its own native
# pixel format. Instead it depends on other libraries to implement this array
# data, and PyVision acts as a translator. PyVision therefore provides a
# common framework that allows PIL, SciPy and OpenCV to work together to
# solve problems.
# This tutorial looks at pv.Image as a way to convert between types used
# in the PyVision library. pv.Images are used as a common format that can
# easily translate between PIL, Numpy, and OpenCV formats. Under the hood
# a pv.Image is represented by one of these formats, and the image is
# converted to new formats on demand.
# pv.Image implements a number of translation methods that allows images to
# be obtained in many useful formats. These methods are:
# * asMatrix2D() -> gray scale (one-channel) SciPy array
# * asMatrix3D() -> color (three-channel) SciPy array
# * asPIL() -> Python Imaging Library (PIL) image
# * asOpenCV() -> OpenCV Color Image (8-bit)
# * asOpenCVBW() -> OpenCV gray scale (Black/White) Image (8-bit)
# * asAnnotated() -> a PIL image which includes any annotations made to this image.
#
# The constructor for pv.Images can also take any of these formats as
# arguments. Therefore converting between types can be done by code such as:
# OPENCV_IMAGE = pv.Image(NUMPY_MATRIX).asOpenCV()
# In this tutorial we will demonstrate how to use pv.Image to convert images
# to different formats, and in each format we will perform a simple image
# processing task of thresholding an image to produce a black and white
# equivalent.
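# A short sketch of the round trip implied above (hypothetical file name,
# assuming only the pv.Image API described in the preceding comments):
#
#     im    = pv.Image("face.jpg")     # backed internally by a PIL image
#     mat   = im.asMatrix2D()          # converted on demand, then cached
#     im2   = pv.Image(mat)            # wrap the array back up as a pv.Image
#     cv_bw = im2.asOpenCVBW()         # hand it to OpenCV as 8-bit grayscale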
if __name__ == "__main__":
# Image logs are used to save images and other data to a directory
# for later analysis. These logs are valuable tools for understanding
# the imagery and debugging algorithms. Unless otherwise specified,
# ImageLogs are usually created in the directory "/tmp".
ilog = pv.ImageLog()
# Timers keep a record of the time required for algorithms to execute
    # and help determine runtimes and identify which parts of algorithms
    # are too slow and need optimization.
timer = pv.Timer()
# The filename for the baboon image
filename = os.path.join(pv.__path__[0],'data','misc','baboon.jpg')
    # If a string is passed to the initializer it is assumed to be a path,
    # and the image will be read from that file. The image is usually read
# from disk using PIL and then stored as a PIL image.
im = pv.Image(filename)
# This command saves the image to an image log which provides good
# information for debugging. It is often helpful to save many images
# during a processing to make sure that each step is producing the
# intended result.
ilog(im,"OriginalImage")
    # The PIL toolbox supports many image processing and graphics functions.
# Typically PIL is used for things like reading in image files and
# rendering graphics on top of images for annotations. It tends to
# be slower than OpenCV and also lacks many of the more specialized
# computer vision algorithms.
# pv.Image objects are responsible for converting between image types.
# This next call returns an image in PIL format that can be used with
# the PIL library.
pil = im.asPIL()
# "mark" checks the "wall clock time" and logs program timing data.
timer.mark("Starting")
# Convert from RGB to gray scale
gray = pil.convert('L')
    # This applies the "lambda" function to each pixel, which performs
    # thresholding. Processing each pixel with a Python function is slow.
thresh = PIL.Image.eval(gray, lambda x: 255*(x>127.5) )
# Record the time for PIL thresholding.
timer.mark("PIL")
# pv.Images can also be initialized using a PIL image. This command
# also saves a copy of the image to the ImageLog.
ilog(pv.Image(thresh),"PILThresh")
# Scipy arrays are very easy to work with, and scipy has many image
# processing, linear algebra, and data analysis routines.
# "asMatrix2D" returns the 2D array containing the gray scale pixel
# values. The values in the matrix are indexed using standard pixel
# mat[x,y] coordinates and mat.shape = (w,h). This may be transposed
    # from what some people might expect. Matrices are typically indexed
# using "rows" and "columns" so the matrix has been transposed to
# maintain the image-like x,y indexing. There is also a method
# called asMatrix3D() which returns color data in a 3D array.
# A pv.Image will often maintain multiple representations of an image.
# Calling a method like asMatrix2D will generate and return a Scipy format
# copy of the image. That copy will also be cached for future use so
# multiple calls to asMatrix2D will only generate that image for the
# first call, and all subsequent calls will return the cached copy.
mat = im.asMatrix2D()
timer.mark("Starting")
    # Scipy syntax is often very simple, straightforward, and fast.
# Because of this Scipy can be used to quickly prototype algorithms.
thresh = mat > 127.5
timer.mark("Scipy")
# pv.Image cannot be initialized using boolean data so "1.0*" converts
# to a floating point array.
ilog(pv.Image(1.0*thresh),"ScipyThresh")
# OpenCV code is often more complicated because images need to be
    # allocated explicitly and function calls are more involved, but
    # it often executes faster than scipy or PIL. OpenCV also has many
# useful image processing, machine learning, and computer vision
# algorithms that are not found in scipy or PIL.
# This function returns an OpenCV gray scale image. There is also a
# function asOpenCV() which will return a color image.
cvim = im.asOpenCVBW()
timer.mark("Starting")
# OpenCV often requires manual image/data allocation which increases code
# complexity but also offers more control for performance tuning and memory
# management.
dest = cv.CreateImage(im.size,cv.IPL_DEPTH_8U,1)
# OpenCV has fast implementations for many common vision algorithms and
# image processing tasks. Syntax is often more confusing than PIL or Scipy.
cv.CmpS(cvim,127.5,dest,cv.CV_CMP_GT)
timer.mark("OpenCV")
# Like before we convert to a pv.Image and save the image to the log.
ilog(pv.Image(dest),"OpenCVThresh")
# The timer collects a record of the time taken for each operation
    # and then displays that data in a nicely formatted table. "Current"
# times are measured from the previous mark and show that Scipy
# and OpenCV are very fast at this particular task.
print timer
#|---|---------------|---------------|-------------------|-----------------|-------|
#| | event | time | current | total | notes |
#|---|---------------|---------------|-------------------|-----------------|-------|
#| 0 | Timer Created | 1283833886.12 | 0.0 | 0.0 | None |
#| 1 | Starting | 1283833886.21 | 0.0880968570709 | 0.0880968570709 | None |
#| 2 | PIL | 1283833886.21 | 0.0013279914856 | 0.0894248485565 | None |
#| 3 | Starting | 1283833886.25 | 0.0393881797791 | 0.128813028336 | None |
#| 4 | Scipy | 1283833886.25 | 0.000727891921997 | 0.129540920258 | None |
#| 5 | Starting | 1283833886.31 | 0.056421995163 | 0.185962915421 | None |
#| 6 | OpenCV | 1283833886.31 | 0.000664949417114 | 0.186627864838 | None |
#|---|---------------|---------------|-------------------|-----------------|-------|
# Timing data can also be saved to the log for further analysis
ilog(timer,"TimingData")
    # This is a convenient function that displays all images in the log.
ilog.show()
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'setuppuloop.ui'
#
# Created by: PyQt5 UI code generator 5.12.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_SetupPULoop(object):
def setupUi(self, SetupPULoop):
SetupPULoop.setObjectName("SetupPULoop")
SetupPULoop.resize(387, 371)
self.verticalLayout = QtWidgets.QVBoxLayout(SetupPULoop)
self.verticalLayout.setObjectName("verticalLayout")
self.formLayout_2 = QtWidgets.QFormLayout()
self.formLayout_2.setSizeConstraint(QtWidgets.QLayout.SetMaximumSize)
self.formLayout_2.setFieldGrowthPolicy(QtWidgets.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout_2.setObjectName("formLayout_2")
self.label = QtWidgets.QLabel(SetupPULoop)
self.label.setObjectName("label")
self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label)
self.comboU = QtWidgets.QComboBox(SetupPULoop)
self.comboU.setObjectName("comboU")
self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.comboU)
self.label_2 = QtWidgets.QLabel(SetupPULoop)
self.label_2.setObjectName("label_2")
self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_2)
self.comboP = QtWidgets.QComboBox(SetupPULoop)
self.comboP.setObjectName("comboP")
self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.comboP)
self.verticalLayout.addLayout(self.formLayout_2)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
self.horizontalLayout.setObjectName("horizontalLayout")
self.okButton = QtWidgets.QPushButton(SetupPULoop)
self.okButton.setObjectName("okButton")
self.horizontalLayout.addWidget(self.okButton)
self.cancelButton = QtWidgets.QPushButton(SetupPULoop)
self.cancelButton.setObjectName("cancelButton")
self.horizontalLayout.addWidget(self.cancelButton)
self.verticalLayout.addLayout(self.horizontalLayout)
self.retranslateUi(SetupPULoop)
QtCore.QMetaObject.connectSlotsByName(SetupPULoop)
def retranslateUi(self, SetupPULoop):
_translate = QtCore.QCoreApplication.translate
SetupPULoop.setWindowTitle(_translate("SetupPULoop", "Set up PU-Loop"))
self.label.setText(_translate("SetupPULoop", "Velocity (U)"))
self.label_2.setText(_translate("SetupPULoop", "Pressure (P)"))
self.okButton.setText(_translate("SetupPULoop", "Ok"))
self.cancelButton.setText(_translate("SetupPULoop", "Cancel"))
import os
import sys
import bson
import pytest
@pytest.fixture(scope='function')
def database(mocker):
bin_path = os.path.join(os.getcwd(), 'bin')
mocker.patch('sys.path', [bin_path] + sys.path)
import database
return database
def test_42(data_builder, api_db, as_admin, database):
# Mimic old-style archived flag
session = data_builder.create_session()
session2 = data_builder.create_session()
api_db.sessions.update_one({'_id': bson.ObjectId(session)}, {'$set': {'archived': True}})
api_db.sessions.update_one({'_id': bson.ObjectId(session2)}, {'$set': {'archived': False}})
    # Verify archived session is not hidden anymore
assert session in [s['_id'] for s in as_admin.get('/sessions').json()]
# Verify upgrade creates new-style hidden tag
database.upgrade_to_42()
session_data = as_admin.get('/sessions/' + session).json()
assert 'archived' not in session_data
assert 'hidden' in session_data['tags']
# Verify archived was removed when false as well
session_data = as_admin.get('/sessions/' + session2).json()
assert 'archived' not in session_data
def test_43(data_builder, api_db, as_admin, file_form, database):
# Create session and upload file for later use as analysis input
session = data_builder.create_session()
r = as_admin.post('/sessions/' + session + '/files', files=file_form('input.txt'))
assert r.ok
# Create ad-hoc analysis with input ref, then upload output
r = as_admin.post('/sessions/' + session + '/analyses', json={
'label': 'offline',
'inputs': [{'type': 'session', 'id': session, 'name': 'input.txt'}]
})
assert r.ok
analysis_id = r.json()['_id']
r = as_admin.post('/analyses/' + analysis_id + '/files', files=file_form('output.txt', meta=[{'name': 'output.txt'}]))
assert r.ok
# Mimic old-style analysis input/output tags
analysis = api_db.analyses.find_one({'_id': bson.ObjectId(analysis_id)}, ['inputs', 'files'])
for f in analysis['inputs']:
f['input'] = True
for f in analysis['files']:
f['output'] = True
api_db.analyses.update_one({'_id': bson.ObjectId(analysis_id)},
{'$set': {'files': analysis['inputs'] + analysis['files']},
'$unset': {'inputs': ''}})
# Verify upgrade gets rid of tags and separates inputs/files
database.upgrade_to_43()
analysis = as_admin.get('/analyses/' + analysis_id).json()
assert 'inputs' in analysis
assert len(analysis['inputs']) == 1
assert 'input' not in analysis['inputs'][0]
assert 'files' in analysis
assert len(analysis['files']) == 1
assert 'output' not in analysis['files'][0]
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import socket
#########
# PATHS #
#########
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Full filesystem path to the project.
PROJECT_APP_PATH = os.path.dirname(os.path.abspath(__file__))
PROJECT_APP = os.path.basename(PROJECT_APP_PATH)
PROJECT_ROOT = BASE_DIR = os.path.dirname(PROJECT_APP_PATH)
# openshift is our PAAS for now.
ON_PAAS = 'OPENSHIFT_REPO_DIR' in os.environ
if ON_PAAS:
SECRET_KEY = os.environ['OPENSHIFT_SECRET_TOKEN']
else:
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')_7av^!cy(wfx=k#3*7x+(=j^fzv+ot^1@sh9s9t=8$bu@r(z$'
# SECURITY WARNING: don't run with debug turned on in production!
# adjust to turn off when on Openshift, but allow an environment variable to override on PAAS
DEBUG = not ON_PAAS
DEBUG = DEBUG or 'DEBUG' in os.environ
if ON_PAAS and DEBUG:
print("*** Warning - Debug mode is on ***")
TEMPLATE_DEBUG = True
if ON_PAAS:
ALLOWED_HOSTS = [os.environ['OPENSHIFT_APP_DNS'], socket.gethostname()]
else:
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.redirects",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
"mezzanine.boot",
"mezzanine.conf",
"mezzanine.core",
"mezzanine.generic",
"mezzanine.pages",
"mezzanine.blog",
"mezzanine.forms",
"mezzanine.galleries",
"mezzanine.twitter",
# "mezzanine.accounts",
# "mezzanine.mobile",
)
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
"mezzanine.core.middleware.UpdateCacheMiddleware",
'django.contrib.sessions.middleware.SessionMiddleware',
# Uncomment if using internationalisation or localisation
# 'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
"mezzanine.core.request.CurrentRequestMiddleware",
"mezzanine.core.middleware.RedirectFallbackMiddleware",
"mezzanine.core.middleware.TemplateForDeviceMiddleware",
"mezzanine.core.middleware.TemplateForHostMiddleware",
"mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
"mezzanine.core.middleware.SitePermissionMiddleware",
# Uncomment the following if using any of the SSL settings:
# "mezzanine.core.middleware.SSLRedirectMiddleware",
"mezzanine.pages.middleware.PageMiddleware",
"mezzanine.core.middleware.FetchFromCacheMiddleware",
)
ROOT_URLCONF = 'mysite.urls'
# Templates
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
TEMPLATE_LOADERS = (
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
)
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
if ON_PAAS:
# determine if we are on MySQL or POSTGRESQL
if "OPENSHIFT_POSTGRESQL_DB_USERNAME" in os.environ:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['OPENSHIFT_APP_NAME'],
'USER': os.environ['OPENSHIFT_POSTGRESQL_DB_USERNAME'],
'PASSWORD': os.environ['OPENSHIFT_POSTGRESQL_DB_PASSWORD'],
'HOST': os.environ['OPENSHIFT_POSTGRESQL_DB_HOST'],
'PORT': os.environ['OPENSHIFT_POSTGRESQL_DB_PORT'],
}
}
elif "OPENSHIFT_MYSQL_DB_USERNAME" in os.environ:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': os.environ['OPENSHIFT_APP_NAME'],
'USER': os.environ['OPENSHIFT_MYSQL_DB_USERNAME'],
'PASSWORD': os.environ['OPENSHIFT_MYSQL_DB_PASSWORD'],
'HOST': os.environ['OPENSHIFT_MYSQL_DB_HOST'],
'PORT': os.environ['OPENSHIFT_MYSQL_DB_PORT'],
}
}
else:
# stock django, local development.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'wsgi', 'static')
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(BASE_DIR, "static"),
)
# Start mezzanine settings.
TIME_ZONE = 'UTC'
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en"
# Supported languages
LANGUAGES = (
('en', 'English'),
)
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SITE_ID = 1
USE_I18N = False
AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",)
# The numeric mode to set newly-uploaded files to. The value should be
# a mode you'd pass directly to os.chmod.
FILE_UPLOAD_PERMISSIONS = 0o644
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_APP
MEDIA_URL = STATIC_URL + "media/"
MEDIA_ROOT = os.path.join(BASE_DIR, 'wsgi', *MEDIA_URL.strip("/").split("/"))
ROOT_URLCONF = "%s.urls" % PROJECT_APP
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.static",
"django.core.context_processors.media",
"django.core.context_processors.request",
"django.core.context_processors.tz",
"mezzanine.conf.context_processors.settings",
"mezzanine.pages.context_processors.page",
)
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"
OPTIONAL_APPS = (
"debug_toolbar",
"django_extensions",
"compressor",
PACKAGE_NAME_FILEBROWSER,
PACKAGE_NAME_GRAPPELLI,
)
##################
# LOCAL SETTINGS #
##################
# Allow any settings to be defined in local_settings.py which should be
# ignored in your version control system allowing for settings to be
# defined per machine.
# Instead of doing "from .local_settings import *", we use exec so that
# local_settings has full access to everything defined in this module.
f = os.path.join(PROJECT_APP_PATH, "local_settings.py")
if os.path.exists(f):
exec(open(f, "rb").read())
try:
from mezzanine.utils.conf import set_dynamic_settings
except ImportError:
pass
else:
set_dynamic_settings(globals())
## xmlstream.py
##
## Copyright (C) 2001 Matthew Allum
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published
## by the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
"""\
xmlstream.py provides simple functionality for implementing
XML stream based network protocols. It is used as a base
for jabber.py.
xmlstream.py manages the network connectivity and xml parsing
of the stream. When a complete 'protocol element' (meaning a
complete child of the xmlstream's root) is parsed, the dispatch
method is called with a 'Node' instance of this structure.
The Node class is a very simple XML DOM like class for
manipulating XML documents or 'protocol elements' in this
case.
"""
# $Id: xmlstream.py 69 2005-09-05 14:44:19Z Zoomq $
import site
site.encoding = 'UTF-8'
import time, sys, re, socket
from select import select
from string import split,find,replace,join
import xml.parsers.expat
import traceback
VERSION = 0.3
False = 0
True = 1
TCP = 1
STDIO = 0
TCP_SSL = 2
#ENCODING = site.encoding
ENCODING = "utf-8"
BLOCK_SIZE = 1024 ## Number of bytes to get at a time via socket
## transactions
def XMLescape(txt):
"Escape XML entities"
txt = replace(txt, "&", "&")
txt = replace(txt, "<", "<")
txt = replace(txt, ">", ">")
return txt
def XMLunescape(txt):
"Unescape XML entities"
txt = replace(txt, "<", "<")
txt = replace(txt, ">", ">")
txt = replace(txt, "&", "&")
return txt
class error:
def __init__(self, value):
self.value = str(value)
def __str__(self):
return self.value
class Node:
"""A simple XML DOM like class"""
def __init__(self, tag='', parent=None, attrs=None ):
bits = split(tag)
if len(bits) == 1:
self.name = tag
self.namespace = ''
else:
self.namespace, self.name = bits
if attrs is None:
self.attrs = {}
else:
self.attrs = attrs
self.data = []
self.kids = []
self.parent = parent
def setParent(self, node):
"Set the nodes parent node."
self.parent = node
def getParent(self):
"return the nodes parent node."
return self.parent
    def getName(self):
        "Return the node's tag name."
        return self.name
def setName(self,val):
"Set the nodes tag name."
self.name = val
def putAttr(self, key, val):
"Add a name/value attribute to the node."
self.attrs[key] = val
def getAttr(self, key):
"Get a value for the nodes named attribute."
try: return self.attrs[key]
except: return None
def putData(self, data):
"Set the nodes textual data"
self.data.append(data)
def insertData(self, data):
"Set the nodes textual data"
self.data.append(data)
def getData(self):
"Return the nodes textual data"
return join(self.data, '')
def getDataAsParts(self):
"Return the node data as an array"
return self.data
def getNamespace(self):
"Returns the nodes namespace."
return self.namespace
def setNamespace(self, namespace):
"Set the nodes namespace."
self.namespace = namespace
def insertTag(self, name):
""" Add a child tag of name 'name' to the node.
Returns the newly created node.
"""
newnode = Node(tag=name, parent=self)
self.kids.append(newnode)
return newnode
def insertNode(self, node):
"Add a child node to the node"
self.kids.append(node)
return node
def insertXML(self, xml_str):
"Add raw xml as a child of the node"
newnode = NodeBuilder(xml_str).getDom()
self.kids.append(newnode)
return newnode
def __str__(self):
return self._xmlnode2str()
def _xmlnode2str(self, parent=None):
"""Returns an xml ( string ) representation of the node
and it children"""
s = "<" + self.name
if self.namespace:
if parent and parent.namespace != self.namespace:
s = s + " xmlns = '%s' " % self.namespace
for key in self.attrs.keys():
val = str(self.attrs[key])
s = s + " %s='%s'" % ( key, XMLescape(val) )
s = s + ">"
cnt = 0
if self.kids != None:
for a in self.kids:
if (len(self.data)-1) >= cnt: s = s + XMLescape(self.data[cnt])
s = s + a._xmlnode2str(parent=self)
cnt=cnt+1
if (len(self.data)-1) >= cnt: s = s + XMLescape(self.data[cnt])
s = s + "</" + self.name + ">"
return s
def getTag(self, name):
"""Returns a child node with tag name. Returns None
if not found."""
for node in self.kids:
if node.getName() == name:
return node
return None
def getTags(self, name):
"""Like getTag but returns a list with matching child nodes"""
nodes=[]
for node in self.kids:
if node.getName() == name:
nodes.append(node)
return nodes
def getChildren(self):
"""Returns a nodes children"""
return self.kids
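# A small sketch of building a protocol element with Node (hypothetical
# addresses; the serialization shown is roughly what __str__ produces):
#
#     msg = Node('message', attrs={'to': 'user@example.org'})
#     msg.insertTag('body').putData('hello')
#     str(msg)  # -> "<message to='user@example.org'><body>hello</body></message>"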
class NodeBuilder:
"""builds a 'minidom' from data parsed to it. Primarily for insertXML
method of Node"""
def __init__(self,data):
self._parser = xml.parsers.expat.ParserCreate(namespace_separator=' ')
self._parser.StartElementHandler = self.unknown_starttag
self._parser.EndElementHandler = self.unknown_endtag
self._parser.CharacterDataHandler = self.handle_data
self.__depth = 0
self.__done = 0 #needed ?
self.__space_regex = re.compile('^\s+$')
self._parser.Parse(data,1)
def unknown_starttag(self, tag, attrs):
self.__depth = self.__depth + 1
if self.__depth == 1:
self._mini_dom = Node(tag=tag, attrs=attrs)
self._ptr = self._mini_dom
elif self.__depth > 1:
self._ptr.kids.append(Node(tag =tag,
parent=self._ptr,
attrs =attrs ))
self._ptr = self._ptr.kids[-1]
else: ## fix this ....
pass
def unknown_endtag(self, tag ):
self.__depth = self.__depth - 1
if self.__depth == 0:
self.dispatch(self._mini_dom)
elif self.__depth > 0:
self._ptr = self._ptr.parent
else:
pass
def handle_data(self, data):
if not self.__space_regex.match(data): ## check its not all blank
self._ptr.data.append(data)
def dispatch(self,dom):
self.__done = 1
def getDom(self):
return self._mini_dom
class Stream:
def __init__(
self, host, port, namespace,
debug=True,
log=None,
sock=None,
id=None,
connection=TCP
):
self._parser = xml.parsers.expat.ParserCreate(namespace_separator=' ')
self._parser.StartElementHandler = self._unknown_starttag
self._parser.EndElementHandler = self._unknown_endtag
self._parser.CharacterDataHandler = self._handle_data
self._host = host
self._port = port
self._namespace = namespace
self.__depth = 0
self._sock = sock
self._sslObj = None
self._sslIssuer = None
self._sslServer = None
self._incomingID = None
self._outgoingID = id
self._debug = debug
self._connection=connection
self.DEBUG("stream init called")
if log:
if type(log) is type(""):
try:
self._logFH = open(log,'w')
except:
print "ERROR: can open %s for writing"
sys.exit(0)
else: ## assume its a stream type object
self._logFH = log
else:
self._logFH = None
self._timestampLog = True
def timestampLog(self,timestamp):
""" Enable or disable the showing of a timestamp in the log.
By default, timestamping is enabled.
"""
self._timestampLog = timestamp
def DEBUG(self,txt):
if self._debug:
sys.stderr.write(("DEBUG: %s\n" % txt).encode('utf-8'))
def getSocket(self):
return self._sock
def header(self):
self.DEBUG("stream: sending initial header")
str = u"<?xml version='1.0' encoding='UTF-8' ?> \
<stream:stream to='%s' xmlns='%s'" % ( self._host,
self._namespace )
if self._outgoingID: str = str + " id='%s' " % self._outgoingID
str = str + " xmlns:stream='http://etherx.jabber.org/streams'>"
self.write (str)
self.read()
def _handle_data(self, data):
"""XML Parser callback"""
self.DEBUG("data-> " + data)
## TODO: get rid of empty space
## self._ptr.data = self._ptr.data + data
self._ptr.data.append(data)
def _unknown_starttag(self, tag, attrs):
"""XML Parser callback"""
self.__depth = self.__depth + 1
self.DEBUG("DEPTH -> %i , tag -> %s, attrs -> %s" % \
(self.__depth, tag, str(attrs)) )
if self.__depth == 2:
self._mini_dom = Node(tag=tag, attrs=attrs)
self._ptr = self._mini_dom
elif self.__depth > 2:
self._ptr.kids.append(Node(tag=tag,parent=self._ptr,attrs=attrs))
self._ptr = self._ptr.kids[-1]
        else: ## it's the stream tag:
if attrs.has_key('id'):
self._incomingID = attrs['id']
def _unknown_endtag(self, tag ):
"""XML Parser callback"""
self.__depth = self.__depth - 1
self.DEBUG("DEPTH -> %i" % self.__depth)
if self.__depth == 1:
self.dispatch(self._mini_dom)
elif self.__depth > 1:
self._ptr = self._ptr.parent
else:
self.DEBUG("*** Server closed connection ? ****")
def dispatch(self, nodes, depth = 0):
"""Overide with the method you want to called with
a node structure of a 'protocol element."""
padding = ' '
padding = padding * depth
depth = depth + 1
for n in nodes:
if n.kids != None:
self.dispatch(n.kids, depth)
##def syntax_error(self, message):
## self.DEBUG("error " + message)
def read(self):
"""Reads incoming data. Called by process() so nonblocking"""
data = u''
data_in = u''
if self._connection == TCP:
try:
data2=self._sock.recv(BLOCK_SIZE)
data_in = data_in + \
unicode(data2,'utf-8').encode(ENCODING,
'replace')
except:
print `data2`
raise
while data_in:
data = data + data_in
if len(data_in) != BLOCK_SIZE:
break
data_in = unicode(self._sock.recv(BLOCK_SIZE),'utf-8').encode(
ENCODING, 'replace')
if self._connection == TCP_SSL:
data_in=data_in+self._sslObj.read(BLOCK_SIZE).decode('utf-8')
while data_in:
data = data + data_in
if len(data_in) != BLOCK_SIZE:
break
data_in = self._sslObj.read(BLOCK_SIZE).decode('utf-8')
elif self._connection == STDIO:
            ## Hope this doesn't buffer!
data_in = data_in + unicode(sys.stdin.read(1024),'utf-8').encode(
ENCODING, 'replace')
while data_in:
data = data + data_in
if len(data_in) != 1024:
break
data_in = unicode(sys.stdin.read(1024),'utf-8').encode(
ENCODING, 'replace')
else:
pass # should never get here
self.DEBUG("got data %s" % data )
self.log(data, 'RECV:')
data=data.encode('utf-8')
self._parser.Parse(data)
return data
def write(self,data_out=u''):
"""Writes raw outgoing data. blocks"""
try:
if self._connection == TCP:
self._sock.send (data_out.encode('utf-8'))
elif self._connection == TCP_SSL:
self._sslObj.write(data_out.encode('utf-8'))
elif self._connection == STDIO:
self.stdout.write(data_out.encode('utf-8'))
else:
pass
self.log(data_out, 'SENT:')
self.DEBUG("sent %s" % data_out)
except:
traceback.print_exc()
self.DEBUG("xmlstream write threw error")
self.disconnected()
def process(self,timeout):
reader=Node
if self._connection == TCP:
reader = self._sock
elif self._connection == TCP_SSL:
reader = self._sock
elif self._connection == STDIO:
reader = sys.stdin
else:
pass
ready_for_read,ready_for_write,err = \
select( [reader],[],[],timeout)
for s in ready_for_read:
if s == reader:
if not len(self.read()): # length of 0 means disconnect
## raise error("network error") ?
self.disconnected()
return False
return True
return False
def disconnect(self):
"""Close the stream and socket"""
time.sleep(1) ## sleep for a second - server bug ? ##
self.write ( "</stream:stream>" )
self._sock.close()
self._sock = None
    def disconnected(self): ## To be overridden ##
        """Called when a Network Error or disconnection occurs.
        Designed to be overridden"""
self.DEBUG("Network Disconnection")
pass
def log(self, data, inout=u''):
"""Logs data to the specified filehandle. Data is time stamped
and prefixed with inout"""
if self._logFH is not None:
if self._timestampLog:
ts=time.asctime(time.localtime(time.time()))
inout=inout.encode('ascii','replace')
data=data.encode('ascii','replace')
self._logFH.write("%s - %s - %s\n" % (ts,inout,data))
else:
self._logFH.write((u"%s - %s\n" % (inout, data )).encode('utf-8') )
    def getIncomingID(self):
        """Returns the stream ID assigned by the remote end"""
        return self._incomingID
    def getOutgoingID(self):
        """Returns the stream ID we sent to the remote end"""
        return self._outgoingID
class Client(Stream):
def connect(self):
"""Attempt to connect to specified host"""
self.DEBUG("client connect called to %s %s type %i" % (self._host,
self._port,
self._connection) )
## TODO: check below that stdin/stdout are actually open
if self._connection == STDIO: return
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
            self._sock.connect((self._host, self._port))
except socket.error, e:
self.DEBUG("socket error")
raise error(e)
if self._connection == TCP_SSL:
try:
self.DEBUG("Attempting to create ssl socket")
self._sslObj = socket.ssl( self._sock, None, None )
self._sslIssuer = self._sslObj.issuer()
self._sslServer = self._sslObj.server()
except:
self.DEBUG("Socket Error: No SSL Support")
raise error("No SSL Support")
self.DEBUG("connected")
self.header()
return 0
class Server:
def now(self): return time.ctime(time.time())
def __init__(self, maxclients=10):
self.host = ''
self.port = 5222
self.streams = []
# make main sockets for accepting new client requests
self.mainsocks, self.readsocks, self.writesocks = [], [], []
        self.portsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.portsock.bind((self.host, self.port))
self.portsock.listen(maxclients)
self.mainsocks.append(self.portsock) # add to main list to identify
self.readsocks.append(self.portsock) # add to select inputs list
# event loop: listen and multiplex until server process killed
def serve(self):
print 'select-server loop starting'
while 1:
print "LOOPING"
readables, writeables, exceptions = select(self.readsocks,
self.writesocks, [])
for sockobj in readables:
                if sockobj in self.mainsocks: # for ready input sockets
newsock, address = sockobj.accept() # accept not block
print 'Connect:', address, id(newsock)
self.readsocks.append(newsock)
self._makeNewStream(newsock)
# add to select list, wait
else:
# client socket: read next line
data = sockobj.recv(1024)
# recv should not block
print '\tgot', data, 'on', id(sockobj)
if not data: # if closed by the clients
                        sockobj.close() # close here and remove from the select list
self.readsocks.remove(sockobj)
else:
# this may block: should really select for writes too
sockobj.send('Echo=>%s' % data)
def _makeNewStream(self, sckt):
new_stream = Stream('localhost', 5222,
'jabber:client',
sock=sckt)
self.streams.append(new_stream)
## maybe overide for a 'server stream'
new_stream.header()
return new_stream
def _getStreamSockets(self):
socks = [];
for s in self.streams:
socks.append(s.getSocket())
return socks
def _getStreamFromSocket(self, sock):
for s in self.streams:
if s.getSocket() == sock:
return s
return None
import ray
class ActorPool:
"""Utility class to operate on a fixed pool of actors.
Arguments:
actors (list): List of Ray actor handles to use in this pool.
Examples:
>>> a1, a2 = Actor.remote(), Actor.remote()
>>> pool = ActorPool([a1, a2])
>>> print(list(pool.map(lambda a, v: a.double.remote(v),\
... [1, 2, 3, 4])))
[2, 4, 6, 8]
"""
def __init__(self, actors):
# actors to be used
self._idle_actors = list(actors)
# get actor from future
self._future_to_actor = {}
# get future from index
self._index_to_future = {}
# next task to do
self._next_task_index = 0
# next task to return
self._next_return_index = 0
        # work waiting to be submitted once an actor frees up
self._pending_submits = []
def map(self, fn, values):
"""Apply the given function in parallel over the actors and values.
This returns an ordered iterator that will return results of the map
as they finish. Note that you must iterate over the iterator to force
the computation to finish.
Arguments:
fn (func): Function that takes (actor, value) as argument and
returns an ObjectRef computing the result over the value. The
actor will be considered busy until the ObjectRef completes.
values (list): List of values that fn(actor, value) should be
applied to.
Returns:
Iterator over results from applying fn to the actors and values.
Examples:
>>> pool = ActorPool(...)
>>> print(list(pool.map(lambda a, v: a.double.remote(v),\
... [1, 2, 3, 4])))
[2, 4, 6, 8]
"""
for v in values:
self.submit(fn, v)
while self.has_next():
yield self.get_next()
def map_unordered(self, fn, values):
"""Similar to map(), but returning an unordered iterator.
This returns an unordered iterator that will return results of the map
        as they finish. This can be more efficient than map() if some results
take longer to compute than others.
Arguments:
fn (func): Function that takes (actor, value) as argument and
returns an ObjectRef computing the result over the value. The
actor will be considered busy until the ObjectRef completes.
values (list): List of values that fn(actor, value) should be
applied to.
Returns:
Iterator over results from applying fn to the actors and values.
Examples:
>>> pool = ActorPool(...)
>>> print(list(pool.map_unordered(lambda a, v: a.double.remote(v),\
... [1, 2, 3, 4])))
[6, 2, 4, 8]
"""
for v in values:
self.submit(fn, v)
while self.has_next():
yield self.get_next_unordered()
def submit(self, fn, value):
"""Schedule a single task to run in the pool.
This has the same argument semantics as map(), but takes on a single
value instead of a list of values. The result can be retrieved using
get_next() / get_next_unordered().
Arguments:
fn (func): Function that takes (actor, value) as argument and
returns an ObjectRef computing the result over the value. The
actor will be considered busy until the ObjectRef completes.
value (object): Value to compute a result for.
Examples:
>>> pool = ActorPool(...)
>>> pool.submit(lambda a, v: a.double.remote(v), 1)
>>> pool.submit(lambda a, v: a.double.remote(v), 2)
>>> print(pool.get_next(), pool.get_next())
2, 4
"""
if self._idle_actors:
actor = self._idle_actors.pop()
future = fn(actor, value)
self._future_to_actor[future] = (self._next_task_index, actor)
self._index_to_future[self._next_task_index] = future
self._next_task_index += 1
else:
self._pending_submits.append((fn, value))
def has_next(self):
"""Returns whether there are any pending results to return.
Returns:
True if there are any pending results not yet returned.
Examples:
>>> pool = ActorPool(...)
>>> pool.submit(lambda a, v: a.double.remote(v), 1)
>>> print(pool.has_next())
True
>>> print(pool.get_next())
2
>>> print(pool.has_next())
False
"""
return bool(self._future_to_actor)
def get_next(self, timeout=None):
"""Returns the next pending result in order.
This returns the next result produced by submit(), blocking for up to
the specified timeout until it is available.
Returns:
The next result.
Raises:
TimeoutError if the timeout is reached.
Examples:
>>> pool = ActorPool(...)
>>> pool.submit(lambda a, v: a.double.remote(v), 1)
>>> print(pool.get_next())
2
"""
if not self.has_next():
raise StopIteration("No more results to get")
if self._next_return_index >= self._next_task_index:
raise ValueError("It is not allowed to call get_next() after "
"get_next_unordered().")
future = self._index_to_future[self._next_return_index]
if timeout is not None:
res, _ = ray.wait([future], timeout=timeout)
if not res:
raise TimeoutError("Timed out waiting for result")
del self._index_to_future[self._next_return_index]
self._next_return_index += 1
i, a = self._future_to_actor.pop(future)
self._return_actor(a)
return ray.get(future)
def get_next_unordered(self, timeout=None):
"""Returns any of the next pending results.
This returns some result produced by submit(), blocking for up to
the specified timeout until it is available. Unlike get_next(), the
        results are not always returned in the same order as submitted, which can
improve performance.
Returns:
The next result.
Raises:
TimeoutError if the timeout is reached.
Examples:
>>> pool = ActorPool(...)
>>> pool.submit(lambda a, v: a.double.remote(v), 1)
>>> pool.submit(lambda a, v: a.double.remote(v), 2)
>>> print(pool.get_next_unordered())
4
>>> print(pool.get_next_unordered())
2
"""
if not self.has_next():
raise StopIteration("No more results to get")
# TODO(ekl) bulk wait for performance
res, _ = ray.wait(
list(self._future_to_actor), num_returns=1, timeout=timeout)
if res:
[future] = res
else:
raise TimeoutError("Timed out waiting for result")
i, a = self._future_to_actor.pop(future)
self._return_actor(a)
del self._index_to_future[i]
self._next_return_index = max(self._next_return_index, i + 1)
return ray.get(future)
def _return_actor(self, actor):
self._idle_actors.append(actor)
if self._pending_submits:
self.submit(*self._pending_submits.pop(0))
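# --- Usage sketch (editor's addition, not part of the original module) ---
# A minimal example of driving the ActorPool above; it assumes Ray is
# installed and that this class is importable as ray.util.ActorPool. The
# Doubler actor and the pool size of 2 are illustrative only.
import ray
from ray.util import ActorPool
@ray.remote
class Doubler:
    def double(self, v):
        return 2 * v
if __name__ == "__main__":
    ray.init(ignore_reinit_error=True)
    pool = ActorPool([Doubler.remote() for _ in range(2)])
    # map() preserves submission order; map_unordered() yields as tasks finish.
    print(list(pool.map(lambda a, v: a.double.remote(v), [1, 2, 3, 4])))  # [2, 4, 6, 8]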
# -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,json
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
class source:
def __init__(self):
self.language = ['en']
self.domains = ['moviexk.com']
self.base_link = 'http://moviexk.com'
self.search_link = 'aHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vY3VzdG9tc2VhcmNoL3YxZWxlbWVudD9rZXk9QUl6YVN5Q1ZBWGlVelJZc01MMVB2NlJ3U0cxZ3VubU1pa1R6UXFZJnJzej1maWx0ZXJlZF9jc2UmbnVtPTEwJmhsPWVuJmN4PTAxMzQ0NjM1MTYzMDQ5MzU5NTE5Nzprc2NlY2tjdXZxcyZnb29nbGVob3N0PXd3dy5nb29nbGUuY29tJnE9JXM='
def movie(self, imdb, title, year):
try:
url = '%s/%s-%s/' % (self.base_link, cleantitle.geturl(title), year)
url = client.request(url, output='geturl')
            if url is None: raise Exception()
url = re.findall('(?://.+?|)(/.+)', url)[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
pass
try:
t = cleantitle.get(title)
q = '%s %s' % (title, year)
q = self.search_link.decode('base64') % urllib.quote_plus(q)
r = client.request(q, error=True)
r = json.loads(r)['results']
r = [(i['url'], i['titleNoFormatting']) for i in r]
r = [(i[0], re.findall('(?:^Watch Movie |^Watch |)(.+?)\((\d{4})', i[1])) for i in r]
r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
r = [(urllib.unquote_plus(i[0]), i[1], i[2]) for i in r]
r = [(urlparse.urlparse(i[0]).path, i[1], i[2]) for i in r]
r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
r = re.sub('/watch-movie-|-\d+$', '/', r[0][0].strip())
url = re.findall('(?://.+?|)(/.+)', r)[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
pass
def tvshow(self, imdb, tvdb, tvshowtitle, year):
try:
t = cleantitle.get(tvshowtitle)
q = '%s %s' % (tvshowtitle, year)
q = self.search_link.decode('base64') % urllib.quote_plus(q)
r = client.request(q)
r = json.loads(r)['results']
r = [(i['url'], i['titleNoFormatting']) for i in r]
r = [(i[0], re.findall('(?:^Watch Movie |^Watch |)(.+?)$', i[1])) for i in r]
r = [(i[0], i[1][0].rsplit('TV Series')[0].strip('(')) for i in r if i[1]]
r = [(urllib.unquote_plus(i[0]), i[1]) for i in r]
r = [(urlparse.urlparse(i[0]).path, i[1]) for i in r]
r = [i for i in r if t == cleantitle.get(i[1])]
r = urlparse.urljoin(self.base_link, r[0][0].strip())
if '/watch-movie-' in r: r = re.sub('/watch-movie-|-\d+$', '/', r)
y = re.findall('(\d{4})', r)
if y:
y = y[0]
else:
y = client.request(r)
y = re.findall('(?:D|d)ate\s*:\s*(\d{4})', y)[0]
if not year == y: raise Exception()
url = re.findall('(?://.+?|)(/.+)', r)[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
            if url is None: return
url = '%s?season=%01d&episode=%01d' % (url, int(season), int(episode))
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
            if url is None: return sources
f = urlparse.urljoin(self.base_link, url)
url = f.rsplit('?', 1)[0]
r = client.request(url, mobile=True)
r = client.parseDOM(r, 'div', attrs = {'id': 'servers'})
r = client.parseDOM(r, 'li')
r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
try:
s = urlparse.parse_qs(urlparse.urlparse(f).query)['season'][0]
e = urlparse.parse_qs(urlparse.urlparse(f).query)['episode'][0]
r = [(i[0], re.findall('(\d+)', i[1])) for i in r]
r = [(i[0], '%01d' % int(i[1][0]), '%01d' % int(i[1][1])) for i in r if len(i[1]) > 1]
r = [i[0] for i in r if s == i[1] and e == i[2]]
except:
r = [i[0] for i in r]
for u in r:
try:
url = client.request(u, mobile=True)
url = client.parseDOM(url, 'source', ret='src')
url = [i.strip().split()[0] for i in url]
for i in url:
try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'Moviexk', 'url': i, 'direct': True, 'debridonly': False})
except: pass
except:
pass
return sources
except:
return sources
def resolve(self, url):
return directstream.googlepass(url)
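# --- Illustrative note (editor's addition, not part of the add-on) ---
# The base64 string stored in self.search_link decodes to a Google Custom
# Search URL template that contains a single '%s' placeholder for the query;
# the original (Python 2) code builds the request URL with
#     self.search_link.decode('base64') % urllib.quote_plus(q)
# A Python 3 sketch of that same step (the example query is made up):
import base64
import urllib.parse
def build_search_url(template_b64, query):
    # Decode the stored template, then URL-encode the query into it.
    template = base64.b64decode(template_b64).decode('utf-8')
    return template % urllib.parse.quote_plus(query)
# e.g. build_search_url(source().search_link, 'example movie 2016')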
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import ExtractorError
class RedTubeIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?redtube\.com/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.redtube.com/66418',
'md5': '7b8c22b5e7098a3e1c09709df1126d2d',
'info_dict': {
'id': '66418',
'ext': 'mp4',
'title': 'Sucked on a toilet',
'age_limit': 18,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
if any(s in webpage for s in ['video-deleted-info', '>This video has been removed']):
raise ExtractorError('Video %s has been removed' % video_id, expected=True)
video_url = self._html_search_regex(
r'<source src="(.+?)" type="video/mp4">', webpage, 'video URL')
video_title = self._html_search_regex(
r'<h1 class="videoTitle[^"]*">(.+?)</h1>',
webpage, 'title')
video_thumbnail = self._og_search_thumbnail(webpage)
# No self-labeling, but they describe themselves as
# "Home of Videos Porno"
age_limit = 18
return {
'id': video_id,
'url': video_url,
'ext': 'mp4',
'title': video_title,
'thumbnail': video_thumbnail,
'age_limit': age_limit,
}
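# --- Usage sketch (editor's addition, not part of the extractor) ---
# Extractors like RedTubeIE are normally driven through youtube-dl's public
# YoutubeDL API rather than instantiated directly; the URL below is the one
# from the _TEST block, and the option values shown are illustrative.
import youtube_dl
def fetch_metadata(url):
    with youtube_dl.YoutubeDL({'quiet': True, 'skip_download': True}) as ydl:
        return ydl.extract_info(url, download=False)
# info = fetch_metadata('http://www.redtube.com/66418')
# print(info['id'], info['title'])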
from ...abstract_osid.osid import markers as abc_osid_markers
from .. import settings
class OsidPrimitive(abc_osid_markers.OsidPrimitive):
"""A marker interface for an interface that behaves like a language
primitive.
"""
class Identifiable(abc_osid_markers.Identifiable):
"""A marker interface for objects uniquely identified with an OSID Id."""
# The authority for all identifiers can be set in settings.py:
# Disregard and delete as necessary:
_authority = 'DLKit' + settings.HOST
    def get_id(self):
        """Gets the Id associated with this instance of this OSID object."""
        from .. import primitives
        # return primitives.Id(identifier = self._my_map['id'],
        #                      namespace = self._namespace,
        #                      authority = self._authority)
        return primitives.Id(idstr=self._my_map['id'])
    def is_current(self):
        """Tests to see if the last method invoked retrieved up-to-date
        data."""
        return self._my_map['current']
id_ = property(get_id)
ident = property(get_id)
class Extensible(abc_osid_markers.Extensible):
"""A marker interface for objects that contain OsidRecords."""
def __init__(self): # This will never get called :)
from ..type.objects import TypeList
self._record_types = TypeList([])
def __iter__(self):
for attr in dir(self):
if not attr.startswith('__'):
yield attr
def __getitem__(self, item):
return getattr(self, item)
def __getattribute__(self, name):
if not name.startswith('_'):
if '_records' in self.__dict__:
for record in self._records:
try:
return self._records[record][name]
except AttributeError:
pass
return object.__getattribute__(self, name)
def __getattr__(self, name):
if '_records' in self.__dict__:
for record in self._records:
try:
return self._records[record][name]
except AttributeError:
pass
        raise AttributeError(name)
def get_record_types(self):
"""Gets the record types available in this object."""
return self._record_types
"""
if self._my_extension_map is None:
self._my_extension_map = self._get_extension_map()
type_list = []
for type_id in self._my_extension_map['recordTypeIds']:
url_path = '/handcar/services/learning/types/' + type_id
type_list.append(self._get_request(url_path))
return TypeList(type_list)"""
def has_record_type(self, record_type=None):
"""Tests if this object supports the given record Type."""
return record_type in self._record_types
record_types = property(get_record_types)
class Browsable(abc_osid_markers.Browsable):
"""A marker interface for objects that offer property inspection."""
def get_properties(self):
"""Gets a list of properties."""
pass
def get_properties_by_record_type(self, record_type=None):
"""Gets a list of properties corresponding to the specified record
type."""
pass
properties = property(get_properties)
class Suppliable(abc_osid_markers.Suppliable):
pass
class Temporal(abc_osid_markers.Temporal):
"""``Temporal`` is used to indicate the object endures for a period of time."""
def is_effective(self):
"""Tests if the current date is within the start end end dates inclusive.
return: (boolean) - ``true`` if this is effective, ``false``
otherwise
*compliance: mandatory -- This method must be implemented.*
"""
pass
def get_start_date(self):
"""Gets the start date.
return: (osid.calendaring.DateTime) - the start date
*compliance: mandatory -- This method must be implemented.*
"""
pass
def get_end_date(self):
"""Gets the end date.
return: (osid.calendaring.DateTime) - the end date
*compliance: mandatory -- This method must be implemented.*
"""
pass
class Aggregateable(abc_osid_markers.Aggregateable):
"""Aggregateable is used for an OsidObject to indicate that some or all
of the definition is based on an included set of other OsidObjects
which are directly accessible and do not exist outside the context
of the parent object.
Aggregateables allow for an OsidObject to stand alone without
knowledge of the originating service.
An Asset is an example of an aggregate by including the
AssetContents. An Asset also contains a provider however in this
case the provider is categorized as a simple data attribute of the
Asset that can be changed by updating the Asset using an AssetForm.
    The AssetContent differs in that there exists an explicit mapping to the
Asset managed through an OsidSession but accessible directly within
the Asset to enable its consumption outside the Repository OSID.
This marker has little practicality other than to identify a service
pattern that is neither a data attribute nor a separately accessible
relationship or mapping.
"""
class Sourceable(abc_osid_markers.Sourceable):
"""Sourceble is used for OsidObjects where information about a provider
is appropriate."""
def get_provider_id(self):
"""Gets the Id of the provider."""
pass
def get_provider(self):
"""Gets the Resource representing the provider."""
pass
def get_branding_ids(self):
"""Gets the branding asset ``Ids``."""
pass
def get_branding(self):
"""Gets a branding, such as an image or logo, expressed using the
Asset interface."""
pass
def get_license(self):
"""Gets the terms of usage."""
pass
provider_id = property(get_provider_id)
provider = property(get_provider)
branding_ids = property(get_branding_ids)
branding = property(get_branding)
license = property(get_license)
class Federateable(abc_osid_markers.Federateable):
"""Federateable is used to indicate an OsidObject can be federated
using the OSID Hierarchy pattern."""
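# --- Illustrative note (editor's addition, not part of this module) ---
# The Extensible marker above forwards unknown attribute lookups to the record
# objects stored in self._records. A standalone sketch of that delegation
# pattern (the class and field names here are made up):
class _DemoRecord(object):
    def __init__(self, **fields):
        self.__dict__.update(fields)
    def __getitem__(self, name):
        return getattr(self, name)
class _DemoExtensible(object):
    def __init__(self):
        self._records = {'demo': _DemoRecord(custom_field='hello')}
    def __getattr__(self, name):
        # Only reached when normal attribute lookup fails.
        for record in self._records:
            try:
                return self._records[record][name]
            except AttributeError:
                pass
        raise AttributeError(name)
# _DemoExtensible().custom_field  ->  'hello'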
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import decimal
import math
import re
from oslo_utils import units
import six
from django.conf import settings
from django.contrib.auth import logout # noqa
from django import http
from django.utils.encoding import force_text
from django.utils.functional import lazy # noqa
from django.utils import translation
def _lazy_join(separator, strings):
return separator.join([force_text(s)
for s in strings])
lazy_join = lazy(_lazy_join, six.text_type)
def bytes_to_gigabytes(bytes):
    # Converts the number of bytes to the next highest number of gigabytes.
    # For example, 5000000 bytes (about 5 MB) would return 1.
return int(math.ceil(float(bytes) / units.Gi))
def add_logout_reason(request, response, reason):
# Store the translated string in the cookie
lang = translation.get_language_from_request(request)
with translation.override(lang):
reason = six.text_type(reason).encode('utf-8')
response.set_cookie('logout_reason', reason, max_age=10)
def logout_with_message(request, msg, redirect=True):
"""Send HttpResponseRedirect to LOGOUT_URL.
`msg` is a message displayed on the login page after the logout, to explain
the logout reason.
"""
logout(request)
if redirect:
response = http.HttpResponseRedirect(
'%s?next=%s' % (settings.LOGOUT_URL, request.path))
else:
response = http.HttpResponseRedirect(settings.LOGOUT_URL)
add_logout_reason(request, response, msg)
return response
def get_page_size(request, default=20):
session = request.session
cookies = request.COOKIES
try:
page_size = int(session.get('horizon_pagesize',
cookies.get('horizon_pagesize',
getattr(settings,
'API_RESULT_PAGE_SIZE',
default))))
except ValueError:
page_size = session['horizon_pagesize'] = int(default)
return page_size
def get_log_length(request, default=35):
session = request.session
cookies = request.COOKIES
try:
log_length = int(session.get(
'instance_log_length',
cookies.get('instance_log_length',
getattr(settings,
'INSTANCE_LOG_LENGTH',
default))))
except ValueError:
log_length = session['instance_log_length'] = int(default)
return log_length
def natural_sort(attr):
return lambda x: [int(s) if s.isdigit() else s for s in
re.split(r'(\d+)', getattr(x, attr, x))]
def get_keys(tuple_of_tuples):
"""Processes a tuple of 2-element tuples and returns a tuple containing
first component of each tuple.
"""
return tuple([t[0] for t in tuple_of_tuples])
def value_for_key(tuple_of_tuples, key):
"""Processes a tuple of 2-element tuples and returns the value
    corresponding to the given key. If no value is found, the key is returned.
"""
for t in tuple_of_tuples:
if t[0] == key:
return t[1]
else:
return key
def next_key(tuple_of_tuples, key):
"""Processes a tuple of 2-element tuples and returns the key which comes
after the given key.
"""
for i, t in enumerate(tuple_of_tuples):
if t[0] == key:
try:
return tuple_of_tuples[i + 1][0]
except IndexError:
return None
def previous_key(tuple_of_tuples, key):
"""Processes a tuple of 2-element tuples and returns the key which comes
before the given key.
"""
    for i, t in enumerate(tuple_of_tuples):
        if t[0] == key:
            if i == 0:
                # A -1 index would otherwise wrap around to the last element.
                return None
            try:
                return tuple_of_tuples[i - 1][0]
            except IndexError:
                return None
def format_value(value):
"""Returns the given value rounded to one decimal place if it is a
decimal, or integer if it is an integer.
"""
value = decimal.Decimal(str(value))
if int(value) == value:
return int(value)
return round(value, 1)
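# --- Usage sketch (editor's addition, not part of this module) ---
# Quick illustrations of two helpers above, using made-up sample data.
# natural_sort() builds a sort key that compares embedded digits numerically,
# and value_for_key() falls back to the key itself when no match is found:
#     >>> sorted(['host10', 'host2', 'host1'], key=natural_sort('name'))
#     ['host1', 'host2', 'host10']
#     >>> value_for_key((('a', 'Alpha'), ('b', 'Beta')), 'a')
#     'Alpha'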
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Chi2 distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import gamma
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
__all__ = [
"Chi2",
"Chi2WithAbsDf",
]
class Chi2(gamma.Gamma):
"""Chi2 distribution.
The Chi2 distribution is defined over positive real numbers using a degrees of
freedom ("df") parameter.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; df, x > 0) = x**(0.5 df - 1) exp(-0.5 x) / Z
Z = 2**(0.5 df) Gamma(0.5 df)
```
where:
* `df` denotes the degrees of freedom,
* `Z` is the normalization constant, and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
The Chi2 distribution is a special case of the Gamma distribution, i.e.,
```python
Chi2(df) = Gamma(concentration=0.5 * df, rate=0.5)
```
"""
def __init__(self,
df,
validate_args=False,
allow_nan_stats=True,
name="Chi2"):
"""Construct Chi2 distributions with parameter `df`.
Args:
df: Floating point tensor, the degrees of freedom of the
distribution(s). `df` must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = locals()
    # Even though all stats of chi2 are defined for valid parameters, this is
    # not true in the parent class "gamma"; therefore, passing
    # allow_nan_stats=True through to the parent class results in unnecessary
    # asserts.
with ops.name_scope(name, values=[df]):
self._df = ops.convert_to_tensor(df, name="df")
super(Chi2, self).__init__(
concentration=0.5 * self._df,
rate=constant_op.constant(0.5, dtype=self._df.dtype),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
@staticmethod
def _param_shapes(sample_shape):
return {"df": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)}
@property
def df(self):
return self._df
class Chi2WithAbsDf(Chi2):
"""Chi2 with parameter transform `df = floor(abs(df))`."""
def __init__(self,
df,
validate_args=False,
allow_nan_stats=True,
name="Chi2WithAbsDf"):
parameters = locals()
with ops.name_scope(name, values=[df]):
super(Chi2WithAbsDf, self).__init__(
df=math_ops.floor(
math_ops.abs(df, name="abs_df"),
name="floor_abs_df"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
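# --- Numerical note (editor's addition, not part of this module) ---
# The relation quoted in the Chi2 docstring, Chi2(df) = Gamma(0.5*df, rate=0.5),
# can be sanity-checked with SciPy (an extra dependency assumed here; a rate of
# 0.5 corresponds to scale=2 in SciPy's parameterisation):
#     >>> from scipy import stats
#     >>> df, x = 4.0, 3.0
#     >>> abs(stats.chi2(df).pdf(x) - stats.gamma(a=0.5 * df, scale=2.0).pdf(x)) < 1e-12
#     True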
#
# Copyright (c) 2008--2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# Package uploading functions.
# Package info checking routines.
#
import os
import string
import sys
from spacewalk.common.usix import TupleType
from spacewalk.common.usix import raise_with_tb
from spacewalk.common.rhnLog import log_debug, log_error
from spacewalk.common.rhnConfig import CFG
from spacewalk.common.rhnException import rhnFault
from spacewalk.common.RPC_Base import RPC_Base
from spacewalk.server import rhnSQL, rhnPackageUpload, rhnUser, rhnSession
from spacewalk.server.importlib.backendOracle import SQLBackend
from spacewalk.server.importlib.importLib import Collection, IncompatibleArchError,\
Channel, IncompletePackage, InvalidChannelError
from spacewalk.server.importlib.packageImport import ChannelPackageSubscription
from spacewalk.server.importlib.packageUpload import uploadPackages, listChannels, listChannelsSource, listChannelsChecksum
from spacewalk.server.importlib.userAuth import UserAuth
from spacewalk.server.importlib.errataCache import schedule_errata_cache_update
# 12/22/05 wregglej 173287
# I made a decent number of changes to this file to implement session authentication.
# One of the requirements for this was to maintain backwards compatibility, so older
# versions of rhnpush can still talk to a newer satellite. This meant that I had to
# add new versions of each XMLRPC call that did authentication by sessions rather than
# username/password. I noticed that the only real difference between the two was the
# authentication scheme that the functions used, so rather than copy-n-paste a bunch of code,
# I separated the functionality from the authentication and just pass a authentication object
# to the function that actually does stuff.
class Packages(RPC_Base):
def __init__(self):
log_debug(3)
RPC_Base.__init__(self)
self.functions.append('uploadPackageInfo')
self.functions.append('uploadPackageInfoBySession')
self.functions.append('uploadSourcePackageInfo')
self.functions.append('uploadSourcePackageInfoBySession')
self.functions.append('listChannel')
self.functions.append('listChannelBySession')
self.functions.append('listChannelChecksum')
self.functions.append('listChannelChecksumBySession')
self.functions.append('listChannelSource')
self.functions.append('listChannelSourceBySession')
self.functions.append('listMissingSourcePackages')
self.functions.append('listMissingSourcePackagesBySession')
self.functions.append('channelPackageSubscription')
self.functions.append('channelPackageSubscriptionBySession')
self.functions.append('no_op')
self.functions.append('test_login')
self.functions.append('test_new_login')
self.functions.append('test_check_session')
self.functions.append('login')
self.functions.append('check_session')
self.functions.append('getPackageChecksum')
self.functions.append('getPackageChecksumBySession')
self.functions.append('getSourcePackageChecksum')
self.functions.append('getSourcePackageChecksumBySession')
# old MD5 compatibility functions
self.functions.append('getPackageMD5sum')
self.functions.append('getPackageMD5sumBySession')
self.functions.append('getSourcePackageMD5sum')
self.functions.append('getSourcePackageMD5sumBySession')
def no_op(self):
""" This is so the client can tell if the satellite supports session tokens or not.
        This was used in rhnpush-5.5.26 and older. Once no such versions of rhnpush
        remain in the wild, this function can be safely removed."""
return 1
def uploadPackageInfo(self, username, password, info):
""" Upload a collection of binary packages. """
log_debug(5, username, info)
authobj = auth(username, password)
return self._uploadPackageInfo(authobj, info)
def uploadPackageInfoBySession(self, session_string, info):
log_debug(5, session_string)
authobj = auth_session(session_string)
return self._uploadPackageInfo(authobj, info)
def _uploadPackageInfo(self, authobj, info):
# Authorize the org id passed
authobj.authzOrg(info)
# Get the channels
channels = info.get('channels')
if channels:
authobj.authzChannels(channels)
force = 0
if 'force' in info:
force = info['force']
return uploadPackages(info, force=force,
caller="server.app.uploadPackageInfo")
def uploadSourcePackageInfo(self, username, password, info):
""" Upload a collection of source packages. """
log_debug(5, username, info)
authobj = auth(username, password)
return self._uploadSourcePackageInfo(authobj, info)
def uploadSourcePackageInfoBySession(self, session_string, info):
log_debug(5, session_string)
authobj = auth_session(session_string)
return self._uploadSourcePackageInfo(authobj, info)
def _uploadSourcePackageInfo(self, authobj, info):
# Authorize the org id passed
authobj.authzOrg(info)
force = 0
if 'force' in info:
force = info['force']
return uploadPackages(info, source=1, force=force,
caller="server.app.uploadSourcePackageInfo")
def listChannelSource(self, channelList, username, password):
log_debug(5, channelList, username)
authobj = auth(username, password)
return self._listChannelSource(authobj, channelList)
def listChannelSourceBySession(self, channelList, session_string):
log_debug(5, channelList, session_string)
authobj = auth_session(session_string)
return self._listChannelSource(authobj, channelList)
def _listChannelSource(self, authobj, channelList):
authobj.authzChannels(channelList)
ret = listChannelsSource(channelList)
return ret
def listChannel(self, channelList, username, password):
""" List packages of a specified channel. """
log_debug(5, channelList, username)
authobj = auth(username, password)
return self._listChannel(authobj, channelList)
def listChannelBySession(self, channelList, session_string):
log_debug(5, channelList, session_string)
authobj = auth_session(session_string)
return self._listChannel(authobj, channelList)
def _listChannel(self, authobj, channelList):
authobj.authzChannels(channelList)
return listChannels(channelList)
def listChannelChecksum(self, channelList, username, password):
""" List packages of a specified channel. """
log_debug(5, channelList, username)
authobj = auth(username, password)
return self._listChannelChecksum(authobj, channelList)
def listChannelChecksumBySession(self, channelList, session_string):
log_debug(5, channelList, session_string)
authobj = auth_session(session_string)
return self._listChannelChecksum(authobj, channelList)
def _listChannelChecksum(self, authobj, channelList):
authobj.authzChannels(channelList)
return listChannelsChecksum(channelList)
def login(self, username, password):
""" This function that takes in the username
and password and returns a session string if they are correct. It raises a
rhnFault if the user/pass combo is not acceptable.
"""
log_debug(5, username)
user = rhnUser.search(username)
if not user or not user.check_password(password):
raise rhnFault(2)
if rhnUser.is_user_read_only(user.username):
raise rhnFault(702)
session = user.create_session()
return session.get_session()
def check_session(self, session):
""" Checks a session string to make sure it is authentic expired. """
try:
user = rhnUser.session_reload(session)
except (rhnSession.InvalidSessionError, rhnSession.ExpiredSessionError):
return 0
return 1
def test_login(self, username, password):
log_debug(5, username)
try:
authobj = auth(username, password)
except:
return 0
return 1
def test_new_login(self, username, password, session=None):
""" rhnpush's --extended-test will call this function. """
log_debug(5, "testing new login")
return self.login(username, password)
def test_check_session(self, session):
""" rhnpush's --extended-test will call this function. """
log_debug(5, "testing check session")
return self.check_session(session)
###listMissingSourcePackages###
def listMissingSourcePackages(self, channelList, username, password):
""" List source packages for a list of channels. """
log_debug(5, channelList, username)
authobj = auth(username, password)
return self._listMissingSourcePackages(authobj, channelList)
def listMissingSourcePackagesBySession(self, channelList, session_string):
log_debug(5, channelList, session_string)
authobj = auth_session(session_string)
return self._listMissingSourcePackages(authobj, channelList)
def _listMissingSourcePackages(self, authobj, channelList):
authobj.authzChannels(channelList)
h = rhnSQL.prepare("""
select distinct sr.name source_rpm
from rhnChannel c
join rhnChannelNewestPackage cnp
on cnp.channel_id = c.id
join rhnPackage p
on cnp.package_id = p.id
join rhnSourceRPM sr
on p.source_rpm_id = sr.id
left join rhnPackageSource ps
on p.source_rpm_id = ps.source_rpm_id
and (p.org_id = ps.org_id or
(p.org_id is null and ps.org_id is null)
)
where c.label = :channel_label
and ps.source_rpm_id is null
""")
missing_packages = []
for c in channelList:
h.execute(channel_label=c)
while 1:
row = h.fetchone_dict()
if not row:
break
missing_packages.append([row['source_rpm'], c])
return missing_packages
def channelPackageSubscription(self, username, password, info):
""" Uploads an RPM package. """
log_debug(3)
authobj = auth(username, password)
return self._channelPackageSubscription(authobj, info)
def channelPackageSubscriptionBySession(self, session_string, info):
log_debug(3, info)
authobj = auth_session(session_string)
return self._channelPackageSubscription(authobj, info)
def _channelPackageSubscription(self, authobj, info):
# Authorize the org id passed
authobj.authzOrg(info)
packageList = info.get('packages') or []
if not packageList:
log_debug(1, "No packages found; done")
return 0
if 'channels' not in info or not info['channels']:
log_debug(1, "No channels found; done")
return 0
channelList = info['channels']
authobj.authzChannels(channelList)
# Have to turn the channel list into a list of Channel objects
channelList = [Channel().populate({'label': x}) for x in channelList]
# Since we're dealing with superusers, we allow them to change the org
# id
# XXX check if we don't open ourselves too much (misa 20030422)
org_id = info.get('orgId')
if org_id == '':
org_id = None
batch = Collection()
package_keys = ['name', 'version', 'release', 'epoch', 'arch']
for package in packageList:
for k in package_keys:
if k not in package:
raise Exception("Missing key %s" % k)
if k == 'epoch':
if package[k] is not None:
if package[k] == '':
package[k] = None
else:
package[k] = str(package[k])
else:
package[k] = str(package[k])
if package['arch'] == 'src' or package['arch'] == 'nosrc':
# Source package - no reason to continue
continue
_checksum_sql_filter = ""
if 'md5sum' in package: # for old rhnpush compatibility
package['checksum_type'] = 'md5'
package['checksum'] = package['md5sum']
exec_args = {
'name': package['name'],
'pkg_epoch': package['epoch'],
'pkg_version': package['version'],
'pkg_rel': package['release'],
'pkg_arch': package['arch'],
'orgid': org_id
}
if 'checksum' in package and CFG.ENABLE_NVREA:
_checksum_sql_filter = """and c.checksum = :checksum
and c.checksum_type = :checksum_type"""
exec_args.update({'checksum_type': package['checksum_type'],
'checksum': package['checksum']})
h = rhnSQL.prepare(self._get_pkg_info_query %
_checksum_sql_filter)
h.execute(**exec_args)
row = h.fetchone_dict()
package['checksum_type'] = row['checksum_type']
package['checksum'] = row['checksum']
package['org_id'] = org_id
package['channels'] = channelList
batch.append(IncompletePackage().populate(package))
caller = "server.app.channelPackageSubscription"
backend = SQLBackend()
importer = ChannelPackageSubscription(batch, backend, caller=caller)
try:
importer.run()
except IncompatibleArchError:
e = sys.exc_info()[1]
raise_with_tb(rhnFault(50, string.join(e.args), explain=0), sys.exc_info()[2])
except InvalidChannelError:
e = sys.exc_info()[1]
raise_with_tb(rhnFault(50, str(e), explain=0), sys.exc_info()[2])
affected_channels = importer.affected_channels
log_debug(3, "Computing errata cache for systems affected by channels",
affected_channels)
schedule_errata_cache_update(affected_channels)
rhnSQL.commit()
return 0
def getAnyChecksum(self, info, username=None, password=None, session=None, is_source=0):
""" returns checksum info of available packages
also does an existance check on the filesystem.
"""
log_debug(3)
pkg_infos = info.get('packages')
channels = info.get('channels', [])
force = info.get('force', 0)
orgid = info.get('org_id')
if orgid == 'null':
null_org = 1
else:
null_org = None
if not session:
org_id, force = rhnPackageUpload.authenticate(username, password,
channels=channels,
null_org=null_org,
force=force)
else:
try:
org_id, force = rhnPackageUpload.authenticate_session(
session, channels=channels, null_org=null_org, force=force)
except rhnSession.InvalidSessionError:
raise_with_tb(rhnFault(33), sys.exc_info()[2])
except rhnSession.ExpiredSessionError:
raise_with_tb(rhnFault(34), sys.exc_info()[2])
if is_source:
ret = self._getSourcePackageChecksum(org_id, pkg_infos)
else:
ret = self._getPackageChecksum(org_id, pkg_infos)
return ret
def getPackageChecksum(self, username, password, info):
return self.getAnyChecksum(info, username=username, password=password)
def getPackageMD5sum(self, username, password, info):
""" bug#177762 gives md5sum info of available packages.
        Also does an existence check on the filesystem.
"""
log_debug(3)
self._MD5sum2Checksum_info(info)
return self._Checksum2MD5sum_list(
self.getPackageChecksum(username, password, info))
def getPackageChecksumBySession(self, session_string, info):
return self.getAnyChecksum(info, session=session_string)
def getPackageMD5sumBySession(self, session_string, info):
log_debug(3)
self._MD5sum2Checksum_info(info)
return self._Checksum2MD5sum_list(
self.getPackageChecksumBySession(session_string, info))
_get_pkg_info_query = """
select
c.checksum_type,
c.checksum,
p.path path
from
rhnPackageEVR pe,
rhnPackageName pn,
rhnPackage p,
rhnPackageArch pa,
rhnChecksumView c
where
pn.name = :name
and ( pe.epoch = :pkg_epoch or
( pe.epoch is null and :pkg_epoch is null )
)
and pe.version = :pkg_version
and pe.release = :pkg_rel
and ( p.org_id = :orgid or
( p.org_id is null and :orgid is null )
)
and p.name_id = pn.id
and p.evr_id = pe.id
and p.package_arch_id = pa.id
and pa.label = :pkg_arch
and p.checksum_id = c.id
%s
"""
def _getPackageChecksum(self, org_id, pkg_infos):
log_debug(3)
row_list = {}
checksum_exists = 0
for pkg in pkg_infos.keys():
pkg_info = pkg_infos[pkg]
pkg_epoch = pkg_info['epoch']
if pkg_epoch is not None:
# Force empty strings to None (NULLs in database)
if pkg_epoch == '':
pkg_epoch = None
# and force numbers to strings
else:
pkg_epoch = str(pkg_epoch)
query_args = {
'name': pkg_info['name'],
'pkg_epoch': pkg_epoch,
'pkg_version': str(pkg_info['version']),
'pkg_rel': str(pkg_info['release']),
'pkg_arch': pkg_info['arch'],
'orgid': org_id,
}
_checksum_sql_filter = ""
if 'checksum' in pkg_info and CFG.ENABLE_NVREA:
_checksum_sql_filter = """and c.checksum = :checksum
and c.checksum_type = :checksum_type"""
query_args.update({
'checksum_type': pkg_info['checksum_type'],
'checksum': pkg_info['checksum'],
})
h = rhnSQL.prepare(self._get_pkg_info_query % _checksum_sql_filter)
row_list[pkg] = self._get_package_checksum(h, query_args)
return row_list
def _get_package_checksum(self, h, query_args):
h.execute(**query_args)
row = h.fetchone_dict()
if not row:
ret = ''
elif row.get('path'):
filePath = os.path.join(CFG.MOUNT_POINT, row['path'])
if os.access(filePath, os.R_OK):
if 'checksum' in row:
ret = (row['checksum_type'], row['checksum'])
else:
ret = 'on-disk'
else:
# Package not found on the filesystem
log_error("Package not found", filePath)
ret = ''
else:
log_error("Package path null for package", query_args['name'])
ret = ''
return ret
def _MD5sum2Checksum_info(self, info):
log_debug(5)
pkg_infos = info.get('packages')
for pkg in pkg_infos.keys():
if 'md5sum' in pkg_infos[pkg]:
pkg_infos[pkg]['checksum_type'] = 'md5'
pkg_infos[pkg]['checksum'] = pkg_infos[pkg]['md5sum']
del(pkg_infos[pkg]['md5sum'])
def _Checksum2MD5sum_list(self, checksum_list):
log_debug(5)
row_list = {}
for k in checksum_list.keys():
if checksum_list[k] == '' or checksum_list[k] == 'on-disk':
row_list[k] = checksum_list[k]
elif type(checksum_list[k]) == TupleType and checksum_list[k][0] == 'md5':
row_list[k] = checksum_list[k][1]
else:
row_list[k] = ''
return row_list
def getSourcePackageChecksum(self, username, password, info):
return self.getAnyChecksum(info, username=username, password=password, is_source=1)
def getSourcePackageMD5sum(self, username, password, info):
log_debug(3)
self._MD5sum2Checksum_info(info)
return self._Checksum2MD5sum_list(
self.getSourcePackageChecksum(username, password, info))
def getSourcePackageChecksumBySession(self, session_string, info):
return self.getAnyChecksum(info, session=session_string, is_source=1)
def getSourcePackageMD5sumBySession(self, session_string, info):
log_debug(3)
self._MD5sum2Checksum_info(info)
return self._Checksum2MD5sum_list(
self.getSourcePackageChecksumBySession(session_string, info))
def _getSourcePackageChecksum(self, org_id, pkg_infos):
""" Gives checksum info of available source packages.
        Also does an existence check on the filesystem.
"""
log_debug(3)
statement = """
select
ps.path path,
c.checksum,
c.checksum_type
from
rhnSourceRpm sr,
rhnPackageSource ps,
rhnChecksumView c
where
sr.name = :name
and ps.source_rpm_id = sr.id
and ( ps.org_id = :orgid or
( ps.org_id is null and :orgid is null )
)
and ps.checksum_id = c.id
"""
h = rhnSQL.prepare(statement)
row_list = {}
for pkg in pkg_infos.keys():
row_list[pkg] = self._get_package_checksum(h,
{'name': pkg, 'orgid': org_id})
return row_list
def auth(login, password):
""" Authorize this user. """
authobj = UserAuth()
authobj.auth(login, password)
return authobj
def auth_session(session_string):
""" Authenticate based on a session. """
authobj = UserAuth()
authobj.auth_session(session_string)
return authobj
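# --- Usage sketch (editor's addition, not part of this module) ---
# The Packages handlers above are exposed over XML-RPC and are normally driven
# by rhnpush. A rough client-side sketch using the standard library; the
# endpoint URL and the "packages" handler namespace below are assumptions, not
# taken from this file, and `info` stands for the package-description dict
# that rhnpush builds:
#     import xmlrpc.client
#     server = xmlrpc.client.ServerProxy("https://satellite.example.com/APP")
#     session = server.packages.login("user", "password")
#     if server.packages.check_session(session):
#         checksums = server.packages.getPackageChecksumBySession(session, info)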
#!/usr/bin/python
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------- #
# #
# Plugin for iSida Jabber Bot #
# Copyright (C) diSabler <[email protected]> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# --------------------------------------------------------------------------- #
# cron * * * * * \n command
def time_cron(type, jid, nick, text):
ar = text.split(' ',1)[0].lower()
try: par = text.split(' ',1)[1]
except: par = ''
if ar == 'show': msg = time_cron_show(jid,nick,par)
elif ar == 'del': msg = time_cron_del(jid,nick,par)
else: msg = time_cron_add(text,jid,nick)
send_msg(type, jid, nick, msg)
def time_cron_show(jid,nick,ar):
al = get_level(jid,nick)[0]
if ar:
if al == 9 and ar.lower().split()[0] in ['all','global','total']: room = '%'
else: room = jid
else: room = jid
c = cur_execute_fetchall('select * from cron where room ilike %s order by room, time',(room,))
if c:
tmp, idx = [], 1
for t in c:
if t[2] == '\n': mode = '[silent]'
elif not t[2]: mode = '[anonim]'
else: mode = ''
if room=='%': msg = '%s|%s' % (t[0],disp_time(t[3],'%s/%s'%(jid,nick)))
else: msg = '%s. %s' % (idx,disp_time(t[3],'%s/%s'%(jid,nick)))
if mode: msg = '%s %s' % (msg,mode)
if t[4]: msg += ' [%s]' % t[4]
msg += ' -> %s' % t[5]
tmp.append(msg)
idx += 1
return L('Cron rules:\n%s','%s/%s'%(jid,nick)) % '\n'.join(tmp)
else: return L('There is no cron rules.','%s/%s'%(jid,nick))
def time_cron_add(ar,jid,nick):
try: cron_time, cron_cmd = ar.split('\n',1)
except: return L('Not enough parameters!','%s/%s'%(jid,nick))
try:
SM,RM,NM = 'silent','once','anonim'
CMD = [SM,RM,NM]
ct = cron_time.lower().split()
rm,sm,nm = RM in ct,SM in ct,NM in ct
if rm or sm or nm:
for t in CMD:
try: ct.remove(t)
except: pass
ct = ' '.join(ct)
if rm: cron_time,repeat_time = ct,''
else: cron_time,repeat_time = ct,ct
next_time = crontab.CronTab(cron_time).next() + time.time()
except: return L('Error in time format!','%s/%s'%(jid,nick))
lvl,rj = get_level(jid,nick)
amm,tcmd = -1,cron_cmd.split(' ')[0]
for tmp in comms:
if tmp[1] == tcmd:
amm = tmp[0]
break
if amm < 0: return L('Command not found: %s','%s/%s'%(jid,nick)) % tcmd
elif amm > lvl: return L('Not allowed launch: %s','%s/%s'%(jid,nick)) % tcmd
else:
if sm: nick = '\n'
elif nm: nick = ''
cur_execute('insert into cron values (%s,%s,%s,%s,%s,%s,%s)', (jid,getRoom(rj),nick,next_time,repeat_time,cron_cmd,lvl))
return '%s -> %s' % (disp_time(next_time,'%s/%s'%(jid,nick)),cron_cmd)
def time_cron_del(jid,nick,ar):
al = get_level(jid,nick)[0]
if al == 9 and ar.lower() == 'all':
cur_execute('delete from cron where room=%s',(jid,))
return L('All cron rules removed!','%s/%s'%(jid,nick))
elif not ar: return L('Need choise record number.','%s/%s'%(jid,nick))
elif not ar.isdigit(): return L('Record ID is numeric.','%s/%s'%(jid,nick))
else:
c = cur_execute_fetchall('select * from cron where room ilike %s order by room, time',(jid,))
try: rec = c[int(ar)-1]
except: return L('Record #%s not found!','%s/%s'%(jid,nick)) % ar
cur_execute('delete from cron where room=%s and jid=%s and nick=%s and time=%s and repeat=%s and command=%s and level=%s',rec)
msg = disp_time(rec[3],'%s/%s'%(jid,nick))
if rec[2] == '\n': mode = '[silent]'
elif not rec[2]: mode = '[anonim]'
else: mode = ''
if mode: msg = '%s %s' % (msg,mode)
if rec[4]: msg += ' [%s]' % rec[4]
msg += ' -> %s' % rec[5]
return L('Removed: %s','%s/%s'%(jid,nick)) % msg
def cron_action():
itt = int(time.time())
c = cur_execute_fetchall('select distinct * from cron where %s >= time',(itt,))
if c:
cur_execute('delete from cron where %s >= time',(itt,))
for t in c:
if t[4]:
tm = crontab.CronTab(t[4]).next() + time.time()
m = list(t[:3]) + [tm] + list(t[4:7])
cur_execute('insert into cron values (%s,%s,%s,%s,%s,%s,%s)', m)
tmp = cur_execute_fetchone('select room from conference where room ilike %s',('%s/%%'%t[0],))
if not tmp:
pprint('Can\'t execute by cron: %s in %s' % (t[5].split()[0],t[0]),'red')
return
else: nowname = getResourse(tmp[0])
if t[2] == '\n': tmp_nick,tmp_type = '%s_cron_%d' % (nowname,time.time()),'chat'
else: tmp_nick,tmp_type = t[2],'groupchat'
com_parser(t[6], nowname, tmp_type, t[0], tmp_nick, t[5], Settings['jid'])
global execute, timer
timer = [cron_action]
execute = [(7, 'cron', time_cron, 2, 'Execute command by cron. Uses unix-style cron time format.\ncron [once] [anonim|silent] * * * * *\ncommand')]
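# --- Illustrative note (editor's addition, not part of the plugin) ---
# The scheduling above relies on an external `crontab` module whose
# CronTab(expr).next() returns the number of seconds until the expression next
# matches, which is why the plugin adds time.time() to obtain an absolute
# timestamp. A rough sketch of that step (module availability and the exact
# signature are assumptions inferred from the calls made above):
#     import time
#     import crontab
#     delay = crontab.CronTab('*/5 * * * *').next()
#     fire_at = delay + time.time()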
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six import iteritems
from aquilon.aqdb.model import Archetype, Feature
from aquilon.worker.broker import BrokerCommand
from aquilon.worker.dbwrappers.parameter import (search_path_in_personas,
lookup_paramdef)
from aquilon.worker.formats.parameter import SimpleParameterList
class CommandSearchParameter(BrokerCommand):
required_parameters = ['path']
def render(self, session, archetype, feature, type, path, **_):
if archetype:
defholder = Archetype.get_unique(session, archetype, compel=True)
else:
cls = Feature.polymorphic_subclass(type, "Unknown feature type")
defholder = cls.get_unique(session, name=feature, compel=True)
db_paramdef, rel_path = lookup_paramdef(defholder, path, strict=False)
params = search_path_in_personas(session, db_paramdef, rel_path)
return SimpleParameterList(path, iteritems(params))
# -*- coding: utf-8 -*-
from itertools import chain
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.forms.widgets import Select, MultiWidget, TextInput
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from cms.forms.utils import get_site_choices, get_page_choices
from cms.models import Page, PageUser
from cms.templatetags.cms_admin import CMS_ADMIN_ICON_BASE
from cms.utils.compat.dj import force_unicode
class PageSelectWidget(MultiWidget):
"""A widget that allows selecting a page by first selecting a site and then
a page on that site in a two step process.
"""
def __init__(self, site_choices=None, page_choices=None, attrs=None):
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
self.choices = []
super(PageSelectWidget, self).__init__((Select, Select, Select), attrs)
def decompress(self, value):
"""
receives a page_id in value and returns the site_id and page_id
of that page or the current site_id and None if no page_id is given.
"""
if value:
page = Page.objects.get(pk=value)
site = page.site
return [site.pk, page.pk, page.pk]
site = Site.objects.get_current()
return [site.pk,None,None]
def _has_changed(self, initial, data):
# THIS IS A COPY OF django.forms.widgets.Widget._has_changed()
# (except for the first if statement)
"""
Return True if data differs from initial.
"""
# For purposes of seeing whether something has changed, None is
# the same as an empty string, if the data or inital value we get
# is None, replace it w/ u''.
if data is None or (len(data)>=2 and data[1] in [None,'']):
data_value = u''
else:
data_value = data
if initial is None:
initial_value = u''
else:
initial_value = initial
if force_unicode(initial_value) != force_unicode(data_value):
return True
return False
def render(self, name, value, attrs=None):
# THIS IS A COPY OF django.forms.widgets.MultiWidget.render()
# (except for the last line)
# value is a list of values, each corresponding to a widget
# in self.widgets.
site_choices = get_site_choices()
page_choices = get_page_choices()
self.site_choices = site_choices
self.choices = page_choices
self.widgets = (Select(choices=site_choices ),
Select(choices=[('', '----')]),
Select(choices=self.choices, attrs={'style': "display:none;"} ),
)
if not isinstance(value, list):
value = self.decompress(value)
output = []
final_attrs = self.build_attrs(attrs)
id_ = final_attrs.get('id', None)
for i, widget in enumerate(self.widgets):
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
output.append(widget.render(name + '_%s' % i, widget_value, final_attrs))
output.append(r'''<script type="text/javascript">
(function($) {
var handleSiteChange = function(site_name, selected_id) {
$("#id_%(name)s_1 optgroup").remove();
var myOptions = $("#id_%(name)s_2 optgroup[label='" + site_name + "']").clone();
$("#id_%(name)s_1").append(myOptions);
$("#id_%(name)s_1").change();
};
var handlePageChange = function(page_id) {
if (page_id) {
$("#id_%(name)s_2 option").removeAttr('selected');
$("#id_%(name)s_2 option[value=" + page_id + "]").attr('selected','selected');
} else {
$("#id_%(name)s_2 option[value=]").attr('selected','selected');
};
};
$("#id_%(name)s_0").change(function(){
var site_label = $("#id_%(name)s_0").children(":selected").text();
handleSiteChange( site_label );
});
$("#id_%(name)s_1").change(function(){
var page_id = $(this).find('option:selected').val();
handlePageChange( page_id );
});
$(function(){
handleSiteChange( $("#id_%(name)s_0").children(":selected").text() );
$("#add_id_%(name)s").hide();
});
})(django.jQuery);
</script>''' % {'name': name})
return mark_safe(self.format_output(output))
def format_output(self, rendered_widgets):
return u' '.join(rendered_widgets)
class PageSmartLinkWidget(TextInput):
def render(self, name=None, value=None, attrs=None):
final_attrs = self.build_attrs(attrs)
id_ = final_attrs.get('id', None)
output = [r'''<script type="text/javascript">
(function($){
$(function(){
$("#%(element_id)s").select2({
placeholder: "%(placeholder_text)s",
minimumInputLength: 3,
ajax: {
url: "%(ajax_url)s",
dataType: 'json',
data: function (term, page) {
return {
q: term, // search term
language_code: '%(language_code)s'
};
},
results: function (data, page) {
return {
more: false,
results: $.map(data, function(item, i){
return {
'id':item.redirect_url,
'text': item.title + ' (/' + item.path + ')'}
}
)
};
}
},
// Allow creation of new entries
createSearchChoice:function(term, data) { if ($(data).filter(function() { return this.text.localeCompare(term)===0; }).length===0) {return {id:term, text:term};} },
multiple: false,
initSelection : function (element, callback) {
var initialValue = element.val()
callback({id:initialValue, text: initialValue});
}
});
})
})(django.jQuery);
</script>''' % {
'element_id': id_,
'placeholder_text': final_attrs.get('placeholder_text', ''),
'language_code': self.language,
'ajax_url': reverse("admin:cms_page_get_published_pagelist")
}]
output.append(super(PageSmartLinkWidget, self).render(name, value, attrs))
return mark_safe(u''.join(output))
class Media:
css = {
'all': ('cms/js/select2/select2.css',
'cms/js/select2/select2-bootstrap.css',)
}
js = (#'cms/js/libs/jquery.min.js',
'cms/js/select2/select2.js',)
class UserSelectAdminWidget(Select):
"""Special widget used in page permission inlines, because we have to render
an add user (plus) icon, but point it somewhere else - to special user creation
view, which is accessible only if user haves "add user" permissions.
Current user should be assigned to widget in form constructor as an user
attribute.
"""
def render(self, name, value, attrs=None, choices=()):
output = [super(UserSelectAdminWidget, self).render(name, value, attrs, choices)]
if hasattr(self, 'user') and (self.user.is_superuser or \
self.user.has_perm(PageUser._meta.app_label + '.' + PageUser._meta.get_add_permission())):
# append + icon
add_url = '../../../cms/pageuser/add/'
output.append(u'<a href="%s" class="add-another" id="add_id_%s" onclick="return showAddAnotherPopup(this);"> ' % \
(add_url, name))
output.append(u'<img src="%sicon_addlink.gif" width="10" height="10" alt="%s"/></a>' % (CMS_ADMIN_ICON_BASE, _('Add Another')))
return mark_safe(u''.join(output))
class AppHookSelect(Select):
"""Special widget used for the App Hook selector in the Advanced Settings
of the Page Admin. It adds support for a data attribute per option and
includes supporting JS into the page.
"""
class Media:
js = ('cms/js/modules/cms.base.js', 'cms/js/modules/cms.app_hook_select.js', )
def __init__(self, attrs=None, choices=(), app_namespaces={}):
self.app_namespaces = app_namespaces
super(AppHookSelect, self).__init__(attrs, choices)
def render_option(self, selected_choices, option_value, option_label):
if option_value is None:
option_value = ''
option_value = force_text(option_value)
if option_value in selected_choices:
selected_html = mark_safe(' selected="selected"')
if not self.allow_multiple_selected:
# Only allow for a single selection.
selected_choices.remove(option_value)
else:
selected_html = ''
if option_value in self.app_namespaces:
data_html = mark_safe(' data-namespace="%s"' % self.app_namespaces[option_value])
else:
data_html = ''
return '<option value="%s"%s%s>%s</option>' % (
option_value,
selected_html,
data_html,
force_text(option_label),
)
def render_options(self, choices, selected_choices):
selected_choices = set(force_text(v) for v in selected_choices)
output = []
for option_value, option_label in chain(self.choices, choices):
output.append(self.render_option(selected_choices, option_value, option_label))
return '\n'.join(output)
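# --- Illustrative note (editor's addition, not part of this module) ---
# For an option whose value appears in app_namespaces, render_option() above
# emits a data-namespace attribute that cms.app_hook_select.js can read; for
# example (the app name and namespace are made up):
#     <option value="SampleApp" selected="selected" data-namespace="sample">Sample App</option>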
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsProjectTimeSettings.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Samweli Mwakisambwe'
__date__ = '6/3/2020'
__copyright__ = 'Copyright 2020, The QGIS Project'
import qgis # NOQA
from qgis.core import (QgsProject,
QgsUnitTypes,
QgsProjectTimeSettings,
QgsReadWriteContext,
QgsDateTimeRange)
from qgis.PyQt.QtCore import (QDate,
QTime,
QDateTime)
from qgis.PyQt.QtTest import QSignalSpy
from qgis.PyQt.QtXml import QDomDocument, QDomElement
from qgis.testing import start_app, unittest
from utilities import (unitTestDataPath)
app = start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsProjectTimeSettings(unittest.TestCase):
def testTemporalRange(self):
p = QgsProjectTimeSettings()
self.assertTrue(p.temporalRange().isInfinite())
spy = QSignalSpy(p.temporalRangeChanged)
r = QgsDateTimeRange(
QDateTime(QDate(2020, 1, 1), QTime(8, 0, 0)),
QDateTime(QDate(2020, 12, 1), QTime(8, 0, 0))
)
rc = QgsDateTimeRange(
QDateTime(QDate(2020, 1, 1), QTime(8, 0, 0)),
QDateTime(QDate(2020, 12, 1), QTime(8, 0, 0))
)
p.setTemporalRange(r)
self.assertEqual(p.temporalRange(), r)
self.assertEqual(len(spy), 1)
p.setTemporalRange(rc)
self.assertEqual(len(spy), 1)
p.reset()
self.assertEqual(len(spy), 2)
def testGettersSetters(self):
p = QgsProjectTimeSettings()
p.setTimeStep(4.8)
self.assertEqual(p.timeStep(), 4.8)
p.setTimeStepUnit(QgsUnitTypes.TemporalDecades)
self.assertEqual(p.timeStepUnit(), QgsUnitTypes.TemporalDecades)
p.setFramesPerSecond(90)
self.assertEqual(p.framesPerSecond(), 90)
p.setIsTemporalRangeCumulative(True)
self.assertTrue(p.isTemporalRangeCumulative())
def testReadWrite(self):
p = QgsProjectTimeSettings()
self.assertTrue(p.temporalRange().isInfinite())
doc = QDomDocument("testdoc")
elem = p.writeXml(doc, QgsReadWriteContext())
p2 = QgsProjectTimeSettings()
spy = QSignalSpy(p2.temporalRangeChanged)
self.assertTrue(p2.readXml(elem, QgsReadWriteContext()))
self.assertEqual(p2.temporalRange(), p.temporalRange())
self.assertEqual(len(spy), 0)
r = QgsDateTimeRange(
QDateTime(QDate(2020, 1, 1), QTime(8, 0, 0)),
QDateTime(QDate(2020, 12, 1), QTime(8, 0, 0))
)
p.setTemporalRange(r)
p.setTimeStep(4.8)
p.setTimeStepUnit(QgsUnitTypes.TemporalDecades)
p.setFramesPerSecond(90)
p.setIsTemporalRangeCumulative(True)
elem = p.writeXml(doc, QgsReadWriteContext())
p2 = QgsProjectTimeSettings()
spy = QSignalSpy(p2.temporalRangeChanged)
self.assertTrue(p2.readXml(elem, QgsReadWriteContext()))
self.assertEqual(p2.temporalRange(), r)
self.assertEqual(len(spy), 1)
self.assertEqual(p2.timeStep(), 4.8)
self.assertEqual(p2.timeStepUnit(), QgsUnitTypes.TemporalDecades)
self.assertEqual(p2.framesPerSecond(), 90)
self.assertTrue(p.isTemporalRangeCumulative())
if __name__ == '__main__':
unittest.main()
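# --- Usage sketch (editor's addition, not part of the test) ---
# Outside the tests, the same settings object is normally reached through the
# project instance; QgsProject.timeSettings() is assumed to be available here
# (QGIS 3.14+), and the range values are illustrative:
#     from qgis.core import QgsProject, QgsDateTimeRange
#     from qgis.PyQt.QtCore import QDate, QTime, QDateTime
#     settings = QgsProject.instance().timeSettings()
#     settings.setTemporalRange(QgsDateTimeRange(
#         QDateTime(QDate(2020, 1, 1), QTime(8, 0, 0)),
#         QDateTime(QDate(2020, 12, 1), QTime(8, 0, 0))))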
"""
Module used to validate the results of the simulations using various
means. These are not quite tests, since we don't have exact values
to check against, and everything is necessarily approximate.
"""
from __future__ import print_function
from __future__ import division
import sys
import time
import math
import numpy as np
import random
import multiprocessing
from matplotlib import ticker
from matplotlib import pyplot
import ercs
import discsim
import _discsim
class ErcsSingleLocusIdentitySimulator(ercs.Simulator):
"""
Class that calculates identity in state for genes separated by a range
of distances.
"""
def setup(self, num_points, max_distance, mutation_rate, accuracy_goal):
"""
Sets up the simulation so that we calculate identity at the specified
number of points, the maximum distance between points is
max_distance and mutation happens at the specified rate. Also
set the max_time attribute to reflect the specified accuracy_goal.
"""
self.mutation_rate = mutation_rate
self.distances = np.linspace(0, max_distance, num_points)
self.sample = [None, (0, 0)] + [(0, x) for x in self.distances]
self.max_time = math.log(accuracy_goal) / (-2 * mutation_rate)
def get_identity(self, seed):
"""
Returns the probability of identity at all distance classes
in this replicate.
"""
pi, tau = self.run(seed)
mc = ercs.MRCACalculator(pi[0])
n = len(self.distances)
F = [0.0 for j in range(n)]
for j in range(n):
mrca = mc.get_mrca(1, j + 2)
if mrca != 0:
F[j] = math.exp(-2 * self.mutation_rate * tau[0][mrca])
return F
class SingleLocusIdentitySimulator(discsim.Simulator):
"""
Class that calculates identity in state for genes separated by a range
of distances.
"""
def __init__(self, torus_diameter, distances, mutation_rate, accuracy_goal):
super(SingleLocusIdentitySimulator, self).__init__(torus_diameter)
self.__accuracy_goal = accuracy_goal
self.__mutation_rate = mutation_rate
self.__distances = distances
self.__max_time = math.log(accuracy_goal) / (-2 * mutation_rate)
self.sample = [None, (0, 0)] + [(0, x) for x in self.__distances]
def get_identity(self, seed):
"""
Returns the probability of identity at all distance classes
in this replicate.
"""
self.random_seed = seed
self.run(self.__max_time)
pi, tau = self.get_history()
# reset the simulation so we can get another replicate.
self.reset()
mc = ercs.MRCACalculator(pi[0])
n = len(self.__distances)
F = [0.0 for j in range(n)]
for j in range(n):
mrca = mc.get_mrca(1, j + 2)
if mrca != 0:
F[j] = math.exp(-2 * self.__mutation_rate * tau[0][mrca])
return F
def subprocess_identity_worker(t):
sim, seed = t
return sim.get_identity(seed)
def run_identity_replicates(sim, num_replicates, worker_pool):
args = [(sim, random.randint(1, 2**31)) for j in range(num_replicates)]
replicates = worker_pool.map(subprocess_identity_worker, args)
mean_identity = np.mean(np.array(replicates), axis=0)
return mean_identity
def simple_identity_check(r=1, u=0.125, rate=1, num_parents=1,
num_replicates=10000, mutation_rate=1e-6):
"""
Checks identity using very simple model parameters.
"""
events = [ercs.DiscEventClass(r=r, u=u, rate=rate)]
ll_events = [e.get_low_level_representation() for e in events]
torus_diameter = 100
s = _discsim.IdentitySolver(ll_events,
torus_diameter=torus_diameter,
num_quadrature_points=512,
integration_abserr=1e-6,
integration_relerr=0,
integration_workspace_size=1000,
max_x=50, mutation_rate=mutation_rate,
num_parents=num_parents)
s.solve()
# Set up the simulations
num_points = 10
distances = np.linspace(0, 10, num_points)
sim = SingleLocusIdentitySimulator(torus_diameter, distances,
mutation_rate, 1e-6)
sim.num_parents = num_parents
sim.event_classes = events
workers = multiprocessing.Pool(processes=multiprocessing.cpu_count())
F_sim = run_identity_replicates(sim, num_replicates, workers)
F_num = [s.interpolate(x) for x in distances]
for x, fs, fn in zip(distances, F_sim, F_num):
print("{0:.1f}\t{1:.6f}\t{2:.6f}".format(x, fs, fn))
pyplot.plot(distances, F_sim, label="Simulation")
pyplot.plot(distances, F_num, label="Numerical")
pyplot.legend()
pyplot.show()
def mixed_events_identity_check(num_replicates):
torus_diameter = 100
num_points = 50
max_x = 20
mutation_rate = 1e-6
accuracy_goal = 1e-3
small_events = ercs.DiscEventClass(rate=1.0, r=1, u=0.5)
large_events = ercs.DiscEventClass(rate=0.1, r=10, u=0.05)
sim = ErcsSingleLocusIdentitySimulator(torus_diameter)
sim.setup(num_points, max_x, mutation_rate, accuracy_goal)
workers = multiprocessing.Pool(processes=multiprocessing.cpu_count())
l = [small_events, large_events]
sim.event_classes = l
before = time.time()
ercs_F = run_identity_replicates(sim, num_replicates, workers)
duration = time.time() - before
print("ercs done...", duration)
distances = np.linspace(0, max_x, num_points)
sim = SingleLocusIdentitySimulator(torus_diameter, distances,
mutation_rate, 1e-6)
sim.event_classes = l
before = time.time()
discsim_F = run_identity_replicates(sim, num_replicates, workers)
duration = time.time() - before
print("discsim done...", duration)
pyplot.plot(distances, ercs_F, label="ercs")
pyplot.plot(distances, discsim_F, label="discsim")
pyplot.legend()
pyplot.show()
def get_mean_squared_displacement(z, pop):
"""
Returns the mean squared displacement of the specified population from
the specified point.
"""
d2 = 0.0
for p, a in pop:
d2 += (p[0] - z[0])**2
d2 += (p[1] - z[1])**2
n = len(pop)
return d2 / (n * 2)
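# A small worked example of the calculation above (values are illustrative):
# a single lineage at (3, 4) measured from the origin gives
# d2 = 3**2 + 4**2 = 25, and with n = 1 the function returns 25 / (1 * 2) = 12.5.
#
#     >>> get_mean_squared_displacement((0.0, 0.0), [((3.0, 4.0), None)])
#     12.5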
def single_locus_diffusion(u, r, rate):
"""
Measure the mean squared displacement of lineages for a single
locus simulation.
"""
z = (100, 100)
sample_size = 10000
s = 2.25
L = 100 * s
sim = discsim.Simulator(L)
sim.pixel_size = s
sim.sample = [None] + [z for j in range(sample_size)]
sim.event_classes = [ercs.DiscEventClass(r=r, u=u, rate=rate)]
sim.max_occupancy = 2 * sample_size
sim.max_population_size = 2 * sample_size
sim.print_state()
T = []
X = []
D = []
S = []
for j in range(100):
t = j * 100 * L**2
sim.run(t)
pop = sim.get_population()
msd = get_mean_squared_displacement(z, pop)
t = sim.get_time() / L**2
T.append(t)
X.append(msd)
S.append(t * (r**4) * rate * u * math.pi / 2)
print(T[-1], X[-1], S[-1])
pyplot.plot(T, X, T, S)
pyplot.show()
def subprocess_wave_worker(args):
sim, times, seed = args
sim.random_seed = seed
L = int(sim.torus_diameter)
n = np.zeros((len(times), L))
for j, t in enumerate(times):
sim.run(t)
pop = sim.get_population()
for tup in pop:
if sim.simulate_pedigree:
k = int(tup)
else:
k = int(tup[0])
n[j, k] += 1
n[j, k + 1] += 1
sim.reset()
return n
def run_wave_replicates(sim, times, num_replicates, worker_pool=None):
args = [(sim, times, random.randint(1, 2**31)) for j in range(num_replicates)]
if worker_pool is None:
replicates = [subprocess_wave_worker(a) for a in args]
else:
replicates = worker_pool.map(subprocess_wave_worker, args)
mean_n = []
for j in range(len(times)):
n = []
for r in replicates:
n.append(r[j])
mean_n.append(np.mean(n, axis=0))
return mean_n
def wave_1d(u, num_loci=0):
"""
Simulates the wave of pedigree ancestors in 1D.
"""
N = int(2 / u)
L = 100
    s = discsim.Simulator(L, num_loci == 0)  # second argument: simulate the pedigree when no loci are requested
if num_loci != 0:
s.num_loci = num_loci
s.max_population_size = 10000
s.event_classes = [ercs.DiscEventClass(r=1, u=u)]
s.sample = [None, L/2, L/2]
workers = multiprocessing.Pool(processes=multiprocessing.cpu_count())
#workers = None
t = [j * 500 * L for j in range(5)]
x = [j for j in range(L)]
for n in run_wave_replicates(s, t, 100, workers):
pyplot.plot(x, n)
pyplot.axhline(0.797 * N)
pyplot.show()
def main():
#simple_identity_check(rate=0.5)
#simple_identity_check(r=0.93, u=0.133, rate=0.5, num_parents=2,
# num_replicates=10**6, mutation_rate=1e-7)
#mixed_events_identity_check(100000)
#plot_mixed_events_identity()
#single_locus_diffusion(u=0.0000125, r=1, rate=1.0)
wave_1d(u=0.005)
#wave_1d(u=0.005, num_loci=100000)
if __name__ == "__main__":
main()
| 0.004894 |
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Benchmark context managers.
To enable benchmark logging set up ``ggrc.utils.benchmarks`` logger
in ``DEBUG`` level within ``settings`` module:
.. code-block:: python
LOGGING_LOGGERS = {
"ggrc.utils.benchmarks": "DEBUG",
}
"""
import inspect
import logging
import time
from collections import defaultdict
from ggrc import settings
logger = logging.getLogger(__name__)
class BenchmarkContextManager(object):
"""Default benchmark context manager.
  This should be used on appengine instances and by default on dev
  instances.
"""
# pylint: disable=too-few-public-methods,unused-argument
  # The unused **kwargs have to be accepted by __init__ so that all benchmark
  # context managers are interchangeable. See DebugBenchmark.__init__.
def __init__(self, message, **kwargs):
self.message = message
self.start = 0
def __enter__(self):
self.start = time.time()
def __exit__(self, exc_type, exc_value, exc_trace):
end = time.time()
logger.debug("%.4f %s", end - self.start, self.message)
class WithNop(object):
"""Nop benchmark context manager.
  This is a dummy context manager that can be used in place of the default or
  debug context managers, so that they can be disabled without removing lines of code.
"""
# pylint: disable=too-few-public-methods
def __init__(self, message, **kwargs):
pass
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, exc_trace):
pass
class DebugBenchmark(object):
"""Debug benchmark context manager.
This benchmark should be used when debugging performance issues. It has the
most comprehensive output of all context managers.
  To enable this, set the GGRC_BENCHMARK env var on the server. You can choose
  the summary type by setting GGRC_BENCHMARK to "all" or "last", where "last"
  outputs stats for the last benchmark tree and "all" shows data for the
  entire history.
  Note that this benchmark is useful inside for loops with quiet set to True.
  The benchmark itself has some overhead: it is about 10 times slower than a
  simple addition when func_name is given. If func_name is not given, it will
  run about 200 times slower than a simple addition.
  For more precise measurements, uncomment the C profiler in ggrc.__main__.
"""
_depth = 0
PRINT_TREE = False
PREFIX = "| "
COMPACT_FORM = "{prefix}{last:.4f}"
FULL_FORM = ("{prefix}current: {last:.4f} - max: {max:.4f} - min: "
"{min:.4f} - sum: {sum:.4f} - count: {count:.4f}")
PRINT_FORM = ("sum: {sum:>9.5f} - count: {count:>10.5f} - avg {avg:8.5f}"
" - max: {max:8.5f} - min: {min:8.5f} - {message}")
_stats = defaultdict(lambda: defaultdict(float))
_all_stats = defaultdict(lambda: defaultdict(float))
STATS = {
"all": _all_stats,
"only": _all_stats,
"last": _stats,
"only_last": _stats,
}
_summary = "all"
def __init__(self, message, func_name=None, form=COMPACT_FORM, quiet=False):
"""Initialize a new instance of this benchmark.
Args:
message: String containing the message that will be display with the
benchmark results. It should be unique for all instances.
func_name: Name of the function where this benchmark has been
initialized. If none is given, it will be populated automatically.
Note that this is a slow process.
form: String containing the format of the benchmark results. Two given
options are COMPACT_FORM and FULL_FORM.
"""
self.message = message
self.quiet = quiet
self.form = form
self.start = 0
if func_name is None and self._summary in {"all", "last"}:
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 0)
func_name = calframe[1][3]
self.func_name = func_name
def __enter__(self):
"""Start the benchmark timer."""
if not self.quiet and self._summary in {"all", "last"}:
msg = "{}{}: {}".format(
self.PREFIX * DebugBenchmark._depth, self.func_name, self.message)
print msg
if DebugBenchmark._depth == 0:
self._reset_stats()
DebugBenchmark._depth += 1
self.start = time.time()
def __exit__(self, exc_type, exc_value, exc_trace):
"""Stop the benchmark timer.
This function gets the duration of the benchmark and stores it. If this is
the outer most benchmark, the summary of all calls will be printed.
"""
duration = time.time() - self.start
DebugBenchmark._depth -= 1
self.update_stats(duration)
if not self.quiet and self._summary in {"all", "last"}:
msg = self.form.format(
prefix=self.PREFIX * DebugBenchmark._depth,
**self._stats[self.message]
)
print msg
if DebugBenchmark._depth == 0:
self._print_stats(self.STATS[self._summary])
def _update_stats(self, stats, duration):
"""Add duration data to stats.
Args:
stats: mutable dict containing data that will be updated.
duration: time measured by the benchmark.
"""
stats[self.message]["message"] = self.message
stats[self.message]["count"] += 1
stats[self.message]["sum"] += duration
stats[self.message]["last"] = duration
stats[self.message]["max"] = max(stats[self.message]["max"], duration)
stats[self.message]["min"] = min(
stats[self.message]["min"] or duration, # ignore initial zero
duration
)
def update_stats(self, duration):
self._update_stats(self._all_stats, duration)
self._update_stats(self._stats, duration)
@classmethod
def print_stats(cls):
cls._print_stats(cls._all_stats)
@classmethod
def _reset_stats(cls):
cls._stats.clear()
@classmethod
def _print_stats(cls, stats, sort_key="sum"):
"""Print stats summary."""
sorted_ = sorted(stats.values(), key=lambda item: item[sort_key],
reverse=True)
for stat in sorted_:
msg = cls.PRINT_FORM.format(
prefix=stat["message"] + " - ",
avg=stat["sum"] / stat["count"] if stat["count"] else 0,
**stat
)
print msg
@classmethod
def set_summary(cls, summary):
"""Set the summary type for the benchmark context manager.
Three valid summary types are:
all - print all steps and summary of entire history.
last - print all steps and summary of the last request.
only - print only summary of entire history on each request, the tree
of execution steps will be hidden.
only_last - print only summary of last request, the tree of execution
steps will be hidden.
If an invalid parameter is set, stats will not be updated.
"""
if summary.lower() in cls.STATS:
cls._summary = summary.lower()
def get_benchmark():
"""Get a benchmark context manager."""
if settings.DEBUG_BENCHMARK:
DebugBenchmark.set_summary(settings.DEBUG_BENCHMARK)
return DebugBenchmark
else:
return BenchmarkContextManager
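# Example usage (a sketch; assumes this module is importable as
# ggrc.utils.benchmarks, as the module docstring suggests, and that
# do_expensive_work is a placeholder for the code being measured):
#
#     from ggrc.utils import benchmarks
#
#     benchmark = benchmarks.get_benchmark()
#     with benchmark("rebuild full text index"):
#         do_expensive_work()
#
# With the default BenchmarkContextManager this logs a single DEBUG line with
# the elapsed time; with GGRC_BENCHMARK set, DebugBenchmark prints a nested
# tree of timings instead.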
| 0.007205 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import re
from twisted.internet import defer
from twisted.python import log
from buildbot.process.properties import Interpolate
from buildbot.process.properties import Properties
from buildbot.process.results import CANCELLED
from buildbot.process.results import EXCEPTION
from buildbot.process.results import FAILURE
from buildbot.process.results import RETRY
from buildbot.process.results import SKIPPED
from buildbot.process.results import SUCCESS
from buildbot.process.results import WARNINGS
from buildbot.reporters.base import ReporterBase
from buildbot.reporters.generators.build import BuildStartEndStatusGenerator
from buildbot.reporters.generators.buildrequest import BuildRequestGenerator
from buildbot.reporters.message import MessageFormatterRenderable
from buildbot.util import httpclientservice
from buildbot.util.giturlparse import giturlparse
HOSTED_BASE_URL = 'https://api.github.com'
class GitHubStatusPush(ReporterBase):
name = "GitHubStatusPush"
def checkConfig(self, token, context=None, baseURL=None, verbose=False,
debug=None, verify=None, generators=None,
**kwargs):
if generators is None:
generators = self._create_default_generators()
super().checkConfig(generators=generators, **kwargs)
httpclientservice.HTTPClientService.checkAvailable(self.__class__.__name__)
@defer.inlineCallbacks
def reconfigService(self, token, context=None, baseURL=None, verbose=False,
debug=None, verify=None, generators=None,
**kwargs):
token = yield self.renderSecrets(token)
self.debug = debug
self.verify = verify
self.verbose = verbose
self.context = self.setup_context(context)
if generators is None:
generators = self._create_default_generators()
yield super().reconfigService(generators=generators, **kwargs)
if baseURL is None:
baseURL = HOSTED_BASE_URL
if baseURL.endswith('/'):
baseURL = baseURL[:-1]
self._http = yield httpclientservice.HTTPClientService.getService(
self.master, baseURL, headers={
'Authorization': 'token ' + token,
'User-Agent': 'Buildbot'
},
debug=self.debug, verify=self.verify)
def setup_context(self, context):
return context or Interpolate('buildbot/%(prop:buildername)s')
def _create_default_generators(self):
start_formatter = MessageFormatterRenderable('Build started.')
end_formatter = MessageFormatterRenderable('Build done.')
pending_formatter = MessageFormatterRenderable('Build pending.')
return [
BuildRequestGenerator(formatter=pending_formatter),
BuildStartEndStatusGenerator(start_formatter=start_formatter,
end_formatter=end_formatter)
]
def createStatus(self,
repo_user, repo_name, sha, state, target_url=None,
context=None, issue=None, description=None):
"""
:param repo_user: GitHub user or organization
:param repo_name: Name of the repository
:param sha: Full sha to create the status for.
:param state: one of the following 'pending', 'success', 'error'
or 'failure'.
:param target_url: Target url to associate with this status.
:param description: Short description of the status.
:param context: Build context
:return: A deferred with the result from GitHub.
This code comes from txgithub by @tomprince.
        txgithub is based on twisted's webclient agent, which is much less reliable and featureful
        than txrequest (which supports proxies, connection pooling, keep-alive, retries, etc.)
"""
payload = {'state': state}
if description is not None:
payload['description'] = description
if target_url is not None:
payload['target_url'] = target_url
if context is not None:
payload['context'] = context
return self._http.post(
'/'.join(['/repos', repo_user, repo_name, 'statuses', sha]),
json=payload)
def is_status_2xx(self, code):
return code // 100 == 2
def _extract_issue(self, props):
branch = props.getProperty('branch')
if branch:
m = re.search(r"refs/pull/([0-9]*)/(head|merge)", branch)
if m:
return m.group(1)
return None
def _extract_github_info(self, sourcestamp):
repo_owner = None
repo_name = None
project = sourcestamp['project']
repository = sourcestamp['repository']
if project and "/" in project:
repo_owner, repo_name = project.split('/')
elif repository:
giturl = giturlparse(repository)
if giturl:
repo_owner = giturl.owner
repo_name = giturl.repo
return repo_owner, repo_name
@defer.inlineCallbacks
def sendMessage(self, reports):
report = reports[0]
build = reports[0]['builds'][0]
props = Properties.fromDict(build['properties'])
props.master = self.master
description = report.get('body', None)
if build['complete']:
state = {
SUCCESS: 'success',
WARNINGS: 'success',
FAILURE: 'failure',
SKIPPED: 'success',
EXCEPTION: 'error',
RETRY: 'pending',
CANCELLED: 'error'
}.get(build['results'], 'error')
else:
state = 'pending'
context = yield props.render(self.context)
sourcestamps = build['buildset'].get('sourcestamps')
if not sourcestamps:
return
issue = self._extract_issue(props)
for sourcestamp in sourcestamps:
repo_owner, repo_name = self._extract_github_info(sourcestamp)
if not repo_owner or not repo_name:
log.msg('Skipped status update because required repo information is missing.')
continue
sha = sourcestamp['revision']
response = None
# If the scheduler specifies multiple codebases, don't bother updating
# the ones for which there is no revision
if not sha:
log.msg(
'Skipped status update for codebase {codebase}, '
'context "{context}", issue {issue}.'.format(
codebase=sourcestamp['codebase'], issue=issue, context=context))
continue
try:
if self.verbose:
log.msg("Updating github status: repo_owner={}, repo_name={}".format(
repo_owner, repo_name))
response = yield self.createStatus(repo_user=repo_owner,
repo_name=repo_name,
sha=sha,
state=state,
target_url=build['url'],
context=context,
issue=issue,
description=description)
if not response:
# the implementation of createStatus refused to post update due to missing data
continue
if not self.is_status_2xx(response.code):
raise Exception()
if self.verbose:
log.msg(
'Updated status with "{state}" for {repo_owner}/{repo_name} '
'at {sha}, context "{context}", issue {issue}.'.format(
state=state, repo_owner=repo_owner, repo_name=repo_name,
sha=sha, issue=issue, context=context))
except Exception as e:
if response:
content = yield response.content()
code = response.code
else:
content = code = "n/a"
log.err(
e,
'Failed to update "{state}" for {repo_owner}/{repo_name} '
'at {sha}, context "{context}", issue {issue}. '
'http {code}, {content}'.format(
state=state, repo_owner=repo_owner, repo_name=repo_name,
sha=sha, issue=issue, context=context,
code=code, content=content))
class GitHubCommentPush(GitHubStatusPush):
name = "GitHubCommentPush"
def setup_context(self, context):
return ''
def _create_default_generators(self):
start_formatter = MessageFormatterRenderable(None)
end_formatter = MessageFormatterRenderable('Build done.')
return [
BuildStartEndStatusGenerator(start_formatter=start_formatter,
end_formatter=end_formatter)
]
@defer.inlineCallbacks
def sendMessage(self, reports):
report = reports[0]
if 'body' not in report or report['body'] is None:
return
yield super().sendMessage(reports)
@defer.inlineCallbacks
def createStatus(self,
repo_user, repo_name, sha, state, target_url=None,
context=None, issue=None, description=None):
"""
:param repo_user: GitHub user or organization
:param repo_name: Name of the repository
:param issue: Pull request number
:param state: one of the following 'pending', 'success', 'error'
or 'failure'.
:param description: Short description of the status.
:return: A deferred with the result from GitHub.
This code comes from txgithub by @tomprince.
        txgithub is based on twisted's webclient agent, which is much less reliable and featureful
        than txrequest (which supports proxies, connection pooling, keep-alive, retries, etc.)
"""
payload = {'body': description}
if issue is None:
log.msg('Skipped status update for repo {} sha {} as issue is not specified'.format(
repo_name, sha))
return None
url = '/'.join(['/repos', repo_user, repo_name, 'issues', issue, 'comments'])
ret = yield self._http.post(url, json=payload)
return ret
| 0.001314 |
# coding: utf-8
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
extract_attributes,
js_to_json,
url_or_none,
)
class TV2DKIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://
(?:www\.)?
(?:
tvsyd|
tv2ostjylland|
tvmidtvest|
tv2fyn|
tv2east|
tv2lorry|
tv2nord
)\.dk/
                        (?:[^/]+/)*
(?P<id>[^/?\#&]+)
'''
_TESTS = [{
'url': 'https://www.tvsyd.dk/nyheder/28-10-2019/1930/1930-28-okt-2019?autoplay=1#player',
'info_dict': {
'id': '0_52jmwa0p',
'ext': 'mp4',
'title': '19:30 - 28. okt. 2019',
'timestamp': 1572290248,
'upload_date': '20191028',
'uploader_id': 'tvsyd',
'duration': 1347,
'view_count': int,
},
'params': {
'skip_download': True,
},
'add_ie': ['Kaltura'],
}, {
'url': 'https://www.tv2ostjylland.dk/artikel/minister-gaar-ind-i-sag-om-diabetes-teknologi',
'only_matching': True,
}, {
'url': 'https://www.tv2ostjylland.dk/nyheder/28-10-2019/22/2200-nyhederne-mandag-d-28-oktober-2019?autoplay=1#player',
'only_matching': True,
}, {
'url': 'https://www.tvmidtvest.dk/nyheder/27-10-2019/1930/1930-27-okt-2019',
'only_matching': True,
}, {
'url': 'https://www.tv2fyn.dk/artikel/fyn-kan-faa-landets-foerste-fabrik-til-groent-jetbraendstof',
'only_matching': True,
}, {
'url': 'https://www.tv2east.dk/artikel/gods-faar-indleveret-tonsvis-af-aebler-100-kilo-aebler-gaar-til-en-aeblebrandy',
'only_matching': True,
}, {
'url': 'https://www.tv2lorry.dk/koebenhavn/rasmus-paludan-evakueret-til-egen-demonstration#player',
'only_matching': True,
}, {
'url': 'https://www.tv2nord.dk/artikel/dybt-uacceptabelt',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
entries = []
def add_entry(partner_id, kaltura_id):
entries.append(self.url_result(
'kaltura:%s:%s' % (partner_id, kaltura_id), 'Kaltura',
video_id=kaltura_id))
for video_el in re.findall(r'(?s)<[^>]+\bdata-entryid\s*=[^>]*>', webpage):
video = extract_attributes(video_el)
kaltura_id = video.get('data-entryid')
if not kaltura_id:
continue
partner_id = video.get('data-partnerid')
if not partner_id:
continue
add_entry(partner_id, kaltura_id)
if not entries:
kaltura_id = self._search_regex(
r'entry_id\s*:\s*["\']([0-9a-z_]+)', webpage, 'kaltura id')
partner_id = self._search_regex(
(r'\\u002Fp\\u002F(\d+)\\u002F', r'/p/(\d+)/'), webpage,
'partner id')
add_entry(partner_id, kaltura_id)
return self.playlist_result(entries)
class TV2DKBornholmPlayIE(InfoExtractor):
_VALID_URL = r'https?://play\.tv2bornholm\.dk/\?.*?\bid=(?P<id>\d+)'
_TEST = {
'url': 'http://play.tv2bornholm.dk/?area=specifikTV&id=781021',
'info_dict': {
'id': '781021',
'ext': 'mp4',
'title': '12Nyheder-27.11.19',
},
'params': {
'skip_download': True,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
video = self._download_json(
'https://play.tv2bornholm.dk/controls/AJAX.aspx/specifikVideo', video_id,
data=json.dumps({
'playlist_id': video_id,
'serienavn': '',
}).encode(), headers={
'X-Requested-With': 'XMLHttpRequest',
'Content-Type': 'application/json; charset=UTF-8',
})['d']
# TODO: generalize flowplayer
title = self._search_regex(
r'title\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1', video, 'title',
group='value')
sources = self._parse_json(self._search_regex(
r'(?s)sources:\s*(\[.+?\]),', video, 'sources'),
video_id, js_to_json)
formats = []
srcs = set()
for source in sources:
src = url_or_none(source.get('src'))
if not src:
continue
if src in srcs:
continue
srcs.add(src)
ext = determine_ext(src)
src_type = source.get('type')
if src_type == 'application/x-mpegurl' or ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
src, video_id, ext='mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False))
elif src_type == 'application/dash+xml' or ext == 'mpd':
formats.extend(self._extract_mpd_formats(
src, video_id, mpd_id='dash', fatal=False))
else:
formats.append({
'url': src,
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'formats': formats,
}
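# Example usage (a sketch): running one of these extractors through
# youtube-dl's public API rather than the command line. The URL is taken from
# the test case above.
#
#     import youtube_dl
#
#     with youtube_dl.YoutubeDL({'skip_download': True}) as ydl:
#         info = ydl.extract_info(
#             'http://play.tv2bornholm.dk/?area=specifikTV&id=781021')
#         print(info['id'], info['title'])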
| 0.001587 |
# Orca
#
# Copyright 2010 Consorcio Fernando de los Rios.
# Author: Javier Hernandez Antunez <[email protected]>
# Author: Alejandro Leiva <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Displays the Save Profile As dialog."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010 Consorcio Fernando de los Rios."
__license__ = "LGPL"
import locale
import sys
from gi.repository import Gtk
import orca_state
from orca_i18n import _
OS = None
newProfile = None
class OrcaProfileGUI(Gtk.Dialog):
def __init__(self):
"""Initialize the Orca profile configuration GUI."""
Gtk.Dialog.__init__(self)
# Translators: Profiles in Orca make it possible for users to
# quickly switch amongst a group of pre-defined settings (e.g.
# an 'English' profile for reading text written in English using
# an English-language speech synthesizer and braille rules, and
# a similar 'Spanish' profile for reading Spanish text. The
# following string is the title of a dialog in which users can
# save a newly-defined profile.
#
self.set_title(_('Save Profile As'))
self.set_has_resize_grip(False)
self.add_button('gtk-cancel', Gtk.ResponseType.CANCEL)
self.add_button('gtk-save', Gtk.ResponseType.ACCEPT)
grid = Gtk.Grid()
grid.set_property('margin', 12)
grid.set_row_spacing(10)
grid.set_column_spacing(10)
# Right now the content area is a GtkBox. We'll need to update
# this once GtkBox is fully deprecated.
contentArea = self.get_content_area()
contentArea.pack_start(grid, True, True, 0)
self.profileEntry = Gtk.Entry()
self.profileEntry.set_property('hexpand', True)
self.profileEntry.set_activates_default(True)
grid.attach(self.profileEntry, 1, 0, 1, 1)
# Translators: Profiles in Orca make it possible for users to
# quickly switch amongst a group of pre-defined settings (e.g.
# an 'English' profile for reading text written in English using
# an English-language speech synthesizer and braille rules, and
# a similar 'Spanish' profile for reading Spanish text. The
# following string is the label for a text entry in which the user
# enters the name of a new settings profile being saved via the
# 'Save Profile As' dialog.
#
label = Gtk.Label(_('_Profile Name:'))
label.set_use_underline(True)
label.set_mnemonic_widget(self.profileEntry)
grid.attach(label, 0, 0, 1, 1)
defaultButton = self.get_widget_for_response(Gtk.ResponseType.ACCEPT)
defaultButton.set_property('can-default', True)
defaultButton.set_property('has-default', True)
self.connect('response', self.onResponse)
self.connect('destroy', self.onDestroy)
self.searchString = None
self.profileString = None
self.prefsDialog = None
def init(self):
self.profileString = ''
def showGUI(self, prefsDialog):
"""Show the Save Profile As dialog."""
self.show_all()
self.prefsDialog = prefsDialog
self.profileEntry.set_text(self.profileString)
ts = orca_state.lastInputEventTimestamp
if ts == 0:
ts = Gtk.get_current_event_time()
self.present_with_time(ts)
def onResponse(self, widget, response):
"""Signal handler for the responses emitted by the dialog."""
if response in [Gtk.ResponseType.CANCEL, Gtk.ResponseType.DELETE_EVENT]:
self.hide()
return
if response == Gtk.ResponseType.ACCEPT:
global newProfile
newProfile = self.profileEntry.get_text()
if newProfile:
self.destroy()
if self.prefsDialog:
self.prefsDialog.saveProfile(newProfile)
def onDestroy(self, widget):
"""Signal handler for the 'destroy' signal of the dialog."""
global OS
OS = None
def showProfileUI(prefsDialog=None):
global OS
global newProfile
newProfile = None
if not OS:
OS = OrcaProfileGUI()
OS.init()
OS.showGUI(prefsDialog)
def main():
locale.setlocale(locale.LC_ALL, '')
showProfileUI()
Gtk.main()
sys.exit(0)
if __name__ == "__main__":
main()
| 0.002718 |
# -*- coding: utf-8 -*-
#
# pylast - A Python interface to Last.fm (and other API compatible social networks)
#
# Copyright 2008-2010 Amr Hassan
#
# This version has been modified by Fei to include some missing info from the lib.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# http://code.google.com/p/pylast/
__version__ = '0.5'
__author__ = 'Amr Hassan'
__copyright__ = "Copyright (C) 2008-2010 Amr Hassan"
__license__ = "apache2"
__email__ = '[email protected]'
import hashlib
from xml.dom import minidom
import xml.dom
import time
import shelve
import tempfile
import sys
import collections
import warnings
def _deprecation_warning(message):
warnings.warn(message, DeprecationWarning)
if sys.version_info[0] == 3:
from http.client import HTTPConnection
import html.entities as htmlentitydefs
from urllib.parse import splithost as url_split_host
from urllib.parse import quote_plus as url_quote_plus
unichr = chr
elif sys.version_info[0] == 2:
from httplib import HTTPConnection
import htmlentitydefs
from urllib import splithost as url_split_host
from urllib import quote_plus as url_quote_plus
STATUS_INVALID_SERVICE = 2
STATUS_INVALID_METHOD = 3
STATUS_AUTH_FAILED = 4
STATUS_INVALID_FORMAT = 5
STATUS_INVALID_PARAMS = 6
STATUS_INVALID_RESOURCE = 7
STATUS_TOKEN_ERROR = 8
STATUS_INVALID_SK = 9
STATUS_INVALID_API_KEY = 10
STATUS_OFFLINE = 11
STATUS_SUBSCRIBERS_ONLY = 12
STATUS_INVALID_SIGNATURE = 13
STATUS_TOKEN_UNAUTHORIZED = 14
STATUS_TOKEN_EXPIRED = 15
EVENT_ATTENDING = '0'
EVENT_MAYBE_ATTENDING = '1'
EVENT_NOT_ATTENDING = '2'
PERIOD_OVERALL = 'overall'
PERIOD_7DAYS = "7day"
PERIOD_3MONTHS = '3month'
PERIOD_6MONTHS = '6month'
PERIOD_12MONTHS = '12month'
DOMAIN_ENGLISH = 0
DOMAIN_GERMAN = 1
DOMAIN_SPANISH = 2
DOMAIN_FRENCH = 3
DOMAIN_ITALIAN = 4
DOMAIN_POLISH = 5
DOMAIN_PORTUGUESE = 6
DOMAIN_SWEDISH = 7
DOMAIN_TURKISH = 8
DOMAIN_RUSSIAN = 9
DOMAIN_JAPANESE = 10
DOMAIN_CHINESE = 11
COVER_SMALL = 0
COVER_MEDIUM = 1
COVER_LARGE = 2
COVER_EXTRA_LARGE = 3
COVER_MEGA = 4
IMAGES_ORDER_POPULARITY = "popularity"
IMAGES_ORDER_DATE = "dateadded"
USER_MALE = 'Male'
USER_FEMALE = 'Female'
SCROBBLE_SOURCE_USER = "P"
SCROBBLE_SOURCE_NON_PERSONALIZED_BROADCAST = "R"
SCROBBLE_SOURCE_PERSONALIZED_BROADCAST = "E"
SCROBBLE_SOURCE_LASTFM = "L"
SCROBBLE_SOURCE_UNKNOWN = "U"
SCROBBLE_MODE_PLAYED = ""
SCROBBLE_MODE_LOVED = "L"
SCROBBLE_MODE_BANNED = "B"
SCROBBLE_MODE_SKIPPED = "S"
class _Network(object):
"""
    A music social network website: either Last.fm itself or one exposing a Last.fm-compatible API
"""
def __init__(self, name, homepage, ws_server, api_key, api_secret, session_key, submission_server, username, password_hash,
domain_names, urls):
"""
name: the name of the network
homepage: the homepage url
ws_server: the url of the webservices server
api_key: a provided API_KEY
api_secret: a provided API_SECRET
session_key: a generated session_key or None
submission_server: the url of the server to which tracks are submitted (scrobbled)
username: a username of a valid user
password_hash: the output of pylast.md5(password) where password is the user's password
domain_names: a dict mapping each DOMAIN_* value to a string domain name
urls: a dict mapping types to urls
if username and password_hash were provided and not session_key, session_key will be
generated automatically when needed.
Either a valid session_key or a combination of username and password_hash must be present for scrobbling.
You should use a preconfigured network object through a get_*_network(...) method instead of creating an object
of this class, unless you know what you're doing.
"""
self.name = name
self.homepage = homepage
self.ws_server = ws_server
self.api_key = api_key
self.api_secret = api_secret
self.session_key = session_key
self.submission_server = submission_server
self.username = username
self.password_hash = password_hash
self.domain_names = domain_names
self.urls = urls
self.cache_backend = None
self.proxy_enabled = False
self.proxy = None
self.last_call_time = 0
#generate a session_key if necessary
if (self.api_key and self.api_secret) and not self.session_key and (self.username and self.password_hash):
sk_gen = SessionKeyGenerator(self)
self.session_key = sk_gen.get_session_key(self.username, self.password_hash)
"""def __repr__(self):
attributes = ("name", "homepage", "ws_server", "api_key", "api_secret", "session_key", "submission_server",
"username", "password_hash", "domain_names", "urls")
text = "pylast._Network(%s)"
args = []
for attr in attributes:
args.append("=".join((attr, repr(getattr(self, attr)))))
return text % ", ".join(args)
"""
def __str__(self):
return "The %s Network" %self.name
def get_artist(self, artist_name):
"""
Return an Artist object
"""
return Artist(artist_name, self)
def get_track(self, artist, title):
"""
Return a Track object
"""
return Track(artist, title, self)
def get_album(self, artist, title):
"""
Return an Album object
"""
return Album(artist, title, self)
def get_authenticated_user(self):
"""
Returns the authenticated user
"""
return AuthenticatedUser(self)
def get_country(self, country_name):
"""
Returns a country object
"""
return Country(country_name, self)
def get_group(self, name):
"""
Returns a Group object
"""
return Group(name, self)
def get_user(self, username):
"""
Returns a user object
"""
return User(username, self)
def get_tag(self, name):
"""
Returns a tag object
"""
return Tag(name, self)
def get_scrobbler(self, client_id, client_version):
"""
Returns a Scrobbler object used for submitting tracks to the server
Quote from http://www.last.fm/api/submissions:
========
Client identifiers are used to provide a centrally managed database of
the client versions, allowing clients to be banned if they are found to
be behaving undesirably. The client ID is associated with a version
number on the server, however these are only incremented if a client is
banned and do not have to reflect the version of the actual client application.
During development, clients which have not been allocated an identifier should
use the identifier tst, with a version number of 1.0. Do not distribute code or
client implementations which use this test identifier. Do not use the identifiers
used by other clients.
=========
To obtain a new client identifier please contact:
* Last.fm: [email protected]
* # TODO: list others
...and provide us with the name of your client and its homepage address.
"""
_deprecation_warning("Use _Network.scrobble(...), _Network.scrobble_many(...), and Netowrk.update_now_playing(...) instead")
return Scrobbler(self, client_id, client_version)
def _get_language_domain(self, domain_language):
"""
        Returns the domain name of the network mapped to the given DOMAIN_* value
"""
if domain_language in self.domain_names:
return self.domain_names[domain_language]
def _get_url(self, domain, type):
return "http://%s/%s" %(self._get_language_domain(domain), self.urls[type])
def _get_ws_auth(self):
"""
Returns a (API_KEY, API_SECRET, SESSION_KEY) tuple.
"""
return (self.api_key, self.api_secret, self.session_key)
def _delay_call(self):
"""
Makes sure that web service calls are at least a second apart
"""
# delay time in seconds
DELAY_TIME = 1.0
now = time.time()
if (now - self.last_call_time) < DELAY_TIME:
time.sleep(1)
self.last_call_time = now
def create_new_playlist(self, title, description):
"""
Creates a playlist for the authenticated user and returns it
title: The title of the new playlist.
description: The description of the new playlist.
"""
params = {}
params['title'] = title
params['description'] = description
doc = _Request(self, 'playlist.create', params).execute(False)
e_id = doc.getElementsByTagName("id")[0].firstChild.data
user = doc.getElementsByTagName('playlists')[0].getAttribute('user')
return Playlist(user, e_id, self)
def get_top_tags(self, limit=None):
"""Returns a sequence of the most used tags as a sequence of TopItem objects."""
doc = _Request(self, "tag.getTopTags").execute(True)
seq = []
for node in doc.getElementsByTagName("tag"):
tag = Tag(_extract(node, "name"), self)
weight = _number(_extract(node, "count"))
seq.append(TopItem(tag, weight))
if limit:
seq = seq[:limit]
return seq
def enable_proxy(self, host, port):
"""Enable a default web proxy"""
self.proxy = [host, _number(port)]
self.proxy_enabled = True
def disable_proxy(self):
"""Disable using the web proxy"""
self.proxy_enabled = False
def is_proxy_enabled(self):
"""Returns True if a web proxy is enabled."""
return self.proxy_enabled
def _get_proxy(self):
"""Returns proxy details."""
return self.proxy
def enable_caching(self, file_path = None):
"""Enables caching request-wide for all cachable calls.
In choosing the backend used for caching, it will try _SqliteCacheBackend first if
the module sqlite3 is present. If not, it will fallback to _ShelfCacheBackend which uses shelve.Shelf objects.
* file_path: A file path for the backend storage file. If
None set, a temp file would probably be created, according the backend.
"""
if not file_path:
file_path = tempfile.mktemp(prefix="pylast_tmp_")
self.cache_backend = _ShelfCacheBackend(file_path)
def disable_caching(self):
"""Disables all caching features."""
self.cache_backend = None
def is_caching_enabled(self):
"""Returns True if caching is enabled."""
return not (self.cache_backend == None)
def _get_cache_backend(self):
return self.cache_backend
def search_for_album(self, album_name):
"""Searches for an album by its name. Returns a AlbumSearch object.
Use get_next_page() to retreive sequences of results."""
return AlbumSearch(album_name, self)
def search_for_artist(self, artist_name):
"""Searches of an artist by its name. Returns a ArtistSearch object.
Use get_next_page() to retreive sequences of results."""
return ArtistSearch(artist_name, self)
def search_for_tag(self, tag_name):
"""Searches of a tag by its name. Returns a TagSearch object.
Use get_next_page() to retreive sequences of results."""
return TagSearch(tag_name, self)
def search_for_track(self, artist_name, track_name):
"""Searches of a track by its name and its artist. Set artist to an empty string if not available.
Returns a TrackSearch object.
Use get_next_page() to retreive sequences of results."""
return TrackSearch(artist_name, track_name, self)
def search_for_venue(self, venue_name, country_name):
"""Searches of a venue by its name and its country. Set country_name to an empty string if not available.
Returns a VenueSearch object.
Use get_next_page() to retreive sequences of results."""
return VenueSearch(venue_name, country_name, self)
def get_track_by_mbid(self, mbid):
"""Looks up a track by its MusicBrainz ID"""
params = {"mbid": mbid}
doc = _Request(self, "track.getInfo", params).execute(True)
return Track(_extract(doc, "name", 1), _extract(doc, "name"), self)
def get_artist_by_mbid(self, mbid):
"""Loooks up an artist by its MusicBrainz ID"""
params = {"mbid": mbid}
doc = _Request(self, "artist.getInfo", params).execute(True)
return Artist(_extract(doc, "name"), self)
def get_album_by_mbid(self, mbid):
"""Looks up an album by its MusicBrainz ID"""
params = {"mbid": mbid}
doc = _Request(self, "album.getInfo", params).execute(True)
return Album(_extract(doc, "artist"), _extract(doc, "name"), self)
def update_now_playing(self, artist, title, album = None, album_artist = None,
duration = None, track_number = None, mbid = None, context = None):
"""
Used to notify Last.fm that a user has started listening to a track.
Parameters:
artist (Required) : The artist name
title (Required) : The track title
album (Optional) : The album name.
album_artist (Optional) : The album artist - if this differs from the track artist.
duration (Optional) : The length of the track in seconds.
track_number (Optional) : The track number of the track on the album.
mbid (Optional) : The MusicBrainz Track ID.
context (Optional) : Sub-client version (not public, only enabled for certain API keys)
"""
params = {"track": title, "artist": artist}
if album: params["album"] = album
if album_artist: params["albumArtist"] = album_artist
if context: params["context"] = context
if track_number: params["trackNumber"] = track_number
if mbid: params["mbid"] = mbid
if duration: params["duration"] = duration
_Request(self, "track.updateNowPlaying", params).execute()
def scrobble(self, artist, title, timestamp, album = None, album_artist = None, track_number = None,
duration = None, stream_id = None, context = None, mbid = None):
"""Used to add a track-play to a user's profile.
Parameters:
artist (Required) : The artist name.
title (Required) : The track name.
timestamp (Required) : The time the track started playing, in UNIX timestamp format (integer number of seconds since 00:00:00, January 1st 1970 UTC). This must be in the UTC time zone.
album (Optional) : The album name.
album_artist (Optional) : The album artist - if this differs from the track artist.
context (Optional) : Sub-client version (not public, only enabled for certain API keys)
stream_id (Optional) : The stream id for this track received from the radio.getPlaylist service.
track_number (Optional) : The track number of the track on the album.
mbid (Optional) : The MusicBrainz Track ID.
duration (Optional) : The length of the track in seconds.
"""
return self.scrobble_many(({"artist": artist, "title": title, "timestamp": timestamp, "album": album, "album_artist": album_artist,
"track_number": track_number, "duration": duration, "stream_id": stream_id, "context": context, "mbid": mbid},))
def scrobble_many(self, tracks):
"""
Used to scrobble a batch of tracks at once. The parameter tracks is a sequence of dicts per
track containing the keyword arguments as if passed to the scrobble() method.
"""
tracks_to_scrobble = tracks[:50]
if len(tracks) > 50:
remaining_tracks = tracks[50:]
else:
remaining_tracks = None
params = {}
for i in range(len(tracks_to_scrobble)):
params["artist[%d]" % i] = tracks_to_scrobble[i]["artist"]
params["track[%d]" % i] = tracks_to_scrobble[i]["title"]
additional_args = ("timestamp", "album", "album_artist", "context", "stream_id", "track_number", "mbid", "duration")
            args_map_to = {"album_artist": "albumArtist", "track_number": "trackNumber", "stream_id": "streamID"}  # map snake_case argument names to the API's camelCase parameter names
for arg in additional_args:
if arg in tracks_to_scrobble[i] and tracks_to_scrobble[i][arg]:
if arg in args_map_to:
maps_to = args_map_to[arg]
else:
maps_to = arg
params["%s[%d]" %(maps_to, i)] = tracks_to_scrobble[i][arg]
_Request(self, "track.scrobble", params).execute()
if remaining_tracks:
self.scrobble_many(remaining_tracks)
class LastFMNetwork(_Network):
"""A Last.fm network object
api_key: a provided API_KEY
api_secret: a provided API_SECRET
session_key: a generated session_key or None
username: a username of a valid user
password_hash: the output of pylast.md5(password) where password is the user's password
if username and password_hash were provided and not session_key, session_key will be
generated automatically when needed.
Either a valid session_key or a combination of username and password_hash must be present for scrobbling.
Most read-only webservices only require an api_key and an api_secret, see about obtaining them from:
http://www.last.fm/api/account
"""
def __init__(self, api_key="", api_secret="", session_key="", username="", password_hash=""):
_Network.__init__(self,
name = "Last.fm",
homepage = "http://last.fm",
ws_server = ("ws.audioscrobbler.com", "/2.0/"),
api_key = api_key,
api_secret = api_secret,
session_key = session_key,
submission_server = "http://post.audioscrobbler.com:80/",
username = username,
password_hash = password_hash,
domain_names = {
DOMAIN_ENGLISH: 'www.last.fm',
DOMAIN_GERMAN: 'www.lastfm.de',
DOMAIN_SPANISH: 'www.lastfm.es',
DOMAIN_FRENCH: 'www.lastfm.fr',
DOMAIN_ITALIAN: 'www.lastfm.it',
DOMAIN_POLISH: 'www.lastfm.pl',
DOMAIN_PORTUGUESE: 'www.lastfm.com.br',
DOMAIN_SWEDISH: 'www.lastfm.se',
DOMAIN_TURKISH: 'www.lastfm.com.tr',
DOMAIN_RUSSIAN: 'www.lastfm.ru',
DOMAIN_JAPANESE: 'www.lastfm.jp',
DOMAIN_CHINESE: 'cn.last.fm',
},
urls = {
"album": "music/%(artist)s/%(album)s",
"artist": "music/%(artist)s",
"event": "event/%(id)s",
"country": "place/%(country_name)s",
"playlist": "user/%(user)s/library/playlists/%(appendix)s",
"tag": "tag/%(name)s",
"track": "music/%(artist)s/_/%(title)s",
"group": "group/%(name)s",
"user": "user/%(name)s",
}
)
def __repr__(self):
return "pylast.LastFMNetwork(%s)" %(", ".join(("'%s'" %self.api_key, "'%s'" %self.api_secret, "'%s'" %self.session_key,
"'%s'" %self.username, "'%s'" %self.password_hash)))
def __str__(self):
return "LastFM Network"
def get_lastfm_network(api_key="", api_secret="", session_key = "", username = "", password_hash = ""):
"""
Returns a preconfigured _Network object for Last.fm
api_key: a provided API_KEY
api_secret: a provided API_SECRET
session_key: a generated session_key or None
username: a username of a valid user
password_hash: the output of pylast.md5(password) where password is the user's password
if username and password_hash were provided and not session_key, session_key will be
generated automatically when needed.
Either a valid session_key or a combination of username and password_hash must be present for scrobbling.
Most read-only webservices only require an api_key and an api_secret, see about obtaining them from:
http://www.last.fm/api/account
"""
_deprecation_warning("Create a LastFMNetwork object instead")
return LastFMNetwork(api_key, api_secret, session_key, username, password_hash)
class LibreFMNetwork(_Network):
"""
A preconfigured _Network object for Libre.fm
api_key: a provided API_KEY
api_secret: a provided API_SECRET
session_key: a generated session_key or None
username: a username of a valid user
password_hash: the output of pylast.md5(password) where password is the user's password
if username and password_hash were provided and not session_key, session_key will be
generated automatically when needed.
"""
def __init__(self, api_key="", api_secret="", session_key = "", username = "", password_hash = ""):
_Network.__init__(self,
name = "Libre.fm",
homepage = "http://alpha.dev.libre.fm",
ws_server = ("alpha.dev.libre.fm", "/2.0/"),
api_key = api_key,
api_secret = api_secret,
session_key = session_key,
submission_server = "http://turtle.libre.fm:80/",
username = username,
password_hash = password_hash,
domain_names = {
DOMAIN_ENGLISH: "alpha.dev.libre.fm",
DOMAIN_GERMAN: "alpha.dev.libre.fm",
DOMAIN_SPANISH: "alpha.dev.libre.fm",
DOMAIN_FRENCH: "alpha.dev.libre.fm",
DOMAIN_ITALIAN: "alpha.dev.libre.fm",
DOMAIN_POLISH: "alpha.dev.libre.fm",
DOMAIN_PORTUGUESE: "alpha.dev.libre.fm",
DOMAIN_SWEDISH: "alpha.dev.libre.fm",
DOMAIN_TURKISH: "alpha.dev.libre.fm",
DOMAIN_RUSSIAN: "alpha.dev.libre.fm",
DOMAIN_JAPANESE: "alpha.dev.libre.fm",
DOMAIN_CHINESE: "alpha.dev.libre.fm",
},
urls = {
"album": "artist/%(artist)s/album/%(album)s",
"artist": "artist/%(artist)s",
"event": "event/%(id)s",
"country": "place/%(country_name)s",
"playlist": "user/%(user)s/library/playlists/%(appendix)s",
"tag": "tag/%(name)s",
"track": "music/%(artist)s/_/%(title)s",
"group": "group/%(name)s",
"user": "user/%(name)s",
}
)
def __repr__(self):
return "pylast.LibreFMNetwork(%s)" %(", ".join(("'%s'" %self.api_key, "'%s'" %self.api_secret, "'%s'" %self.session_key,
"'%s'" %self.username, "'%s'" %self.password_hash)))
def __str__(self):
return "Libre.fm Network"
def get_librefm_network(api_key="", api_secret="", session_key = "", username = "", password_hash = ""):
"""
Returns a preconfigured _Network object for Libre.fm
api_key: a provided API_KEY
api_secret: a provided API_SECRET
session_key: a generated session_key or None
username: a username of a valid user
password_hash: the output of pylast.md5(password) where password is the user's password
if username and password_hash were provided and not session_key, session_key will be
generated automatically when needed.
"""
_deprecation_warning("DeprecationWarning: Create a LibreFMNetwork object instead")
return LibreFMNetwork(api_key, api_secret, session_key, username, password_hash)
class _ShelfCacheBackend(object):
"""Used as a backend for caching cacheable requests."""
def __init__(self, file_path = None):
self.shelf = shelve.open(file_path)
def get_xml(self, key):
return self.shelf[key]
def set_xml(self, key, xml_string):
self.shelf[key] = xml_string
def has_key(self, key):
return key in self.shelf.keys()
class _Request(object):
"""Representing an abstract web service operation."""
def __init__(self, network, method_name, params = {}):
self.network = network
self.params = {}
for key in params:
self.params[key] = _unicode(params[key])
(self.api_key, self.api_secret, self.session_key) = network._get_ws_auth()
self.params["api_key"] = self.api_key
self.params["method"] = method_name
if network.is_caching_enabled():
self.cache = network._get_cache_backend()
if self.session_key:
self.params["sk"] = self.session_key
self.sign_it()
def sign_it(self):
"""Sign this request."""
if not "api_sig" in self.params.keys():
self.params['api_sig'] = self._get_signature()
def _get_signature(self):
"""Returns a 32-character hexadecimal md5 hash of the signature string."""
keys = list(self.params.keys())
keys.sort()
string = ""
for name in keys:
string += name
string += self.params[name]
string += self.api_secret
return md5(string)
def _get_cache_key(self):
"""The cache key is a string of concatenated sorted names and values."""
keys = list(self.params.keys())
keys.sort()
cache_key = str()
for key in keys:
if key != "api_sig" and key != "api_key" and key != "sk":
cache_key += key + _string(self.params[key])
return hashlib.sha1(cache_key).hexdigest()
def _get_cached_response(self):
"""Returns a file object of the cached response."""
if not self._is_cached():
response = self._download_response()
self.cache.set_xml(self._get_cache_key(), response)
return self.cache.get_xml(self._get_cache_key())
def _is_cached(self):
"""Returns True if the request is already in cache."""
return self.cache.has_key(self._get_cache_key())
def _download_response(self):
"""Returns a response body string from the server."""
# Delay the call if necessary
#self.network._delay_call() # enable it if you want.
data = []
for name in self.params.keys():
data.append('='.join((name, url_quote_plus(_string(self.params[name])))))
data = '&'.join(data)
headers = {
"Content-type": "application/x-www-form-urlencoded",
'Accept-Charset': 'utf-8',
'User-Agent': "pylast" + '/' + __version__
}
(HOST_NAME, HOST_SUBDIR) = self.network.ws_server
if self.network.is_proxy_enabled():
conn = HTTPConnection(host = self._get_proxy()[0], port = self._get_proxy()[1])
try:
conn.request(method='POST', url="http://" + HOST_NAME + HOST_SUBDIR,
body=data, headers=headers)
except Exception as e:
raise NetworkError(self.network, e)
else:
conn = HTTPConnection(host=HOST_NAME)
try:
conn.request(method='POST', url=HOST_SUBDIR, body=data, headers=headers)
except Exception as e:
raise NetworkError(self.network, e)
try:
response_text = _unicode(conn.getresponse().read())
except Exception as e:
raise MalformedResponseError(self.network, e)
self._check_response_for_errors(response_text)
return response_text
def execute(self, cacheable = False):
"""Returns the XML DOM response of the POST Request from the server"""
if self.network.is_caching_enabled() and cacheable:
response = self._get_cached_response()
else:
response = self._download_response()
return minidom.parseString(_string(response))
def _check_response_for_errors(self, response):
"""Checks the response for errors and raises one if any exists."""
try:
doc = minidom.parseString(_string(response))
except Exception as e:
raise MalformedResponseError(self.network, e)
e = doc.getElementsByTagName('lfm')[0]
if e.getAttribute('status') != "ok":
e = doc.getElementsByTagName('error')[0]
status = e.getAttribute('code')
details = e.firstChild.data.strip()
raise WSError(self.network, status, details)
class SessionKeyGenerator(object):
"""Methods of generating a session key:
1) Web Authentication:
a. network = get_*_network(API_KEY, API_SECRET)
b. sg = SessionKeyGenerator(network)
c. url = sg.get_web_auth_url()
d. Ask the user to open the url and authorize you, and wait for it.
e. session_key = sg.get_web_auth_session_key(url)
2) Username and Password Authentication:
a. network = get_*_network(API_KEY, API_SECRET)
b. username = raw_input("Please enter your username: ")
c. password_hash = pylast.md5(raw_input("Please enter your password: ")
d. session_key = SessionKeyGenerator(network).get_session_key(username, password_hash)
    A session key's lifetime is infinite, unless the user revokes the rights of the given API key.
If you create a Network object with just a API_KEY and API_SECRET and a username and a password_hash, a
SESSION_KEY will be automatically generated for that network and stored in it so you don't have to do this
manually, unless you want to.
"""
def __init__(self, network):
self.network = network
self.web_auth_tokens = {}
def _get_web_auth_token(self):
"""Retrieves a token from the network for web authentication.
        The token then has to be authorized via the URL from get_web_auth_url() before creating a session.
"""
request = _Request(self.network, 'auth.getToken')
# default action is that a request is signed only when
# a session key is provided.
request.sign_it()
doc = request.execute()
e = doc.getElementsByTagName('token')[0]
return e.firstChild.data
def get_web_auth_url(self):
"""The user must open this page, and you first, then call get_web_auth_session_key(url) after that."""
token = self._get_web_auth_token()
url = '%(homepage)s/api/auth/?api_key=%(api)s&token=%(token)s' % \
{"homepage": self.network.homepage, "api": self.network.api_key, "token": token}
self.web_auth_tokens[url] = token
return url
def get_web_auth_session_key(self, url):
"""Retrieves the session key of a web authorization process by its url."""
if url in self.web_auth_tokens.keys():
token = self.web_auth_tokens[url]
else:
token = "" #that's gonna raise a WSError of an unauthorized token when the request is executed.
request = _Request(self.network, 'auth.getSession', {'token': token})
# default action is that a request is signed only when
# a session key is provided.
request.sign_it()
doc = request.execute()
return doc.getElementsByTagName('key')[0].firstChild.data
def get_session_key(self, username, password_hash):
"""Retrieve a session key with a username and a md5 hash of the user's password."""
params = {"username": username, "authToken": md5(username + password_hash)}
request = _Request(self.network, "auth.getMobileSession", params)
        # By default a request is signed only when a session key is provided,
        # so sign this one explicitly.
request.sign_it()
doc = request.execute()
return _extract(doc, "key")
TopItem = collections.namedtuple("TopItem", ["item", "weight"])
SimilarItem = collections.namedtuple("SimilarItem", ["item", "match"])
LibraryItem = collections.namedtuple("LibraryItem", ["item", "playcount", "tagcount"])
PlayedTrack = collections.namedtuple("PlayedTrack", ["track", "playback_date", "timestamp"])
LovedTrack = collections.namedtuple("LovedTrack", ["track", "date", "timestamp"])
ImageSizes = collections.namedtuple("ImageSizes", ["original", "large", "largesquare", "medium", "small", "extralarge"])
Image = collections.namedtuple("Image", ["title", "url", "dateadded", "format", "owner", "sizes", "votes"])
Shout = collections.namedtuple("Shout", ["body", "author", "date"])
def _string_output(funct):
def r(*args):
return _string(funct(*args))
return r
def _pad_list(given_list, desired_length, padding = None):
"""
Pads a list to be of the desired_length.
"""
while len(given_list) < desired_length:
given_list.append(padding)
return given_list
class _BaseObject(object):
"""An abstract webservices object."""
network = None
def __init__(self, network):
self.network = network
def _request(self, method_name, cacheable = False, params = None):
if not params:
params = self._get_params()
return _Request(self.network, method_name, params).execute(cacheable)
def _get_params(self):
"""Returns the most common set of parameters between all objects."""
return {}
def __hash__(self):
return hash(self.network) + \
hash(str(type(self)) + "".join(list(self._get_params().keys()) + list(self._get_params().values())).lower())
class _Taggable(object):
"""Common functions for classes with tags."""
def __init__(self, ws_prefix):
self.ws_prefix = ws_prefix
def add_tags(self, tags):
"""Adds one or several tags.
* tags: A sequence of tag names or Tag objects.
"""
for tag in tags:
self.add_tag(tag)
def add_tag(self, tag):
"""Adds one tag.
* tag: a tag name or a Tag object.
"""
if isinstance(tag, Tag):
tag = tag.get_name()
params = self._get_params()
params['tags'] = tag
self._request(self.ws_prefix + '.addTags', False, params)
def remove_tag(self, tag):
"""Remove a user's tag from this object."""
if isinstance(tag, Tag):
tag = tag.get_name()
params = self._get_params()
params['tag'] = tag
self._request(self.ws_prefix + '.removeTag', False, params)
def get_tags(self):
"""Returns a list of the tags set by the user to this object."""
# Uncacheable because it can be dynamically changed by the user.
params = self._get_params()
doc = self._request(self.ws_prefix + '.getTags', False, params)
tag_names = _extract_all(doc, 'name')
tags = []
for tag in tag_names:
tags.append(Tag(tag, self.network))
return tags
def remove_tags(self, tags):
"""Removes one or several tags from this object.
* tags: a sequence of tag names or Tag objects.
"""
for tag in tags:
self.remove_tag(tag)
def clear_tags(self):
"""Clears all the user-set tags. """
self.remove_tags(*(self.get_tags()))
def set_tags(self, tags):
"""Sets this object's tags to only those tags.
* tags: a sequence of tag names or Tag objects.
"""
c_old_tags = []
old_tags = []
c_new_tags = []
new_tags = []
to_remove = []
to_add = []
tags_on_server = self.get_tags()
for tag in tags_on_server:
c_old_tags.append(tag.get_name().lower())
old_tags.append(tag.get_name())
for tag in tags:
c_new_tags.append(tag.lower())
new_tags.append(tag)
for i in range(0, len(old_tags)):
if not c_old_tags[i] in c_new_tags:
to_remove.append(old_tags[i])
for i in range(0, len(new_tags)):
if not c_new_tags[i] in c_old_tags:
to_add.append(new_tags[i])
self.remove_tags(to_remove)
self.add_tags(to_add)
def get_top_tags(self, limit=None):
"""Returns a list of the most frequently used Tags on this object."""
doc = self._request(self.ws_prefix + '.getTopTags', True)
elements = doc.getElementsByTagName('tag')
seq = []
for element in elements:
tag_name = _extract(element, 'name')
tagcount = _extract(element, 'count')
seq.append(TopItem(Tag(tag_name, self.network), tagcount))
if limit:
seq = seq[:limit]
return seq
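# Illustrative sketch (not part of the library API): typical tag management on
# any _Taggable object (an Album, Artist or Track instance). set_tags() computes
# the difference with the tags already on the server and only sends what changed.
def _example_tag_management(taggable):
    taggable.add_tags(["rock", "seen live"])     # add several tags at once
    taggable.remove_tag("seen live")             # remove a single tag
    taggable.set_tags(["rock", "indie"])         # sync server tags to exactly these
    return [tag.get_name() for tag in taggable.get_tags()]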
class WSError(Exception):
"""Exception related to the Network web service"""
def __init__(self, network, status, details):
self.status = status
self.details = details
self.network = network
@_string_output
def __str__(self):
return self.details
def get_id(self):
"""Returns the exception ID, from one of the following:
STATUS_INVALID_SERVICE = 2
STATUS_INVALID_METHOD = 3
STATUS_AUTH_FAILED = 4
STATUS_INVALID_FORMAT = 5
STATUS_INVALID_PARAMS = 6
STATUS_INVALID_RESOURCE = 7
STATUS_TOKEN_ERROR = 8
STATUS_INVALID_SK = 9
STATUS_INVALID_API_KEY = 10
STATUS_OFFLINE = 11
STATUS_SUBSCRIBERS_ONLY = 12
STATUS_TOKEN_UNAUTHORIZED = 14
STATUS_TOKEN_EXPIRED = 15
"""
return self.status
class MalformedResponseError(Exception):
"""Exception conveying a malformed response from Last.fm."""
def __init__(self, network, underlying_error):
self.network = network
self.underlying_error = underlying_error
def __str__(self):
return "Malformed response from Last.fm. Underlying error: %s" %str(self.underlying_error)
class NetworkError(Exception):
"""Exception conveying a problem in sending a request to Last.fm"""
def __init__(self, network, underlying_error):
self.network = network
self.underlying_error = underlying_error
def __str__(self):
return "NetworkError: %s" %str(self.underlying_error)
class Album(_BaseObject, _Taggable):
"""An album."""
title = None
artist = None
def __init__(self, artist, title, network):
"""
Create an album instance.
# Parameters:
* artist: An artist name or an Artist object.
* title: The album title.
"""
_BaseObject.__init__(self, network)
_Taggable.__init__(self, 'album')
if isinstance(artist, Artist):
self.artist = artist
else:
self.artist = Artist(artist, self.network)
self.title = title
def __repr__(self):
return "pylast.Album(%s, %s, %s)" %(repr(self.artist.name), repr(self.title), repr(self.network))
@_string_output
def __str__(self):
return _unicode("%s - %s") %(self.get_artist().get_name(), self.get_title())
def __eq__(self, other):
return (self.get_title().lower() == other.get_title().lower()) and (self.get_artist().get_name().lower() == other.get_artist().get_name().lower())
def __ne__(self, other):
return (self.get_title().lower() != other.get_title().lower()) or (self.get_artist().get_name().lower() != other.get_artist().get_name().lower())
def _get_params(self):
return {'artist': self.get_artist().get_name(), 'album': self.get_title(), }
def get_artist(self):
"""Returns the associated Artist object."""
return self.artist
def get_title(self):
"""Returns the album title."""
return self.title
def get_name(self):
"""Returns the album title (alias to Album.get_title)."""
return self.get_title()
def get_release_date(self):
"""Retruns the release date of the album."""
return _extract(self._request("album.getInfo", cacheable = True), "releasedate")
def get_cover_image(self, size = COVER_EXTRA_LARGE):
"""
Returns a uri to the cover image
size can be one of:
COVER_EXTRA_LARGE
COVER_LARGE
COVER_MEDIUM
COVER_SMALL
"""
return _extract_all(self._request("album.getInfo", cacheable = True), 'image')[size]
def get_id(self):
"""Returns the ID"""
return _extract(self._request("album.getInfo", cacheable = True), "id")
def get_playcount(self):
"""Returns the number of plays on the network"""
return _number(_extract(self._request("album.getInfo", cacheable = True), "playcount"))
def get_listener_count(self):
"""Returns the number of liteners on the network"""
return _number(_extract(self._request("album.getInfo", cacheable = True), "listeners"))
def get_top_tags(self, limit=None):
"""Returns a list of the most-applied tags to this album."""
doc = self._request("album.getInfo", True)
e = doc.getElementsByTagName("toptags")[0]
seq = []
for name in _extract_all(e, "name"):
seq.append(Tag(name, self.network))
if limit:
seq = seq[:limit]
return seq
def get_tracks(self):
"""Returns the list of Tracks on this album."""
uri = 'lastfm://playlist/album/%s' %self.get_id()
return XSPF(uri, self.network).get_tracks()
def get_mbid(self):
"""Returns the MusicBrainz id of the album."""
return _extract(self._request("album.getInfo", cacheable = True), "mbid")
def get_url(self, domain_name = DOMAIN_ENGLISH):
"""Returns the url of the album page on the network.
# Parameters:
* domain_name str: The network's language domain. Possible values:
o DOMAIN_ENGLISH
o DOMAIN_GERMAN
o DOMAIN_SPANISH
o DOMAIN_FRENCH
o DOMAIN_ITALIAN
o DOMAIN_POLISH
o DOMAIN_PORTUGUESE
o DOMAIN_SWEDISH
o DOMAIN_TURKISH
o DOMAIN_RUSSIAN
o DOMAIN_JAPANESE
o DOMAIN_CHINESE
"""
artist = _url_safe(self.get_artist().get_name())
album = _url_safe(self.get_title())
return self.network._get_url(domain_name, "album") %{'artist': artist, 'album': album}
def get_wiki_published_date(self):
"""Returns the date of publishing this version of the wiki."""
doc = self._request("album.getInfo", True)
if len(doc.getElementsByTagName("wiki")) == 0:
return
node = doc.getElementsByTagName("wiki")[0]
return _extract(node, "published")
def get_wiki_summary(self):
"""Returns the summary of the wiki."""
doc = self._request("album.getInfo", True)
if len(doc.getElementsByTagName("wiki")) == 0:
return
node = doc.getElementsByTagName("wiki")[0]
return _extract(node, "summary")
def get_wiki_content(self):
"""Returns the content of the wiki."""
doc = self._request("album.getInfo", True)
if len(doc.getElementsByTagName("wiki")) == 0:
return
node = doc.getElementsByTagName("wiki")[0]
return _extract(node, "content")
class Artist(_BaseObject, _Taggable):
"""An artist."""
name = None
def __init__(self, name, network):
"""Create an artist object.
# Parameters:
* name str: The artist's name.
"""
_BaseObject.__init__(self, network)
_Taggable.__init__(self, 'artist')
self.name = name
def __repr__(self):
return "pylast.Artist(%s, %s)" %(repr(self.get_name()), repr(self.network))
@_string_output
def __str__(self):
return self.get_name()
def __eq__(self, other):
return self.get_name().lower() == other.get_name().lower()
def __ne__(self, other):
return self.get_name().lower() != other.get_name().lower()
def _get_params(self):
return {'artist': self.get_name()}
def get_name(self, properly_capitalized=False):
"""Returns the name of the artist.
        If properly_capitalized is True, the name is fetched from the network,
        overwriting the given one."""
if properly_capitalized:
self.name = _extract(self._request("artist.getInfo", True), "name")
return self.name
def get_cover_image(self, size = COVER_MEGA):
"""
Returns a uri to the cover image
size can be one of:
COVER_MEGA
COVER_EXTRA_LARGE
COVER_LARGE
COVER_MEDIUM
COVER_SMALL
"""
return _extract_all(self._request("artist.getInfo", True), "image")[size]
def get_playcount(self):
"""Returns the number of plays on the network."""
return _number(_extract(self._request("artist.getInfo", True), "playcount"))
def get_mbid(self):
"""Returns the MusicBrainz ID of this artist."""
doc = self._request("artist.getInfo", True)
return _extract(doc, "mbid")
def get_listener_count(self):
"""Returns the number of liteners on the network."""
if hasattr(self, "listener_count"):
return self.listener_count
else:
self.listener_count = _number(_extract(self._request("artist.getInfo", True), "listeners"))
return self.listener_count
def is_streamable(self):
"""Returns True if the artist is streamable."""
return bool(_number(_extract(self._request("artist.getInfo", True), "streamable")))
def get_bio_published_date(self):
"""Returns the date on which the artist's biography was published."""
return _extract(self._request("artist.getInfo", True), "published")
def get_bio_summary(self, language=None):
"""Returns the summary of the artist's biography."""
if language:
params = self._get_params()
params["lang"] = language
else:
params = None
return _extract(self._request("artist.getInfo", True, params), "summary")
def get_bio_content(self, language=None):
"""Returns the content of the artist's biography."""
if language:
params = self._get_params()
params["lang"] = language
else:
params = None
return _extract(self._request("artist.getInfo", True, params), "content")
def get_top_tags(self):
"""Return a list of genre tags"""
doc = self._request("artist.getInfo", True)
tags = []
for node in doc.getElementsByTagName("tag"):
tag_name = _extract(node, 'name')
tags.append(tag_name)
return tags
def get_upcoming_events(self):
# """Returns a list of the upcoming Events for this artist."""
"""Return a list of event dictionary along with the Events object
for the artist to prevent less last.py queries"""
doc = self._request('artist.getEvents', True)
# Bug: it gets the venue id as well.
# ids = _extract_all(doc, 'id')
# events = []
# for e_id in ids:
# events.append(Event(e_id, self.network))
upcoming_events = _extract_all(doc, 'event')
events = []
for e_node in doc.getElementsByTagName('event'):
event = {}
event["id"] = _extract(e_node, 'id')
event["title"] = _extract(e_node, 'title')
event["startDate"] = _extract(e_node, 'startDate')
event["endDate"] = _extract(e_node, 'endDate')
event["url"] = _extract(e_node, 'url', 1)
event["event"] = Event(event["id"], self.network)
events.append(event)
return events
def get_similar(self, limit = None):
"""Returns the similar artists on the network."""
params = self._get_params()
if limit:
params['limit'] = limit
doc = self._request('artist.getSimilar', True, params)
names = _extract_all(doc, "name")
matches = _extract_all(doc, "match")
artists = []
for i in range(0, len(names)):
artists.append(SimilarItem(Artist(names[i], self.network), _number(matches[i])))
return artists
def get_top_albums(self):
"""Retuns a list of the top albums."""
doc = self._request('artist.getTopAlbums', True)
seq = []
for node in doc.getElementsByTagName("album"):
name = _extract(node, "name")
artist = _extract(node, "name", 1)
playcount = _extract(node, "playcount")
seq.append(TopItem(Album(artist, name, self.network), playcount))
return seq
def get_top_tracks(self):
"""Returns a list of the most played Tracks by this artist."""
doc = self._request("artist.getTopTracks", True)
seq = []
for track in doc.getElementsByTagName('track'):
title = _extract(track, "name")
artist = _extract(track, "name", 1)
playcount = _number(_extract(track, "playcount"))
seq.append( TopItem(Track(artist, title, self.network), playcount) )
return seq
def get_top_fans(self, limit = None):
"""Returns a list of the Users who played this artist the most.
# Parameters:
* limit int: Max elements.
"""
doc = self._request('artist.getTopFans', True)
seq = []
elements = doc.getElementsByTagName('user')
for element in elements:
if limit and len(seq) >= limit:
break
name = _extract(element, 'name')
weight = _number(_extract(element, 'weight'))
seq.append(TopItem(User(name, self.network), weight))
return seq
def share(self, users, message = None):
"""Shares this artist (sends out recommendations).
# Parameters:
* users [User|str,]: A list that can contain usernames, emails, User objects, or all of them.
* message str: A message to include in the recommendation message.
"""
        # Last.fm currently accepts a maximum of 10 recipients at a time.
        while(len(users) > 10):
            section = users[0:10]
            users = users[10:]
self.share(section, message)
nusers = []
for user in users:
if isinstance(user, User):
nusers.append(user.get_name())
else:
nusers.append(user)
params = self._get_params()
recipients = ','.join(nusers)
params['recipient'] = recipients
if message:
params['message'] = message
self._request('artist.share', False, params)
def get_url(self, domain_name = DOMAIN_ENGLISH):
"""Returns the url of the artist page on the network.
# Parameters:
* domain_name: The network's language domain. Possible values:
o DOMAIN_ENGLISH
o DOMAIN_GERMAN
o DOMAIN_SPANISH
o DOMAIN_FRENCH
o DOMAIN_ITALIAN
o DOMAIN_POLISH
o DOMAIN_PORTUGUESE
o DOMAIN_SWEDISH
o DOMAIN_TURKISH
o DOMAIN_RUSSIAN
o DOMAIN_JAPANESE
o DOMAIN_CHINESE
"""
artist = _url_safe(self.get_name())
return self.network._get_url(domain_name, "artist") %{'artist': artist}
def get_images(self, order=IMAGES_ORDER_POPULARITY, limit=None):
"""
        Returns a sequence of Image objects.
        order can be IMAGES_ORDER_POPULARITY or IMAGES_ORDER_DATE.
        If limit is None, it will try to pull all the available data.
"""
images = []
params = self._get_params()
params["order"] = order
nodes = _collect_nodes(limit, self, "artist.getImages", True, params)
for e in nodes:
if _extract(e, "name"):
user = User(_extract(e, "name"), self.network)
else:
user = None
images.append(Image(
_extract(e, "title"),
_extract(e, "url"),
_extract(e, "dateadded"),
_extract(e, "format"),
user,
ImageSizes(*_extract_all(e, "size")),
(_extract(e, "thumbsup"), _extract(e, "thumbsdown"))
)
)
return images
def get_shouts(self, limit=50):
"""
        Returns a sequence of Shout objects.
"""
shouts = []
for node in _collect_nodes(limit, self, "artist.getShouts", False):
shouts.append(Shout(
_extract(node, "body"),
User(_extract(node, "author"), self.network),
_extract(node, "date")
)
)
return shouts
def shout(self, message):
"""
Post a shout
"""
params = self._get_params()
params["message"] = message
self._request("artist.Shout", False, params)
class Event(_BaseObject):
"""An event."""
id = None
def __init__(self, event_id, network):
_BaseObject.__init__(self, network)
self.id = event_id
def __repr__(self):
return "pylast.Event(%s, %s)" %(repr(self.id), repr(self.network))
@_string_output
def __str__(self):
return "Event #" + self.get_id()
def __eq__(self, other):
return self.get_id() == other.get_id()
def __ne__(self, other):
return self.get_id() != other.get_id()
def _get_params(self):
return {'event': self.get_id()}
def attend(self, attending_status):
"""Sets the attending status.
* attending_status: The attending status. Possible values:
o EVENT_ATTENDING
o EVENT_MAYBE_ATTENDING
o EVENT_NOT_ATTENDING
"""
params = self._get_params()
params['status'] = attending_status
self._request('event.attend', False, params)
def get_attendees(self):
"""
Get a list of attendees for an event
"""
doc = self._request("event.getAttendees", False)
users = []
for name in _extract_all(doc, "name"):
users.append(User(name, self.network))
return users
def get_id(self):
"""Returns the id of the event on the network. """
return self.id
def get_title(self):
"""Returns the title of the event. """
doc = self._request("event.getInfo", True)
return _extract(doc, "title")
def get_headliner(self):
"""Returns the headliner of the event. """
doc = self._request("event.getInfo", True)
return Artist(_extract(doc, "headliner"), self.network)
def get_artists(self):
"""Returns a list of the participating Artists. """
doc = self._request("event.getInfo", True)
names = _extract_all(doc, "artist")
artists = []
for name in names:
artists.append(Artist(name, self.network))
return artists
def get_venue(self):
"""Returns the venue where the event is held."""
doc = self._request("event.getInfo", True)
v = doc.getElementsByTagName("venue")[0]
venue_id = _number(_extract(v, "id"))
location = {}
location['name'] = _extract(v, "name")
location['city'] = _extract(v, "city")
location['country'] = _extract(v, "country")
location['street'] = _extract(v, "street")
location['lat'] = _extract(v, "geo:lat")
location['lng'] = _extract(v, "geo:long")
return Venue(venue_id, self.network), location
def get_start_date(self):
"""Returns the date when the event starts."""
doc = self._request("event.getInfo", True)
return _extract(doc, "startDate")
def get_end_date(self):
"""Returns the date when the event starts."""
doc = self._request("event.getInfo", True)
return _extract(doc, "endDate")
def get_description(self):
"""Returns the description of the event. """
doc = self._request("event.getInfo", True)
return _extract(doc, "description")
def get_cover_image(self, size = COVER_MEGA):
"""
Returns a uri to the cover image
size can be one of:
COVER_MEGA
COVER_EXTRA_LARGE
COVER_LARGE
COVER_MEDIUM
COVER_SMALL
"""
doc = self._request("event.getInfo", True)
return _extract_all(doc, "image")[size]
def get_attendance_count(self):
"""Returns the number of attending people. """
doc = self._request("event.getInfo", True)
return _number(_extract(doc, "attendance"))
def get_review_count(self):
"""Returns the number of available reviews for this event. """
doc = self._request("event.getInfo", True)
return _number(_extract(doc, "reviews"))
def get_url(self, domain_name = DOMAIN_ENGLISH):
"""Returns the url of the event page on the network.
* domain_name: The network's language domain. Possible values:
o DOMAIN_ENGLISH
o DOMAIN_GERMAN
o DOMAIN_SPANISH
o DOMAIN_FRENCH
o DOMAIN_ITALIAN
o DOMAIN_POLISH
o DOMAIN_PORTUGUESE
o DOMAIN_SWEDISH
o DOMAIN_TURKISH
o DOMAIN_RUSSIAN
o DOMAIN_JAPANESE
o DOMAIN_CHINESE
"""
return self.network._get_url(domain_name, "event") %{'id': self.get_id()}
def get_website(self):
"""Return the website of the event."""
doc = self._request("event.getInfo", True)
        # There are always two website sections; the first one is the venue's
        # website, so return the second (the event's).
return _extract_all(doc, "website")[1]
def share(self, users, message = None):
"""Shares this event (sends out recommendations).
* users: A list that can contain usernames, emails, User objects, or all of them.
* message: A message to include in the recommendation message.
"""
        # Last.fm currently accepts a maximum of 10 recipients at a time.
        while(len(users) > 10):
            section = users[0:10]
            users = users[10:]
self.share(section, message)
nusers = []
for user in users:
if isinstance(user, User):
nusers.append(user.get_name())
else:
nusers.append(user)
params = self._get_params()
recipients = ','.join(nusers)
params['recipient'] = recipients
if message:
params['message'] = message
self._request('event.share', False, params)
def get_shouts(self, limit=50):
"""
        Returns a sequence of Shout objects.
"""
shouts = []
for node in _collect_nodes(limit, self, "event.getShouts", False):
shouts.append(Shout(
_extract(node, "body"),
User(_extract(node, "author"), self.network),
_extract(node, "date")
)
)
return shouts
def shout(self, message):
"""
Post a shout
"""
params = self._get_params()
params["message"] = message
self._request("event.Shout", False, params)
class Country(_BaseObject):
"""A country at Last.fm."""
name = None
def __init__(self, name, network):
_BaseObject.__init__(self, network)
self.name = name
def __repr__(self):
return "pylast.Country(%s, %s)" %(repr(self.name), repr(self.network))
@_string_output
def __str__(self):
return self.get_name()
def __eq__(self, other):
return self.get_name().lower() == other.get_name().lower()
def __ne__(self, other):
return self.get_name() != other.get_name()
def _get_params(self):
return {'country': self.get_name()}
def _get_name_from_code(self, alpha2code):
# TODO: Have this function lookup the alpha-2 code and return the country name.
return alpha2code
def get_name(self):
"""Returns the country name. """
return self.name
def get_top_artists(self):
"""Returns a sequence of the most played artists."""
doc = self._request('geo.getTopArtists', True)
seq = []
for node in doc.getElementsByTagName("artist"):
name = _extract(node, 'name')
playcount = _extract(node, "playcount")
seq.append(TopItem(Artist(name, self.network), playcount))
return seq
def get_top_tracks(self):
"""Returns a sequence of the most played tracks"""
doc = self._request("geo.getTopTracks", True)
seq = []
for n in doc.getElementsByTagName('track'):
title = _extract(n, 'name')
artist = _extract(n, 'name', 1)
playcount = _number(_extract(n, "playcount"))
seq.append( TopItem(Track(artist, title, self.network), playcount))
return seq
def get_url(self, domain_name = DOMAIN_ENGLISH):
"""Returns the url of the event page on the network.
* domain_name: The network's language domain. Possible values:
o DOMAIN_ENGLISH
o DOMAIN_GERMAN
o DOMAIN_SPANISH
o DOMAIN_FRENCH
o DOMAIN_ITALIAN
o DOMAIN_POLISH
o DOMAIN_PORTUGUESE
o DOMAIN_SWEDISH
o DOMAIN_TURKISH
o DOMAIN_RUSSIAN
o DOMAIN_JAPANESE
o DOMAIN_CHINESE
"""
country_name = _url_safe(self.get_name())
return self.network._get_url(domain_name, "country") %{'country_name': country_name}
class Library(_BaseObject):
"""A user's Last.fm library."""
user = None
def __init__(self, user, network):
_BaseObject.__init__(self, network)
if isinstance(user, User):
self.user = user
else:
self.user = User(user, self.network)
self._albums_index = 0
self._artists_index = 0
self._tracks_index = 0
def __repr__(self):
return "pylast.Library(%s, %s)" %(repr(self.user), repr(self.network))
@_string_output
def __str__(self):
return repr(self.get_user()) + "'s Library"
def _get_params(self):
return {'user': self.user.get_name()}
def get_user(self):
"""Returns the user who owns this library."""
return self.user
def add_album(self, album):
"""Add an album to this library."""
params = self._get_params()
params["artist"] = album.get_artist.get_name()
params["album"] = album.get_name()
self._request("library.addAlbum", False, params)
def add_artist(self, artist):
"""Add an artist to this library."""
params = self._get_params()
params["artist"] = artist.get_name()
self._request("library.addArtist", False, params)
def add_track(self, track):
"""Add a track to this library."""
params = self._get_params()
params["track"] = track.get_title()
self._request("library.addTrack", False, params)
def get_albums(self, artist=None, limit=50):
"""
        Returns a sequence of LibraryItem namedtuples wrapping Album objects.
        If no artist is specified, it returns all albums, sorted by playcount in descending order.
        If limit is None it will return all (may take a while).
"""
params = self._get_params()
if artist:
params["artist"] = artist
seq = []
for node in _collect_nodes(limit, self, "library.getAlbums", True, params):
name = _extract(node, "name")
artist = _extract(node, "name", 1)
playcount = _number(_extract(node, "playcount"))
tagcount = _number(_extract(node, "tagcount"))
seq.append(LibraryItem(Album(artist, name, self.network), playcount, tagcount))
return seq
def get_artists(self, limit=50):
"""
        Returns a sequence of LibraryItem namedtuples wrapping Artist objects.
        If limit is None it will return all (may take a while).
"""
seq = []
for node in _collect_nodes(limit, self, "library.getArtists", True):
name = _extract(node, "name")
playcount = _number(_extract(node, "playcount"))
tagcount = _number(_extract(node, "tagcount"))
seq.append(LibraryItem(Artist(name, self.network), playcount, tagcount))
return seq
def get_tracks(self, artist=None, album=None, limit=50):
"""
        Returns a sequence of LibraryItem namedtuples wrapping Track objects.
        If limit is None it will return all (may take a while).
"""
params = self._get_params()
if artist:
params["artist"] = artist
if album:
params["album"] = album
seq = []
for node in _collect_nodes(limit, self, "library.getTracks", True, params):
name = _extract(node, "name")
artist = _extract(node, "name", 1)
playcount = _number(_extract(node, "playcount"))
tagcount = _number(_extract(node, "tagcount"))
seq.append(LibraryItem(Track(artist, name, self.network), playcount, tagcount))
return seq
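# Illustrative sketch (not part of the library API): browsing a user's library.
# Each call yields LibraryItem namedtuples (item, playcount, tagcount); the
# username and artist filter below are hypothetical.
def _example_library_usage(network):
    library = Library("some_username", network)
    artists = library.get_artists(limit=20)
    albums = library.get_albums(artist="Nirvana", limit=10)
    return ([i.item.get_name() for i in artists],
            [(i.item.get_title(), i.playcount) for i in albums])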
class Playlist(_BaseObject):
"""A Last.fm user playlist."""
id = None
user = None
def __init__(self, user, id, network):
_BaseObject.__init__(self, network)
if isinstance(user, User):
self.user = user
else:
self.user = User(user, self.network)
self.id = id
@_string_output
def __str__(self):
return repr(self.user) + "'s playlist # " + repr(self.id)
def _get_info_node(self):
"""Returns the node from user.getPlaylists where this playlist's info is."""
doc = self._request("user.getPlaylists", True)
for node in doc.getElementsByTagName("playlist"):
if _extract(node, "id") == str(self.get_id()):
return node
def _get_params(self):
return {'user': self.user.get_name(), 'playlistID': self.get_id()}
def get_id(self):
"""Returns the playlist id."""
return self.id
def get_user(self):
"""Returns the owner user of this playlist."""
return self.user
def get_tracks(self):
"""Returns a list of the tracks on this user playlist."""
uri = _unicode('lastfm://playlist/%s') %self.get_id()
return XSPF(uri, self.network).get_tracks()
def add_track(self, track):
"""Adds a Track to this Playlist."""
params = self._get_params()
params['artist'] = track.get_artist().get_name()
params['track'] = track.get_title()
self._request('playlist.addTrack', False, params)
def get_title(self):
"""Returns the title of this playlist."""
return _extract(self._get_info_node(), "title")
def get_creation_date(self):
"""Returns the creation date of this playlist."""
return _extract(self._get_info_node(), "date")
def get_size(self):
"""Returns the number of tracks in this playlist."""
return _number(_extract(self._get_info_node(), "size"))
def get_description(self):
"""Returns the description of this playlist."""
return _extract(self._get_info_node(), "description")
def get_duration(self):
"""Returns the duration of this playlist in milliseconds."""
return _number(_extract(self._get_info_node(), "duration"))
def is_streamable(self):
"""Returns True if the playlist is streamable.
For a playlist to be streamable, it needs at least 45 tracks by 15 different artists."""
if _extract(self._get_info_node(), "streamable") == '1':
return True
else:
return False
def has_track(self, track):
"""Checks to see if track is already in the playlist.
* track: Any Track object.
"""
return track in self.get_tracks()
def get_cover_image(self, size = COVER_EXTRA_LARGE):
"""
Returns a uri to the cover image
size can be one of:
COVER_MEGA
COVER_EXTRA_LARGE
COVER_LARGE
COVER_MEDIUM
COVER_SMALL
"""
return _extract(self._get_info_node(), "image")[size]
def get_url(self, domain_name = DOMAIN_ENGLISH):
"""Returns the url of the playlist on the network.
* domain_name: The network's language domain. Possible values:
o DOMAIN_ENGLISH
o DOMAIN_GERMAN
o DOMAIN_SPANISH
o DOMAIN_FRENCH
o DOMAIN_ITALIAN
o DOMAIN_POLISH
o DOMAIN_PORTUGUESE
o DOMAIN_SWEDISH
o DOMAIN_TURKISH
o DOMAIN_RUSSIAN
o DOMAIN_JAPANESE
o DOMAIN_CHINESE
"""
english_url = _extract(self._get_info_node(), "url")
appendix = english_url[english_url.rfind("/") + 1:]
return self.network._get_url(domain_name, "playlist") %{'appendix': appendix, "user": self.get_user().get_name()}
class Tag(_BaseObject):
"""A Last.fm object tag."""
    # TODO: getWeeklyArtistChart (waiting until someone requests it).
name = None
def __init__(self, name, network):
_BaseObject.__init__(self, network)
self.name = name
def __repr__(self):
return "pylast.Tag(%s, %s)" %(repr(self.name), repr(self.network))
@_string_output
def __str__(self):
return self.get_name()
def __eq__(self, other):
return self.get_name().lower() == other.get_name().lower()
def __ne__(self, other):
return self.get_name().lower() != other.get_name().lower()
def _get_params(self):
return {'tag': self.get_name()}
def get_name(self, properly_capitalized=False):
"""Returns the name of the tag. """
if properly_capitalized:
self.name = _extract(self._request("tag.getInfo", True), "name")
return self.name
def get_similar(self):
"""Returns the tags similar to this one, ordered by similarity. """
doc = self._request('tag.getSimilar', True)
seq = []
names = _extract_all(doc, 'name')
for name in names:
seq.append(Tag(name, self.network))
return seq
def get_top_albums(self):
"""Retuns a list of the top albums."""
doc = self._request('tag.getTopAlbums', True)
seq = []
for node in doc.getElementsByTagName("album"):
name = _extract(node, "name")
artist = _extract(node, "name", 1)
playcount = _extract(node, "playcount")
seq.append(TopItem(Album(artist, name, self.network), playcount))
return seq
def get_top_tracks(self):
"""Returns a list of the most played Tracks by this artist."""
doc = self._request("tag.getTopTracks", True)
seq = []
for track in doc.getElementsByTagName('track'):
title = _extract(track, "name")
artist = _extract(track, "name", 1)
playcount = _number(_extract(track, "playcount"))
seq.append( TopItem(Track(artist, title, self.network), playcount) )
return seq
def get_top_artists(self):
"""Returns a sequence of the most played artists."""
doc = self._request('tag.getTopArtists', True)
seq = []
for node in doc.getElementsByTagName("artist"):
name = _extract(node, 'name')
playcount = _extract(node, "playcount")
seq.append(TopItem(Artist(name, self.network), playcount))
return seq
def get_weekly_chart_dates(self):
"""Returns a list of From and To tuples for the available charts."""
doc = self._request("tag.getWeeklyChartList", True)
seq = []
for node in doc.getElementsByTagName("chart"):
seq.append( (node.getAttribute("from"), node.getAttribute("to")) )
return seq
def get_weekly_artist_charts(self, from_date = None, to_date = None):
"""Returns the weekly artist charts for the week starting from the from_date value to the to_date value."""
params = self._get_params()
if from_date and to_date:
params["from"] = from_date
params["to"] = to_date
doc = self._request("tag.getWeeklyArtistChart", True, params)
seq = []
for node in doc.getElementsByTagName("artist"):
item = Artist(_extract(node, "name"), self.network)
weight = _number(_extract(node, "weight"))
seq.append(TopItem(item, weight))
return seq
def get_url(self, domain_name = DOMAIN_ENGLISH):
"""Returns the url of the tag page on the network.
* domain_name: The network's language domain. Possible values:
o DOMAIN_ENGLISH
o DOMAIN_GERMAN
o DOMAIN_SPANISH
o DOMAIN_FRENCH
o DOMAIN_ITALIAN
o DOMAIN_POLISH
o DOMAIN_PORTUGUESE
o DOMAIN_SWEDISH
o DOMAIN_TURKISH
o DOMAIN_RUSSIAN
o DOMAIN_JAPANESE
o DOMAIN_CHINESE
"""
name = _url_safe(self.get_name())
return self.network._get_url(domain_name, "tag") %{'name': name}
class Track(_BaseObject, _Taggable):
"""A Last.fm track."""
artist = None
title = None
def __init__(self, artist, title, network):
_BaseObject.__init__(self, network)
_Taggable.__init__(self, 'track')
if isinstance(artist, Artist):
self.artist = artist
else:
self.artist = Artist(artist, self.network)
self.title = title
def __repr__(self):
return "pylast.Track(%s, %s, %s)" %(repr(self.artist.name), repr(self.title), repr(self.network))
@_string_output
def __str__(self):
return self.get_artist().get_name() + ' - ' + self.get_title()
def __eq__(self, other):
return (self.get_title().lower() == other.get_title().lower()) and (self.get_artist().get_name().lower() == other.get_artist().get_name().lower())
def __ne__(self, other):
return (self.get_title().lower() != other.get_title().lower()) or (self.get_artist().get_name().lower() != other.get_artist().get_name().lower())
def _get_params(self):
return {'artist': self.get_artist().get_name(), 'track': self.get_title()}
def get_artist(self):
"""Returns the associated Artist object."""
return self.artist
def get_title(self, properly_capitalized=False):
"""Returns the track title."""
if properly_capitalized:
self.title = _extract(self._request("track.getInfo", True), "name")
return self.title
def get_name(self, properly_capitalized=False):
"""Returns the track title (alias to Track.get_title)."""
return self.get_title(properly_capitalized)
def get_id(self):
"""Returns the track id on the network."""
doc = self._request("track.getInfo", True)
return _extract(doc, "id")
def get_duration(self):
"""Returns the track duration."""
doc = self._request("track.getInfo", True)
return _number(_extract(doc, "duration"))
def get_mbid(self):
"""Returns the MusicBrainz ID of this track."""
doc = self._request("track.getInfo", True)
return _extract(doc, "mbid")
def get_listener_count(self):
"""Returns the listener count."""
if hasattr(self, "listener_count"):
return self.listener_count
else:
doc = self._request("track.getInfo", True)
self.listener_count = _number(_extract(doc, "listeners"))
return self.listener_count
def get_playcount(self):
"""Returns the play count."""
doc = self._request("track.getInfo", True)
return _number(_extract(doc, "playcount"))
def is_streamable(self):
"""Returns True if the track is available at Last.fm."""
doc = self._request("track.getInfo", True)
return _extract(doc, "streamable") == "1"
def is_fulltrack_available(self):
"""Returns True if the fulltrack is available for streaming."""
doc = self._request("track.getInfo", True)
return doc.getElementsByTagName("streamable")[0].getAttribute("fulltrack") == "1"
def get_album(self):
"""Returns the album object of this track."""
doc = self._request("track.getInfo", True)
albums = doc.getElementsByTagName("album")
if len(albums) == 0:
return
node = doc.getElementsByTagName("album")[0]
return Album(_extract(node, "artist"), _extract(node, "title"), self.network)
def get_wiki_published_date(self):
"""Returns the date of publishing this version of the wiki."""
doc = self._request("track.getInfo", True)
if len(doc.getElementsByTagName("wiki")) == 0:
return
node = doc.getElementsByTagName("wiki")[0]
return _extract(node, "published")
def get_wiki_summary(self):
"""Returns the summary of the wiki."""
doc = self._request("track.getInfo", True)
if len(doc.getElementsByTagName("wiki")) == 0:
return
node = doc.getElementsByTagName("wiki")[0]
return _extract(node, "summary")
def get_wiki_content(self):
"""Returns the content of the wiki."""
doc = self._request("track.getInfo", True)
if len(doc.getElementsByTagName("wiki")) == 0:
return
node = doc.getElementsByTagName("wiki")[0]
return _extract(node, "content")
def love(self):
"""Adds the track to the user's loved tracks. """
self._request('track.love')
def ban(self):
"""Ban this track from ever playing on the radio. """
self._request('track.ban')
def get_similar(self):
"""Returns similar tracks for this track on the network, based on listening data. """
doc = self._request('track.getSimilar', True)
seq = []
for node in doc.getElementsByTagName("track"):
title = _extract(node, 'name')
artist = _extract(node, 'name', 1)
match = _number(_extract(node, "match"))
seq.append(SimilarItem(Track(artist, title, self.network), match))
return seq
def get_top_fans(self, limit = None):
"""Returns a list of the Users who played this track."""
doc = self._request('track.getTopFans', True)
seq = []
elements = doc.getElementsByTagName('user')
for element in elements:
if limit and len(seq) >= limit:
break
name = _extract(element, 'name')
weight = _number(_extract(element, 'weight'))
seq.append(TopItem(User(name, self.network), weight))
return seq
def share(self, users, message = None):
"""Shares this track (sends out recommendations).
* users: A list that can contain usernames, emails, User objects, or all of them.
* message: A message to include in the recommendation message.
"""
        # Last.fm currently accepts a maximum of 10 recipients at a time.
        while(len(users) > 10):
            section = users[0:10]
            users = users[10:]
self.share(section, message)
nusers = []
for user in users:
if isinstance(user, User):
nusers.append(user.get_name())
else:
nusers.append(user)
params = self._get_params()
recipients = ','.join(nusers)
params['recipient'] = recipients
if message:
params['message'] = message
self._request('track.share', False, params)
def get_url(self, domain_name = DOMAIN_ENGLISH):
"""Returns the url of the track page on the network.
* domain_name: The network's language domain. Possible values:
o DOMAIN_ENGLISH
o DOMAIN_GERMAN
o DOMAIN_SPANISH
o DOMAIN_FRENCH
o DOMAIN_ITALIAN
o DOMAIN_POLISH
o DOMAIN_PORTUGUESE
o DOMAIN_SWEDISH
o DOMAIN_TURKISH
o DOMAIN_RUSSIAN
o DOMAIN_JAPANESE
o DOMAIN_CHINESE
"""
artist = _url_safe(self.get_artist().get_name())
title = _url_safe(self.get_title())
return self.network._get_url(domain_name, "track") %{'domain': self.network._get_language_domain(domain_name), 'artist': artist, 'title': title}
def get_shouts(self, limit=50):
"""
        Returns a sequence of Shout objects.
"""
shouts = []
for node in _collect_nodes(limit, self, "track.getShouts", False):
shouts.append(Shout(
_extract(node, "body"),
User(_extract(node, "author"), self.network),
_extract(node, "date")
)
)
return shouts
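# Illustrative sketch (not part of the library API): typical Track operations.
# love() requires an authenticated session key on the network; the artist and
# title below are hypothetical.
def _example_track_usage(network):
    track = Track("Queen", "Bohemian Rhapsody", network)
    similar = track.get_similar()                  # [SimilarItem(Track, match), ...]
    album = track.get_album()                      # Album object, or None if unknown
    track.love()                                   # add to the user's loved tracks
    return [s.item.get_title() for s in similar[:5]], album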
class Group(_BaseObject):
"""A Last.fm group."""
name = None
def __init__(self, group_name, network):
_BaseObject.__init__(self, network)
self.name = group_name
def __repr__(self):
return "pylast.Group(%s, %s)" %(repr(self.name), repr(self.network))
@_string_output
def __str__(self):
return self.get_name()
def __eq__(self, other):
return self.get_name().lower() == other.get_name().lower()
def __ne__(self, other):
return self.get_name() != other.get_name()
def _get_params(self):
return {'group': self.get_name()}
def get_name(self):
"""Returns the group name. """
return self.name
def get_weekly_chart_dates(self):
"""Returns a list of From and To tuples for the available charts."""
doc = self._request("group.getWeeklyChartList", True)
seq = []
for node in doc.getElementsByTagName("chart"):
seq.append( (node.getAttribute("from"), node.getAttribute("to")) )
return seq
def get_weekly_artist_charts(self, from_date = None, to_date = None):
"""Returns the weekly artist charts for the week starting from the from_date value to the to_date value."""
params = self._get_params()
if from_date and to_date:
params["from"] = from_date
params["to"] = to_date
doc = self._request("group.getWeeklyArtistChart", True, params)
seq = []
for node in doc.getElementsByTagName("artist"):
item = Artist(_extract(node, "name"), self.network)
weight = _number(_extract(node, "playcount"))
seq.append(TopItem(item, weight))
return seq
def get_weekly_album_charts(self, from_date = None, to_date = None):
"""Returns the weekly album charts for the week starting from the from_date value to the to_date value."""
params = self._get_params()
if from_date and to_date:
params["from"] = from_date
params["to"] = to_date
doc = self._request("group.getWeeklyAlbumChart", True, params)
seq = []
for node in doc.getElementsByTagName("album"):
item = Album(_extract(node, "artist"), _extract(node, "name"), self.network)
weight = _number(_extract(node, "playcount"))
seq.append(TopItem(item, weight))
return seq
def get_weekly_track_charts(self, from_date = None, to_date = None):
"""Returns the weekly track charts for the week starting from the from_date value to the to_date value."""
params = self._get_params()
if from_date and to_date:
params["from"] = from_date
params["to"] = to_date
doc = self._request("group.getWeeklyTrackChart", True, params)
seq = []
for node in doc.getElementsByTagName("track"):
item = Track(_extract(node, "artist"), _extract(node, "name"), self.network)
weight = _number(_extract(node, "playcount"))
seq.append(TopItem(item, weight))
return seq
def get_url(self, domain_name = DOMAIN_ENGLISH):
"""Returns the url of the group page on the network.
* domain_name: The network's language domain. Possible values:
o DOMAIN_ENGLISH
o DOMAIN_GERMAN
o DOMAIN_SPANISH
o DOMAIN_FRENCH
o DOMAIN_ITALIAN
o DOMAIN_POLISH
o DOMAIN_PORTUGUESE
o DOMAIN_SWEDISH
o DOMAIN_TURKISH
o DOMAIN_RUSSIAN
o DOMAIN_JAPANESE
o DOMAIN_CHINESE
"""
name = _url_safe(self.get_name())
return self.network._get_url(domain_name, "group") %{'name': name}
def get_members(self, limit=50):
"""
Returns a sequence of User objects
if limit==None it will return all
"""
nodes = _collect_nodes(limit, self, "group.getMembers", False)
users = []
for node in nodes:
users.append(User(_extract(node, "name"), self.network))
return users
class XSPF(_BaseObject):
"A Last.fm XSPF playlist."""
uri = None
def __init__(self, uri, network):
_BaseObject.__init__(self, network)
self.uri = uri
def _get_params(self):
return {'playlistURL': self.get_uri()}
@_string_output
def __str__(self):
return self.get_uri()
def __eq__(self, other):
return self.get_uri() == other.get_uri()
def __ne__(self, other):
return self.get_uri() != other.get_uri()
def get_uri(self):
"""Returns the Last.fm playlist URI. """
return self.uri
def get_tracks(self):
"""Returns the tracks on this playlist."""
doc = self._request('playlist.fetch', True)
seq = []
for n in doc.getElementsByTagName('track'):
title = _extract(n, 'title')
artist = _extract(n, 'creator')
seq.append(Track(artist, title, self.network))
return seq
class User(_BaseObject):
"""A Last.fm user."""
name = None
def __init__(self, user_name, network):
_BaseObject.__init__(self, network)
self.name = user_name
self._past_events_index = 0
self._recommended_events_index = 0
self._recommended_artists_index = 0
def __repr__(self):
return "pylast.User(%s, %s)" %(repr(self.name), repr(self.network))
@_string_output
def __str__(self):
return self.get_name()
def __eq__(self, another):
return self.get_name() == another.get_name()
def __ne__(self, another):
return self.get_name() != another.get_name()
def _get_params(self):
return {"user": self.get_name()}
def get_name(self, properly_capitalized=False):
"""Returns the nuser name."""
if properly_capitalized:
self.name = _extract(self._request("user.getInfo", True), "name")
return self.name
def get_upcoming_events(self):
"""Returns all the upcoming events for this user. """
doc = self._request('user.getEvents', True)
ids = _extract_all(doc, 'id')
events = []
for e_id in ids:
events.append(Event(e_id, self.network))
return events
def get_friends(self, limit = 50):
"""Returns a list of the user's friends. """
seq = []
for node in _collect_nodes(limit, self, "user.getFriends", False):
seq.append(User(_extract(node, "name"), self.network))
return seq
def get_loved_tracks(self, limit=50):
"""Returns this user's loved track as a sequence of LovedTrack objects
in reverse order of their timestamp, all the way back to the first track.
If limit==None, it will try to pull all the available data.
This method uses caching. Enable caching only if you're pulling a
large amount of data.
Use extract_items() with the return of this function to
get only a sequence of Track objects with no playback dates. """
params = self._get_params()
if limit:
params['limit'] = limit
seq = []
for track in _collect_nodes(limit, self, "user.getLovedTracks", True, params):
title = _extract(track, "name")
artist = _extract(track, "name", 1)
date = _extract(track, "date")
timestamp = track.getElementsByTagName("date")[0].getAttribute("uts")
seq.append(LovedTrack(Track(artist, title, self.network), date, timestamp))
return seq
def get_neighbours(self, limit = 50):
"""Returns a list of the user's friends."""
params = self._get_params()
if limit:
params['limit'] = limit
doc = self._request('user.getNeighbours', True, params)
seq = []
names = _extract_all(doc, 'name')
for name in names:
seq.append(User(name, self.network))
return seq
def get_past_events(self, limit=50):
"""
Returns a sequence of Event objects
if limit==None it will return all
"""
seq = []
for n in _collect_nodes(limit, self, "user.getPastEvents", False):
seq.append(Event(_extract(n, "id"), self.network))
return seq
def get_playlists(self):
"""Returns a list of Playlists that this user owns."""
doc = self._request("user.getPlaylists", True)
playlists = []
for playlist_id in _extract_all(doc, "id"):
playlists.append(Playlist(self.get_name(), playlist_id, self.network))
return playlists
def get_now_playing(self):
"""Returns the currently playing track, or None if nothing is playing. """
params = self._get_params()
params['limit'] = '1'
doc = self._request('user.getRecentTracks', False, params)
e = doc.getElementsByTagName('track')[0]
if not e.hasAttribute('nowplaying'):
return None
artist = _extract(e, 'artist')
title = _extract(e, 'name')
return Track(artist, title, self.network)
def get_recent_tracks(self, limit = 10):
"""Returns this user's played track as a sequence of PlayedTrack objects
in reverse order of their playtime, all the way back to the first track.
If limit==None, it will try to pull all the available data.
This method uses caching. Enable caching only if you're pulling a
large amount of data.
Use extract_items() with the return of this function to
get only a sequence of Track objects with no playback dates. """
params = self._get_params()
if limit:
params['limit'] = limit
seq = []
for track in _collect_nodes(limit, self, "user.getRecentTracks", True, params):
if track.hasAttribute('nowplaying'):
continue #to prevent the now playing track from sneaking in here
title = _extract(track, "name")
artist = _extract(track, "artist")
date = _extract(track, "date")
timestamp = track.getElementsByTagName("date")[0].getAttribute("uts")
seq.append(PlayedTrack(Track(artist, title, self.network), date, timestamp))
return seq
def get_id(self):
"""Returns the user id."""
doc = self._request("user.getInfo", True)
return _extract(doc, "id")
def get_language(self):
"""Returns the language code of the language used by the user."""
doc = self._request("user.getInfo", True)
return _extract(doc, "lang")
def get_country(self):
"""Returns the name of the country of the user."""
doc = self._request("user.getInfo", True)
return Country(_extract(doc, "country"), self.network)
def get_age(self):
"""Returns the user's age."""
doc = self._request("user.getInfo", True)
return _number(_extract(doc, "age"))
def get_gender(self):
"""Returns the user's gender. Either USER_MALE or USER_FEMALE."""
doc = self._request("user.getInfo", True)
value = _extract(doc, "gender")
if value == 'm':
return USER_MALE
elif value == 'f':
return USER_FEMALE
return None
def is_subscriber(self):
"""Returns whether the user is a subscriber or not. True or False."""
doc = self._request("user.getInfo", True)
return _extract(doc, "subscriber") == "1"
def get_playcount(self):
"""Returns the user's playcount so far."""
doc = self._request("user.getInfo", True)
return _number(_extract(doc, "playcount"))
def get_top_albums(self, period = PERIOD_OVERALL):
"""Returns the top albums played by a user.
* period: The period of time. Possible values:
o PERIOD_OVERALL
o PERIOD_7DAYS
o PERIOD_3MONTHS
o PERIOD_6MONTHS
o PERIOD_12MONTHS
"""
params = self._get_params()
params['period'] = period
doc = self._request('user.getTopAlbums', True, params)
seq = []
for album in doc.getElementsByTagName('album'):
name = _extract(album, 'name')
artist = _extract(album, 'name', 1)
playcount = _extract(album, "playcount")
seq.append(TopItem(Album(artist, name, self.network), playcount))
return seq
def get_top_artists(self, period = PERIOD_OVERALL):
"""Returns the top artists played by a user.
* period: The period of time. Possible values:
o PERIOD_OVERALL
o PERIOD_7DAYS
o PERIOD_3MONTHS
o PERIOD_6MONTHS
o PERIOD_12MONTHS
"""
params = self._get_params()
params['period'] = period
doc = self._request('user.getTopArtists', True, params)
seq = []
for node in doc.getElementsByTagName('artist'):
name = _extract(node, 'name')
playcount = _extract(node, "playcount")
seq.append(TopItem(Artist(name, self.network), playcount))
return seq
def get_top_tags(self, limit=None):
"""Returns a sequence of the top tags used by this user with their counts as TopItem objects.
* limit: The limit of how many tags to return.
"""
doc = self._request("user.getTopTags", True)
seq = []
for node in doc.getElementsByTagName("tag"):
seq.append(TopItem(Tag(_extract(node, "name"), self.network), _extract(node, "count")))
if limit:
seq = seq[:limit]
return seq
def get_top_tracks(self, period = PERIOD_OVERALL):
"""Returns the top tracks played by a user.
* period: The period of time. Possible values:
o PERIOD_OVERALL
o PERIOD_7DAYS
o PERIOD_3MONTHS
o PERIOD_6MONTHS
o PERIOD_12MONTHS
"""
params = self._get_params()
params['period'] = period
doc = self._request('user.getTopTracks', True, params)
seq = []
for track in doc.getElementsByTagName('track'):
name = _extract(track, 'name')
artist = _extract(track, 'name', 1)
playcount = _extract(track, "playcount")
seq.append(TopItem(Track(artist, name, self.network), playcount))
return seq
def get_weekly_chart_dates(self):
"""Returns a list of From and To tuples for the available charts."""
doc = self._request("user.getWeeklyChartList", True)
seq = []
for node in doc.getElementsByTagName("chart"):
seq.append( (node.getAttribute("from"), node.getAttribute("to")) )
return seq
def get_weekly_artist_charts(self, from_date = None, to_date = None):
"""Returns the weekly artist charts for the week starting from the from_date value to the to_date value."""
params = self._get_params()
if from_date and to_date:
params["from"] = from_date
params["to"] = to_date
doc = self._request("user.getWeeklyArtistChart", True, params)
seq = []
for node in doc.getElementsByTagName("artist"):
item = Artist(_extract(node, "name"), self.network)
weight = _number(_extract(node, "playcount"))
seq.append(TopItem(item, weight))
return seq
def get_weekly_album_charts(self, from_date = None, to_date = None):
"""Returns the weekly album charts for the week starting from the from_date value to the to_date value."""
params = self._get_params()
if from_date and to_date:
params["from"] = from_date
params["to"] = to_date
doc = self._request("user.getWeeklyAlbumChart", True, params)
seq = []
for node in doc.getElementsByTagName("album"):
item = Album(_extract(node, "artist"), _extract(node, "name"), self.network)
weight = _number(_extract(node, "playcount"))
seq.append(TopItem(item, weight))
return seq
def get_weekly_track_charts(self, from_date = None, to_date = None):
"""Returns the weekly track charts for the week starting from the from_date value to the to_date value."""
params = self._get_params()
if from_date and to_date:
params["from"] = from_date
params["to"] = to_date
doc = self._request("user.getWeeklyTrackChart", True, params)
seq = []
for node in doc.getElementsByTagName("track"):
item = Track(_extract(node, "artist"), _extract(node, "name"), self.network)
weight = _number(_extract(node, "playcount"))
seq.append(TopItem(item, weight))
return seq
def compare_with_user(self, user, shared_artists_limit = None):
"""Compare this user with another Last.fm user.
Returns a sequence (tasteometer_score, (shared_artist1, shared_artist2, ...))
user: A User object or a username string/unicode object.
"""
if isinstance(user, User):
user = user.get_name()
params = self._get_params()
if shared_artists_limit:
params['limit'] = shared_artists_limit
params['type1'] = 'user'
params['type2'] = 'user'
params['value1'] = self.get_name()
params['value2'] = user
doc = self._request('tasteometer.compare', False, params)
score = _extract(doc, 'score')
artists = doc.getElementsByTagName('artists')[0]
shared_artists_names = _extract_all(artists, 'name')
shared_artists_seq = []
for name in shared_artists_names:
shared_artists_seq.append(Artist(name, self.network))
return (score, shared_artists_seq)
def get_image(self):
"""Returns the user's avatar."""
doc = self._request("user.getInfo", True)
return _extract(doc, "image")
def get_url(self, domain_name = DOMAIN_ENGLISH):
"""Returns the url of the user page on the network.
* domain_name: The network's language domain. Possible values:
o DOMAIN_ENGLISH
o DOMAIN_GERMAN
o DOMAIN_SPANISH
o DOMAIN_FRENCH
o DOMAIN_ITALIAN
o DOMAIN_POLISH
o DOMAIN_PORTUGUESE
o DOMAIN_SWEDISH
o DOMAIN_TURKISH
o DOMAIN_RUSSIAN
o DOMAIN_JAPANESE
o DOMAIN_CHINESE
"""
name = _url_safe(self.get_name())
return self.network._get_url(domain_name, "user") %{'name': name}
def get_library(self):
"""Returns the associated Library object. """
return Library(self, self.network)
def get_shouts(self, limit=50):
"""
        Returns a sequence of Shout objects.
"""
shouts = []
for node in _collect_nodes(limit, self, "user.getShouts", False):
shouts.append(Shout(
_extract(node, "body"),
User(_extract(node, "author"), self.network),
_extract(node, "date")
)
)
return shouts
def shout(self, message):
"""
Post a shout
"""
params = self._get_params()
params["message"] = message
self._request("user.Shout", False, params)
class AuthenticatedUser(User):
def __init__(self, network):
User.__init__(self, "", network);
def _get_params(self):
return {"user": self.get_name()}
def get_name(self):
"""Returns the name of the authenticated user."""
doc = self._request("user.getInfo", True, {"user": ""}) # hack
self.name = _extract(doc, "name")
return self.name
def get_recommended_events(self, limit=50):
"""
Returns a sequence of Event objects
if limit==None it will return all
"""
seq = []
for node in _collect_nodes(limit, self, "user.getRecommendedEvents", False):
seq.append(Event(_extract(node, "id"), self.network))
return seq
def get_recommended_artists(self, limit=50):
"""
        Returns a sequence of Artist objects
if limit==None it will return all
"""
seq = []
for node in _collect_nodes(limit, self, "user.getRecommendedArtists", False):
seq.append(Artist(_extract(node, "name"), self.network))
return seq
class _Search(_BaseObject):
"""An abstract class. Use one of its derivatives."""
def __init__(self, ws_prefix, search_terms, network):
_BaseObject.__init__(self, network)
self._ws_prefix = ws_prefix
self.search_terms = search_terms
self._last_page_index = 0
def _get_params(self):
params = {}
for key in self.search_terms.keys():
params[key] = self.search_terms[key]
return params
def get_total_result_count(self):
"""Returns the total count of all the results."""
doc = self._request(self._ws_prefix + ".search", True)
return _extract(doc, "opensearch:totalResults")
def _retreive_page(self, page_index):
"""Returns the node of matches to be processed"""
params = self._get_params()
params["page"] = str(page_index)
doc = self._request(self._ws_prefix + ".search", True, params)
return doc.getElementsByTagName(self._ws_prefix + "matches")[0]
def _retrieve_next_page(self):
self._last_page_index += 1
        return self._retrieve_page(self._last_page_index)
class AlbumSearch(_Search):
"""Search for an album by name."""
def __init__(self, album_name, network):
_Search.__init__(self, "album", {"album": album_name}, network)
def get_next_page(self):
"""Returns the next page of results as a sequence of Album objects."""
master_node = self._retrieve_next_page()
seq = []
for node in master_node.getElementsByTagName("album"):
seq.append(Album(_extract(node, "artist"), _extract(node, "name"), self.network))
return seq
class ArtistSearch(_Search):
"""Search for an artist by artist name."""
def __init__(self, artist_name, network):
_Search.__init__(self, "artist", {"artist": artist_name}, network)
def get_next_page(self):
"""Returns the next page of results as a sequence of Artist objects."""
master_node = self._retrieve_next_page()
seq = []
for node in master_node.getElementsByTagName("artist"):
artist = Artist(_extract(node, "name"), self.network)
artist.listener_count = _number(_extract(node, "listeners"))
seq.append(artist)
return seq
class TagSearch(_Search):
"""Search for a tag by tag name."""
def __init__(self, tag_name, network):
_Search.__init__(self, "tag", {"tag": tag_name}, network)
def get_next_page(self):
"""Returns the next page of results as a sequence of Tag objects."""
master_node = self._retrieve_next_page()
seq = []
for node in master_node.getElementsByTagName("tag"):
tag = Tag(_extract(node, "name"), self.network)
tag.tag_count = _number(_extract(node, "count"))
seq.append(tag)
return seq
class TrackSearch(_Search):
"""Search for a track by track title. If you don't wanna narrow the results down
by specifying the artist name, set it to empty string."""
def __init__(self, artist_name, track_title, network):
_Search.__init__(self, "track", {"track": track_title, "artist": artist_name}, network)
def get_next_page(self):
"""Returns the next page of results as a sequence of Track objects."""
master_node = self._retrieve_next_page()
seq = []
for node in master_node.getElementsByTagName("track"):
track = Track(_extract(node, "artist"), _extract(node, "name"), self.network)
track.listener_count = _number(_extract(node, "listeners"))
seq.append(track)
return seq
class VenueSearch(_Search):
"""Search for a venue by its name. If you don't wanna narrow the results down
by specifying a country, set it to empty string."""
def __init__(self, venue_name, country_name, network):
_Search.__init__(self, "venue", {"venue": venue_name, "country": country_name}, network)
def get_next_page(self):
"""Returns the next page of results as a sequence of Track objects."""
master_node = self._retrieve_next_page()
seq = []
for node in master_node.getElementsByTagName("venue"):
seq.append(Venue(_extract(node, "id"), self.network))
return seq
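# Illustrative sketch, not part of the original module: all search classes
# above share the same paging behaviour, so results are pulled one page at a
# time until an empty page comes back. ``network`` is assumed to be a valid
# network object and the query string is just an example.
def _demo_track_search(network):
    search = TrackSearch("", "Blue Monday", network)
    print("Total results: " + str(search.get_total_result_count()))
    while True:
        page = search.get_next_page()
        if not page:
            break
        for track in page:
            print(track)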
class Venue(_BaseObject):
"""A venue where events are held."""
# TODO: waiting for a venue.getInfo web service to use.
id = None
def __init__(self, id, network):
_BaseObject.__init__(self, network)
self.id = _number(id)
def __repr__(self):
return "pylast.Venue(%s, %s)" %(repr(self.id), repr(self.network))
@_string_output
def __str__(self):
return "Venue #" + str(self.id)
def __eq__(self, other):
return self.get_id() == other.get_id()
def _get_params(self):
return {"venue": self.get_id()}
def get_id(self):
"""Returns the id of the venue."""
return self.id
def get_upcoming_events(self):
"""Returns the upcoming events in this venue."""
doc = self._request("venue.getEvents", True)
seq = []
for node in doc.getElementsByTagName("event"):
seq.append(Event(_extract(node, "id"), self.network))
return seq
def get_past_events(self):
"""Returns the past events held in this venue."""
doc = self._request("venue.getEvents", True)
seq = []
for node in doc.getElementsByTagName("event"):
seq.append(Event(_extract(node, "id"), self.network))
return seq
def md5(text):
"""Returns the md5 hash of a string."""
h = hashlib.md5()
h.update(_unicode(text).encode("utf-8"))
return h.hexdigest()
def _unicode(text):
if sys.version_info[0] == 3:
if type(text) in (bytes, bytearray):
return str(text, "utf-8")
elif type(text) == str:
return text
else:
return str(text)
    elif sys.version_info[0] == 2:
if type(text) in (str,):
return unicode(text, "utf-8")
elif type(text) == unicode:
return text
else:
return unicode(text)
def _string(text):
"""For Python2 routines that can only process str type."""
if sys.version_info[0] == 3:
if type(text) != str:
return str(text)
else:
return text
elif sys.version_info[0] == 2:
if type(text) == str:
return text
if type(text) == int:
return str(text)
return text.encode("utf-8")
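# Illustrative sketch, not part of the original module: what the two helpers
# above do on either Python version.
def _demo_text_helpers():
    assert _unicode(b"caf\xc3\xa9") == u"caf\xe9"  # UTF-8 bytes are decoded
    assert _unicode(5) == u"5"                     # other objects are stringified
    assert _string(5) == "5"                       # _string always yields str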
def _collect_nodes(limit, sender, method_name, cacheable, params=None):
"""
    Returns a sequence of dom.Node objects about as close to
limit as possible
"""
if not params:
params = sender._get_params()
nodes = []
page = 1
end_of_pages = False
while not end_of_pages and (not limit or (limit and len(nodes) < limit)):
params["page"] = str(page)
doc = sender._request(method_name, cacheable, params)
main = doc.documentElement.childNodes[1]
if main.hasAttribute("totalPages"):
total_pages = _number(main.getAttribute("totalPages"))
elif main.hasAttribute("totalpages"):
total_pages = _number(main.getAttribute("totalpages"))
else:
raise Exception("No total pages attribute")
for node in main.childNodes:
            if node.nodeType != xml.dom.Node.TEXT_NODE and (not limit or len(nodes) < limit):
nodes.append(node)
if page >= total_pages:
end_of_pages = True
page += 1
return nodes
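# Illustrative sketch, not part of the original module: a fake sender showing
# how _collect_nodes() follows the "totalPages" attribute page by page. The
# XML below only mimics the shape of a Last.fm response and is made up.
def _demo_collect_nodes():
    import xml.dom.minidom
    class _FakeSender(object):
        def _get_params(self):
            return {}
        def _request(self, method_name, cacheable, params):
            text = ('<lfm>\n<friends totalPages="3">'
                    '<user><name>user_%s</name></user>'
                    '</friends></lfm>' % params["page"])
            return xml.dom.minidom.parseString(text)
    nodes = _collect_nodes(5, _FakeSender(), "user.getFriends", False)
    # One <user> node per page, three pages in total.
    return [node.getElementsByTagName("name")[0].firstChild.data
            for node in nodes]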
def _extract(node, name, index = 0):
"""Extracts a value from the xml string"""
nodes = node.getElementsByTagName(name)
if len(nodes):
if nodes[index].firstChild:
return _unescape_htmlentity(nodes[index].firstChild.data.strip())
else:
return None
def _extract_all(node, name, limit_count = None):
"""Extracts all the values from the xml string. returning a list."""
seq = []
for i in range(0, len(node.getElementsByTagName(name))):
if len(seq) == limit_count:
break
seq.append(_extract(node, name, i))
return seq
def _url_safe(text):
"""Does all kinds of tricks on a text to make it safe to use in a url."""
return url_quote_plus(url_quote_plus(_string(text))).lower()
def _number(string):
"""
    Extracts a number from a string (int when possible, otherwise float). Returns 0 if None or an empty string was passed.
"""
if not string:
return 0
elif string == "":
return 0
else:
try:
return int(string)
except ValueError:
return float(string)
def _unescape_htmlentity(string):
#string = _unicode(string)
mapping = htmlentitydefs.name2codepoint
for key in mapping:
string = string.replace("&%s;" %key, unichr(mapping[key]))
return string
def extract_items(topitems_or_libraryitems):
"""Extracts a sequence of items from a sequence of TopItem or LibraryItem objects."""
seq = []
for i in topitems_or_libraryitems:
seq.append(i.item)
return seq
class ScrobblingError(Exception):
def __init__(self, message):
Exception.__init__(self)
self.message = message
@_string_output
def __str__(self):
return self.message
class BannedClientError(ScrobblingError):
def __init__(self):
ScrobblingError.__init__(self, "This version of the client has been banned")
class BadAuthenticationError(ScrobblingError):
def __init__(self):
ScrobblingError.__init__(self, "Bad authentication token")
class BadTimeError(ScrobblingError):
def __init__(self):
ScrobblingError.__init__(self, "Time provided is not close enough to current time")
class BadSessionError(ScrobblingError):
def __init__(self):
ScrobblingError.__init__(self, "Bad session id, consider re-handshaking")
class _ScrobblerRequest(object):
def __init__(self, url, params, network, type="POST"):
for key in params:
params[key] = str(params[key])
self.params = params
self.type = type
(self.hostname, self.subdir) = url_split_host(url[len("http:"):])
self.network = network
def execute(self):
"""Returns a string response of this request."""
connection = HTTPConnection(self.hostname)
data = []
for name in self.params.keys():
value = url_quote_plus(self.params[name])
data.append('='.join((name, value)))
data = "&".join(data)
headers = {
"Content-type": "application/x-www-form-urlencoded",
"Accept-Charset": "utf-8",
"User-Agent": "pylast" + "/" + __version__,
"HOST": self.hostname
}
if self.type == "GET":
connection.request("GET", self.subdir + "?" + data, headers = headers)
else:
connection.request("POST", self.subdir, data, headers)
response = _unicode(connection.getresponse().read())
self._check_response_for_errors(response)
return response
def _check_response_for_errors(self, response):
"""When passed a string response it checks for erros, raising
any exceptions as necessary."""
lines = response.split("\n")
status_line = lines[0]
if status_line == "OK":
return
elif status_line == "BANNED":
raise BannedClientError()
elif status_line == "BADAUTH":
raise BadAuthenticationError()
elif status_line == "BADTIME":
raise BadTimeError()
elif status_line == "BADSESSION":
raise BadSessionError()
elif status_line.startswith("FAILED "):
reason = status_line[status_line.find("FAILED ")+len("FAILED "):]
raise ScrobblingError(reason)
class Scrobbler(object):
"""A class for scrobbling tracks to Last.fm"""
session_id = None
nowplaying_url = None
submissions_url = None
def __init__(self, network, client_id, client_version):
self.client_id = client_id
self.client_version = client_version
self.username = network.username
self.password = network.password_hash
self.network = network
def _do_handshake(self):
"""Handshakes with the server"""
timestamp = str(int(time.time()))
if self.password and self.username:
token = md5(self.password + timestamp)
elif self.network.api_key and self.network.api_secret and self.network.session_key:
if not self.username:
self.username = self.network.get_authenticated_user().get_name()
token = md5(self.network.api_secret + timestamp)
params = {"hs": "true", "p": "1.2.1", "c": self.client_id,
"v": self.client_version, "u": self.username, "t": timestamp,
"a": token}
if self.network.session_key and self.network.api_key:
params["sk"] = self.network.session_key
params["api_key"] = self.network.api_key
server = self.network.submission_server
response = _ScrobblerRequest(server, params, self.network, "GET").execute().split("\n")
self.session_id = response[1]
self.nowplaying_url = response[2]
self.submissions_url = response[3]
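    # Illustrative note, not part of the original module: a successful 1.2.1
    # submissions handshake answers with four lines, which _do_handshake()
    # above unpacks; the values shown here are placeholders only:
    #   OK
    #   <32-character session id>
    #   http://post.audioscrobbler.com:80/np_1.2
    #   http://post2.audioscrobbler.com:80/protocol_1.2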
def _get_session_id(self, new = False):
"""Returns a handshake. If new is true, then it will be requested from the server
even if one was cached."""
if not self.session_id or new:
self._do_handshake()
return self.session_id
def report_now_playing(self, artist, title, album = "", duration = "", track_number = "", mbid = ""):
_deprecation_warning("DeprecationWarning: Use Netowrk.update_now_playing(...) instead")
params = {"s": self._get_session_id(), "a": artist, "t": title,
"b": album, "l": duration, "n": track_number, "m": mbid}
try:
_ScrobblerRequest(self.nowplaying_url, params, self.network).execute()
except BadSessionError:
self._do_handshake()
self.report_now_playing(artist, title, album, duration, track_number, mbid)
def scrobble(self, artist, title, time_started, source, mode, duration, album="", track_number="", mbid=""):
"""Scrobble a track. parameters:
artist: Artist name.
title: Track title.
time_started: UTC timestamp of when the track started playing.
source: The source of the track
SCROBBLE_SOURCE_USER: Chosen by the user (the most common value, unless you have a reason for choosing otherwise, use this).
SCROBBLE_SOURCE_NON_PERSONALIZED_BROADCAST: Non-personalised broadcast (e.g. Shoutcast, BBC Radio 1).
SCROBBLE_SOURCE_PERSONALIZED_BROADCAST: Personalised recommendation except Last.fm (e.g. Pandora, Launchcast).
                SCROBBLE_SOURCE_LASTFM: Last.fm (any mode). In this case, the 5-digit recommendation_key value must be set.
SCROBBLE_SOURCE_UNKNOWN: Source unknown.
mode: The submission mode
SCROBBLE_MODE_PLAYED: The track was played.
SCROBBLE_MODE_LOVED: The user manually loved the track (implies a listen)
SCROBBLE_MODE_SKIPPED: The track was skipped (Only if source was Last.fm)
SCROBBLE_MODE_BANNED: The track was banned (Only if source was Last.fm)
duration: Track duration in seconds.
album: The album name.
track_number: The track number on the album.
mbid: MusicBrainz ID.
"""
_deprecation_warning("DeprecationWarning: Use Network.scrobble(...) instead")
params = {"s": self._get_session_id(), "a[0]": _string(artist), "t[0]": _string(title),
"i[0]": str(time_started), "o[0]": source, "r[0]": mode, "l[0]": str(duration),
"b[0]": _string(album), "n[0]": track_number, "m[0]": mbid}
_ScrobblerRequest(self.submissions_url, params, self.network).execute()
def scrobble_many(self, tracks):
"""
Scrobble several tracks at once.
        tracks: A sequence of parameter sequences, one per track. The order of parameters
is the same as if passed to the scrobble() method.
"""
_deprecation_warning("DeprecationWarning: Use Network.scrobble_many(...) instead")
remainder = []
if len(tracks) > 50:
remainder = tracks[50:]
tracks = tracks[:50]
params = {"s": self._get_session_id()}
i = 0
for t in tracks:
_pad_list(t, 9, "")
params["a[%s]" % str(i)] = _string(t[0])
params["t[%s]" % str(i)] = _string(t[1])
params["i[%s]" % str(i)] = str(t[2])
params["o[%s]" % str(i)] = t[3]
params["r[%s]" % str(i)] = t[4]
params["l[%s]" % str(i)] = str(t[5])
params["b[%s]" % str(i)] = _string(t[6])
params["n[%s]" % str(i)] = t[7]
params["m[%s]" % str(i)] = t[8]
i += 1
_ScrobblerRequest(self.submissions_url, params, self.network).execute()
if remainder:
self.scrobble_many(remainder)
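# Illustrative sketch, not part of the original module: batch submission with
# the legacy Scrobbler above. ``scrobbler`` is assumed to be an already
# constructed Scrobbler instance; the track data is made up and each entry
# follows the scrobble() parameter order.
def _demo_scrobble_many(scrobbler):
    now = int(time.time())
    tracks = [
        ["Artist A", "Song One", now - 400, SCROBBLE_SOURCE_USER,
         SCROBBLE_MODE_PLAYED, 210],
        ["Artist B", "Song Two", now - 180, SCROBBLE_SOURCE_USER,
         SCROBBLE_MODE_PLAYED, 180],
    ]
    scrobbler.scrobble_many(tracks)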
| 0.012502 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Profile.rss_url'
db.add_column('profile_profile', 'rss_url',
self.gf('django.db.models.fields.CharField')(default='', max_length=200),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Profile.rss_url'
db.delete_column('profile_profile', 'rss_url')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'profile.profile': {
'Meta': {'object_name': 'Profile'},
'account_type': ('django.db.models.fields.IntegerField', [], {'default': 0}),
'address_1': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'address_2': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'biography': ('django.db.models.fields.TextField', [], {}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'country': ('django.db.models.fields.IntegerField', [], {'default': 215}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'organisation': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'picture_hash': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'rss_url': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['auth.User']"}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
}
}
    complete_apps = ['profile']
 | 0.008166 |
# Copyright (c) 2007 Ferran Pegueroles <[email protected]>
# Copyright (c) 2009 Albert Cervera i Areny <[email protected]>
# Copyright (C) 2011 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright (C) 2011 Domsense srl (<http://www.domsense.com>)
# Copyright (C) 2013-2014 Camptocamp (<http://www.camptocamp.com>)
# Copyright (C) 2016 SYLEAM (<http://www.syleam.fr>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import errno
import logging
import os
from tempfile import mkstemp
from odoo import models, fields, api
_logger = logging.getLogger(__name__)
try:
import cups
except ImportError:
_logger.debug('Cannot `import cups`.')
class PrintingPrinter(models.Model):
"""
Printers
"""
_name = 'printing.printer'
_description = 'Printer'
_order = 'name'
name = fields.Char(required=True, index=True)
active = fields.Boolean(default=True)
server_id = fields.Many2one(
comodel_name='printing.server', string='Server', required=True,
help='Server used to access this printer.')
job_ids = fields.One2many(
comodel_name='printing.job', inverse_name='printer_id', string='Jobs',
help='Jobs printed on this printer.')
system_name = fields.Char(required=True, index=True)
default = fields.Boolean(readonly=True)
status = fields.Selection(
selection=[
('unavailable', 'Unavailable'),
('printing', 'Printing'),
('unknown', 'Unknown'),
('available', 'Available'),
('error', 'Error'),
('server-error', 'Server Error'),
],
required=True,
readonly=True,
default='unknown')
status_message = fields.Char(readonly=True)
model = fields.Char(readonly=True)
location = fields.Char(readonly=True)
uri = fields.Char(string='URI', readonly=True)
tray_ids = fields.One2many(comodel_name='printing.tray',
inverse_name='printer_id',
string='Paper Sources')
@api.multi
def _prepare_update_from_cups(self, cups_connection, cups_printer):
mapping = {
3: 'available',
4: 'printing',
5: 'error'
}
vals = {
'name': cups_printer['printer-info'],
'model': cups_printer.get('printer-make-and-model', False),
'location': cups_printer.get('printer-location', False),
'uri': cups_printer.get('device-uri', False),
'status': mapping.get(cups_printer.get(
'printer-state'), 'unknown'),
'status_message': cups_printer.get('printer-state-message', ''),
}
printer_uri = cups_printer['printer-uri-supported']
printer_system_name = printer_uri[printer_uri.rfind('/') + 1:]
ppd_info = cups_connection.getPPD3(printer_system_name)
ppd_path = ppd_info[2]
if not ppd_path:
return vals
ppd = cups.PPD(ppd_path)
option = ppd.findOption('InputSlot')
try:
os.unlink(ppd_path)
except OSError as err:
# ENOENT means No such file or directory
# The file has already been deleted, we can continue the update
if err.errno != errno.ENOENT:
raise
if not option:
return vals
vals['tray_ids'] = []
cups_trays = {
tray_option['choice']: tray_option['text']
for tray_option in option.choices
}
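        # The one2many commands built below use Odoo's standard tuple format:
        # (0, 0, values) creates a new printing.tray record and (2, id)
        # deletes an existing one.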
# Add new trays
vals['tray_ids'].extend([
(0, 0, {'name': text, 'system_name': choice})
for choice, text in cups_trays.items()
if choice not in self.tray_ids.mapped('system_name')
])
# Remove deleted trays
vals['tray_ids'].extend([
(2, tray.id)
for tray in self.tray_ids.filtered(
lambda record: record.system_name not in cups_trays.keys())
])
return vals
@api.multi
def print_document(self, report, content, **print_opts):
""" Print a file
Format could be pdf, qweb-pdf, raw, ...
"""
self.ensure_one()
fd, file_name = mkstemp()
try:
os.write(fd, content)
finally:
os.close(fd)
return self.print_file(
file_name, report=report, **print_opts)
@staticmethod
def _set_option_doc_format(report, value):
return {'raw': 'True'} if value == 'raw' else {}
# Backwards compatibility of builtin used as kwarg
_set_option_format = _set_option_doc_format
@api.multi
def _set_option_tray(self, report, value):
"""Note we use self here as some older PPD use tray
rather than InputSlot so we may need to query printer in override"""
return {'InputSlot': str(value)} if value else {}
@staticmethod
def _set_option_noop(report, value):
return {}
_set_option_action = _set_option_noop
_set_option_printer = _set_option_noop
@api.multi
def print_options(self, report=None, **print_opts):
options = {}
for option, value in print_opts.items():
try:
options.update(getattr(
self, '_set_option_%s' % option)(report, value))
except AttributeError:
options[option] = str(value)
return options
@api.multi
def print_file(self, file_name, report=None, **print_opts):
""" Print a file """
self.ensure_one()
connection = self.server_id._open_connection(raise_on_error=True)
options = self.print_options(report=report, **print_opts)
_logger.debug(
'Sending job to CUPS printer %s on %s'
% (self.system_name, self.server_id.address))
connection.printFile(self.system_name,
file_name,
file_name,
options=options)
_logger.info("Printing job: '%s' on %s" % (
file_name,
self.server_id.address,
))
return True
@api.multi
def set_default(self):
if not self:
return
self.ensure_one()
default_printers = self.search([('default', '=', True)])
default_printers.unset_default()
self.write({'default': True})
return True
@api.multi
def unset_default(self):
self.write({'default': False})
return True
@api.multi
def get_default(self):
return self.search([('default', '=', True)], limit=1)
@api.multi
def action_cancel_all_jobs(self):
self.ensure_one()
return self.cancel_all_jobs()
@api.multi
def cancel_all_jobs(self, purge_jobs=False):
for printer in self:
connection = printer.server_id._open_connection()
connection.cancelAllJobs(
name=printer.system_name, purge_jobs=purge_jobs)
# Update jobs' states into Odoo
self.mapped('server_id').update_jobs(which='completed')
return True
@api.multi
def enable(self):
for printer in self:
connection = printer.server_id._open_connection()
connection.enablePrinter(printer.system_name)
# Update printers' stats into Odoo
self.mapped('server_id').update_printers()
return True
@api.multi
def disable(self):
for printer in self:
connection = printer.server_id._open_connection()
connection.disablePrinter(printer.system_name)
# Update printers' stats into Odoo
self.mapped('server_id').update_printers()
return True
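# Illustrative sketch, not part of this module: how a caller might pass print
# options. ``printer`` is assumed to be a printing.printer record and
# ``pdf_data`` a bytes payload. Recognised options are translated by the
# matching _set_option_* method (e.g. tray -> InputSlot), while unknown ones
# such as "copies" fall through to plain string CUPS options.
def _demo_print(printer, pdf_data):
    printer.print_document(report=None, content=pdf_data,
                           doc_format='pdf', tray='Tray1', copies=2)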
| 0 |
#!/bin/env python3
# AUTHORS:
# Hakan Ozadam
#
# Moore Laboratory
# UMASS Medical School / HHMI
# RNA Therapeutics Institute
# Albert Sherman Center, ASC4-1009
# 368 Plantation Street
# Worcester, MA 01605
# USA
#
#################################################################
import os
from bal.umass_cluster.prepare import get_arguments, get_executables,\
arrange_input_files
from bal.core.engines import SequentialEngine
from bal.settings import *
from bal.umass_cluster.lsf.PartitionFastqJobGroup import PartitionFastqJobGroup
from bal.umass_cluster.lsf.BalJobGroup import BalJobGroup
from bal.umass_cluster.lsf.MergeBedJobGroup import MergeBedJobGroup
from bal.umass_cluster.lsf.BranchpointRefJobGroup import BranchpointRefJobGroup
from bal.umass_cluster.lsf.AlignBpJobGroup import AlignBpJobGroup
from bal.umass_cluster.lsf.ExtractBpFromSamRefJobGroup import ExtractBpFromSamRefJobGroup
###################################################################
###################################################################
def main():
bal_directory = os.path.dirname(os.path.realpath(__file__))
executables = get_executables(bal_directory)
arguments = get_arguments()
input_directory = os.path.abspath(arguments.i)
output_directory = os.path.abspath(arguments.o)
reference_directory = os.path.abspath(arguments.x)
alignment_mode = arguments.m
reads_per_file = arguments.n
cluster_output_directory = os.path.join(output_directory, ".lsf_out")
partitioned_input_directory = os.path.join(output_directory, "partitioned_input")
partitioned_output_directory = os.path.join(output_directory, "partitioned_output")
merged_bed_directory = os.path.join(output_directory, "merged_bed_files")
candidate_bp_directory = os.path.join(output_directory, "candidate_bp_directory")
bp_reference_directory = os.path.join(output_directory, "bp_bt2_reference")
bp_alignments_directory = os.path.join(output_directory, "bp_alignments")
main_output_directory = os.path.join(output_directory, "output")
genome_fasta_file = os.path.join(reference_directory, "genome.fa")
candidate_bp_genome_level_1_directory = os.path.join(output_directory, "candidate_bp_genome_level_1")
#job_runner = SequentialEngine()
#print("Umass pipeline is working!")
arranged_input_files = arrange_input_files(input_directory, alignment_mode )
####################################
### Partition Input Files #####
####################################
jobGroup = PartitionFastqJobGroup(
input_directory = input_directory ,
output_directory = partitioned_input_directory ,
alignment_mode = alignment_mode ,
run_time = 3 ,
reads_per_file = reads_per_file ,
memory = 4096 ,
executable = executables['partition_fastq']
)
jobGroup.run_in_main()
#####################################
### BAL on Partitioned Data #####
#####################################
bal_main_arguments = arguments.a + " --hpc "
if arguments.rna_strandness == "R":
bal_main_arguments += " --rna-strandness R "
else:
bal_main_arguments += " --rna-strandness F "
bal_threads = arguments.p
if bal_threads < 10:
bal_threads = 10
jobGroup = BalJobGroup(
input_directory = partitioned_input_directory ,
output_directory = partitioned_output_directory ,
arguments = bal_main_arguments ,
reference = reference_directory ,
alignment_mode = alignment_mode ,
threads = bal_threads ,
run_time = arguments.t ,
memory = arguments.memory ,
executable = executables['bal']
)
jobGroup.run_in_main()
#############################################
#### Merge Piece Bed Files ################
#############################################
# Merge the bed files of each library
# Bed files come in pieces as we partition the lib input file
# to speed up the alignment
# First merge the bed files for each library
jobGroup = MergeBedJobGroup(
input_directory = partitioned_output_directory ,
output_directory = merged_bed_directory ,
executable = executables['merge_bed_files']
)
jobGroup.run_in_main()
############################################################
#### Merge Library Candidate BP Bed Files ################
############################################################
# Now we have the merged bed files from each library.
    # So we merge all library files to get the whole list of
# branchpoints coming from all libraries.
merge_bed_jobGroup = MergeBedJobGroup(
input_directory = partitioned_output_directory ,
output_directory = candidate_bp_directory ,
single_file_list = os.path.join(merged_bed_directory, 'candidate_bp_files_list.txt'),
executable = executables['merge_bed_files']
)
merge_bed_jobGroup.run_in_main()
#############################################################
##### Make Bowtie2 Reference ###############################
#############################################################
bp_ref_jobGroup = BranchpointRefJobGroup(
        input_directory = os.path.dirname( os.path.abspath(merge_bed_jobGroup.bp_candidates_bed_file) ), # Note that the input directory has no importance in this class
output_directory = bp_reference_directory ,
bed_file = merge_bed_jobGroup.bp_candidates_bed_file ,
fasta_file = genome_fasta_file,
executable = executables['make_bp_ref'],
number_of_nucleotides = settings['number_of_nucleotides'],
name = "Ref_Branchpoint" ,
run_time = 3, # hours
memory = 8000)
bp_ref_jobGroup.run_in_main()
#############################################################
##### Align Reads Against The Reference ####################
#############################################################
align_bp_jobGroup = AlignBpJobGroup(
input_directory = partitioned_output_directory,
output_directory = bp_alignments_directory,
arguments = ' ',
reference = bp_reference_directory,
alignment_mode = alignment_mode,
threads = 8,
name = "bp_align" ,
run_time = 3,
executable = executables['align_bp'],
memory = 8192 ,
rna_strandness = arguments.rna_strandness)
align_bp_jobGroup.run_in_main()
###############################################################
###### Extract BP from SAM Ref ##############################
###############################################################
extract_bp_from_sam_ref_jobGroup = ExtractBpFromSamRefJobGroup(
input_directory = bp_alignments_directory,
output_directory = candidate_bp_genome_level_1_directory,
individual_lib_directory = align_bp_jobGroup.individual_sam_files_directory,
executable = executables['bp_bed_from_sam_ref'] )
extract_bp_from_sam_ref_jobGroup.run_in_main()
#####################################################################
if __name__ == '__main__':
    main()
 | 0.024343 |
"""
======================================
Compute LCMV beamformer on evoked data
======================================
Compute LCMV beamformer on an evoked dataset for three different choices of
source orientation and store the solutions in stc files for visualization.
"""
# Author: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
# sphinx_gallery_thumbnail_number = 3
import matplotlib.pyplot as plt
import numpy as np
import mne
from mne.datasets import sample
from mne.beamformer import make_lcmv, apply_lcmv
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
label_name = 'Aud-lh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
subjects_dir = data_path + '/subjects'
###############################################################################
# Get epochs
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bad channels
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
exclude='bads')
# Pick the channels of interest
raw.pick_channels([raw.ch_names[pick] for pick in picks])
# Re-normalize our empty-room projectors, so they are fine after subselection
raw.info.normalize_proj()
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
baseline=(None, 0), preload=True, proj=True,
reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
evoked = epochs.average()
forward = mne.read_forward_solution(fname_fwd)
forward = mne.convert_forward_solution(forward, surf_ori=True)
# Compute regularized noise and data covariances
noise_cov = mne.compute_covariance(epochs, tmin=tmin, tmax=0, method='shrunk',
rank=None)
data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15,
method='shrunk', rank=None)
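# The noise covariance comes from the pre-stimulus baseline (tmin to 0 s) and
# the data covariance from the post-stimulus window of interest (0.04-0.15 s);
# the 'shrunk' estimator regularises both so they stay well-conditioned for
# the beamformer weights computed below.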
evoked.plot(time_unit='s')
###############################################################################
# Run beamformers and look at maximum outputs
pick_oris = [None, 'normal', 'max-power', None]
descriptions = ['Free', 'Normal', 'Max-power', 'Fixed']
fig, ax = plt.subplots(1)
max_voxs = list()
colors = list()
for pick_ori, desc in zip(pick_oris, descriptions):
# compute unit-noise-gain beamformer with whitening of the leadfield and
# data (enabled by passing a noise covariance matrix)
if desc == 'Fixed':
use_forward = mne.convert_forward_solution(forward, force_fixed=True)
else:
use_forward = forward
filters = make_lcmv(evoked.info, use_forward, data_cov, reg=0.05,
noise_cov=noise_cov, pick_ori=pick_ori,
weight_norm='unit-noise-gain', rank=None)
print(filters)
# apply this spatial filter to source-reconstruct the evoked data
stc = apply_lcmv(evoked, filters, max_ori_out='signed')
# View activation time-series in maximum voxel at 100 ms:
time_idx = stc.time_as_index(0.1)
max_idx = np.argmax(np.abs(stc.data[:, time_idx]))
# we know these are all left hemi, so we can just use vertices[0]
max_voxs.append(stc.vertices[0][max_idx])
h = ax.plot(stc.times, stc.data[max_idx, :],
label='%s, voxel: %i' % (desc, max_idx))[0]
colors.append(h.get_color())
if pick_ori == 'max-power':
max_stc = stc
ax.axhline(0, color='k')
ax.set(xlabel='Time (ms)', ylabel='LCMV value',
title='LCMV in maximum voxel')
ax.legend(loc='lower right')
mne.viz.utils.plt_show()
###############################################################################
# We can also look at the spatial distribution
# Plot last stc in the brain in 3D with PySurfer if available
brain = max_stc.plot(hemi='lh', views='lat', subjects_dir=subjects_dir,
initial_time=0.1, time_unit='s', smoothing_steps=5)
for color, vertex in zip(colors, max_voxs):
brain.add_foci([vertex], coords_as_verts=True, scale_factor=0.5,
hemi='lh', color=color)
| 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Installation script for Keystone's development virtualenv
"""
import os
import subprocess
import sys
ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
VENV = os.path.join(ROOT, '.venv')
PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires')
def die(message, *args):
print >> sys.stderr, message % args
sys.exit(1)
def run_command(cmd, redirect_output=True, check_exit_code=True):
"""
Runs a command in an out-of-process shell, returning the
output of that command. Working directory is ROOT.
"""
if redirect_output:
stdout = subprocess.PIPE
else:
stdout = None
proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout)
output = proc.communicate()[0]
if check_exit_code and proc.returncode != 0:
die('Command "%s" failed.\n%s', ' '.join(cmd), output)
return output
HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install'],
check_exit_code=False).strip())
HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv'],
check_exit_code=False).strip())
def check_dependencies():
"""Make sure virtualenv is in the path."""
if not HAS_VIRTUALENV:
print 'not found.'
# Try installing it via easy_install...
if HAS_EASY_INSTALL:
print 'Installing virtualenv via easy_install...',
if not run_command(['which', 'easy_install']):
die('ERROR: virtualenv not found.\n\n'
'Keystone development requires virtualenv, please install'
' it using your favorite package management tool')
print 'done.'
print 'done.'
def create_virtualenv(venv=VENV):
"""
Creates the virtual environment and installs PIP only into the
virtual environment
"""
print 'Creating venv...',
run_command(['virtualenv', '-q', '--no-site-packages', VENV])
print 'done.'
print 'Installing pip in virtualenv...',
if not run_command(['tools/with_venv.sh', 'easy_install',
'pip>1.0']).strip():
die("Failed to install pip.")
print 'done.'
def install_dependencies(venv=VENV):
print 'Installing dependencies with pip (this can take a while)...'
# Install greenlet by hand - just listing it in the requires file does not
    # get it installed in the right order
venv_tool = 'tools/with_venv.sh'
run_command([venv_tool, 'pip', 'install', '-E', venv, '-r', PIP_REQUIRES],
redirect_output=False)
# Tell the virtual env how to "import keystone"
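    # Python's site module appends every line of a *.pth file found in
    # site-packages to sys.path at start-up, so the keystone.pth file written
    # below makes this checkout importable inside the virtualenv.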
for version in ['python2.7', 'python2.6']:
pth = os.path.join(venv, "lib", version, "site-packages")
if os.path.exists(pth):
            pthfile = os.path.join(pth, "keystone.pth")
            f = open(pthfile, 'w')
            f.write("%s\n" % ROOT)
            f.close()
def print_help():
help = """
Keystone development environment setup is complete.
Keystone development uses virtualenv to track and manage Python dependencies
while in development and testing.
To activate the Keystone virtualenv for the extent of your current shell
session you can run:
$ source .venv/bin/activate
Or, if you prefer, you can run commands in the virtualenv on a case by case
basis by running:
$ tools/with_venv.sh <your command>
Also, make test will automatically use the virtualenv.
"""
print help
def main(argv):
check_dependencies()
create_virtualenv()
install_dependencies()
print_help()
if __name__ == '__main__':
main(sys.argv)
| 0.000455 |
# (c) Copyright 2014 Brocade Communications Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Unit tests for Brocade fc zone driver."""
import mock
from oslo_config import cfg
from oslo_utils import importutils
import paramiko
import requests
from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
from cinder.zonemanager.drivers.brocade import brcd_fc_zone_driver as driver
_active_cfg_before_add = {}
_active_cfg_before_delete = {
'zones': {
'openstack10008c7cff523b0120240002ac000a50': (
['10:00:8c:7c:ff:52:3b:01',
'20:24:00:02:ac:00:0a:50']), 't_zone': ['1,0']},
'active_zone_config': 'cfg1'}
_activate = True
_zone_name = 'openstack10008c7cff523b0120240002ac000a50'
_target_ns_map = {'100000051e55a100': ['20240002ac000a50']}
_initiator_ns_map = {'100000051e55a100': ['10008c7cff523b01']}
_zone_map_to_add = {'openstack10008c7cff523b0120240002ac000a50': (
['10:00:8c:7c:ff:52:3b:01', '20:24:00:02:ac:00:0a:50'])}
_initiator_target_map = {'10008c7cff523b01': ['20240002ac000a50']}
_device_map_to_verify = {
'100000051e55a100': {
'initiator_port_wwn_list': [
'10008c7cff523b01'], 'target_port_wwn_list': ['20240002ac000a50']}}
_fabric_wwn = '100000051e55a100'
class BrcdFcZoneDriverBaseTest(object):
def setup_config(self, is_normal, mode):
fc_test_opts = [
cfg.StrOpt('fc_fabric_address_BRCD_FAB_1', default='10.24.48.213',
help='FC Fabric names'),
]
configuration = conf.Configuration(fc_test_opts)
# fill up config
configuration.zoning_mode = 'fabric'
configuration.zone_driver = ('cinder.tests.unit.zonemanager.'
'test_brcd_fc_zone_driver.'
'FakeBrcdFCZoneDriver')
configuration.brcd_sb_connector = ('cinder.tests.unit.zonemanager.'
'test_brcd_fc_zone_driver'
'.FakeBrcdFCZoneClientCLI')
configuration.zoning_policy = 'initiator-target'
configuration.zone_activate = True
configuration.zone_name_prefix = 'openstack'
configuration.fc_san_lookup_service = ('cinder.tests.unit.zonemanager.'
'test_brcd_fc_zone_driver.'
'FakeBrcdFCSanLookupService')
configuration.fc_fabric_names = 'BRCD_FAB_1'
configuration.fc_fabric_address_BRCD_FAB_1 = '10.24.48.213'
configuration.fc_southbound_connector = 'CLI'
if is_normal:
configuration.fc_fabric_user_BRCD_FAB_1 = 'admin'
else:
configuration.fc_fabric_user_BRCD_FAB_1 = 'invaliduser'
configuration.fc_fabric_password_BRCD_FAB_1 = 'password'
if mode == 1:
configuration.zoning_policy_BRCD_FAB_1 = 'initiator-target'
elif mode == 2:
configuration.zoning_policy_BRCD_FAB_1 = 'initiator'
else:
configuration.zoning_policy_BRCD_FAB_1 = 'initiator-target'
configuration.zone_activate_BRCD_FAB_1 = True
configuration.zone_name_prefix_BRCD_FAB_1 = 'openstack_fab1'
return configuration
class TestBrcdFcZoneDriver(BrcdFcZoneDriverBaseTest, test.TestCase):
def setUp(self):
super(TestBrcdFcZoneDriver, self).setUp()
# setup config for normal flow
self.setup_driver(self.setup_config(True, 1))
GlobalVars._zone_state = []
def setup_driver(self, config):
self.driver = importutils.import_object(
'cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver'
'.BrcdFCZoneDriver', configuration=config)
def fake__get_active_zone_set(self, brcd_sb_connector, fabric_ip):
return GlobalVars._active_cfg
def get_client(self, protocol='HTTPS'):
conn = ('cinder.tests.unit.zonemanager.test_brcd_fc_zone_driver.' +
('FakeBrcdFCZoneClientCLI' if protocol == "CLI"
else 'FakeBrcdHttpFCZoneClient'))
client = importutils.import_object(
conn,
ipaddress="10.24.48.213",
username="admin",
password="password",
key="/home/stack/.ssh/id_rsa",
port=22,
vfid="2",
protocol=protocol
)
return client
def fake_get_san_context(self, target_wwn_list):
fabric_map = {}
return fabric_map
@mock.patch.object(driver.BrcdFCZoneDriver, '_get_southbound_client')
def test_add_connection(self, get_southbound_client_mock):
"""Normal flow for i-t mode."""
GlobalVars._is_normal_test = True
GlobalVars._zone_state = []
GlobalVars._active_cfg = _active_cfg_before_add
get_southbound_client_mock.return_value = self.get_client("HTTPS")
self.driver.add_connection('BRCD_FAB_1', _initiator_target_map)
self.assertIn(_zone_name, GlobalVars._zone_state)
@mock.patch.object(driver.BrcdFCZoneDriver, '_get_southbound_client')
def test_delete_connection(self, get_southbound_client_mock):
GlobalVars._is_normal_test = True
get_southbound_client_mock.return_value = self.get_client("CLI")
GlobalVars._active_cfg = _active_cfg_before_delete
self.driver.delete_connection(
'BRCD_FAB_1', _initiator_target_map)
self.assertNotIn(_zone_name, GlobalVars._zone_state)
@mock.patch.object(driver.BrcdFCZoneDriver, '_get_southbound_client')
def test_add_connection_for_initiator_mode(self, get_southbound_client_mk):
"""Normal flow for i mode."""
GlobalVars._is_normal_test = True
get_southbound_client_mk.return_value = self.get_client("CLI")
GlobalVars._active_cfg = _active_cfg_before_add
self.setup_driver(self.setup_config(True, 2))
self.driver.add_connection('BRCD_FAB_1', _initiator_target_map)
self.assertIn(_zone_name, GlobalVars._zone_state)
@mock.patch.object(driver.BrcdFCZoneDriver, '_get_southbound_client')
def test_delete_connection_for_initiator_mode(self,
get_southbound_client_mk):
GlobalVars._is_normal_test = True
get_southbound_client_mk.return_value = self.get_client("HTTPS")
GlobalVars._active_cfg = _active_cfg_before_delete
self.setup_driver(self.setup_config(True, 2))
self.driver.delete_connection(
'BRCD_FAB_1', _initiator_target_map)
self.assertNotIn(_zone_name, GlobalVars._zone_state)
def test_add_connection_for_invalid_fabric(self):
"""Test abnormal flows."""
GlobalVars._is_normal_test = True
GlobalVars._active_cfg = _active_cfg_before_add
GlobalVars._is_normal_test = False
self.setup_driver(self.setup_config(False, 1))
self.assertRaises(exception.FCZoneDriverException,
self.driver.add_connection,
'BRCD_FAB_1',
_initiator_target_map)
def test_delete_connection_for_invalid_fabric(self):
GlobalVars._active_cfg = _active_cfg_before_delete
GlobalVars._is_normal_test = False
self.setup_driver(self.setup_config(False, 1))
self.assertRaises(exception.FCZoneDriverException,
self.driver.delete_connection,
'BRCD_FAB_1',
_initiator_target_map)
class FakeClient(object):
def get_active_zone_set(self):
return GlobalVars._active_cfg
def add_zones(self, zones, isActivate, active_zone_set):
GlobalVars._zone_state.extend(zones.keys())
def delete_zones(self, zone_names, isActivate, active_zone_set):
zone_list = zone_names.split(';')
GlobalVars._zone_state = [
x for x in GlobalVars._zone_state if x not in zone_list]
def is_supported_firmware(self):
return True
def get_nameserver_info(self):
return _target_ns_map
def close_connection(self):
pass
def cleanup(self):
pass
class FakeBrcdFCZoneClientCLI(FakeClient):
def __init__(self, ipaddress, username,
password, port, key, vfid, protocol):
self.firmware_supported = True
if not GlobalVars._is_normal_test:
raise paramiko.SSHException("Unable to connect to fabric.")
class FakeBrcdHttpFCZoneClient(FakeClient):
def __init__(self, ipaddress, username,
password, port, key, vfid, protocol):
self.firmware_supported = True
if not GlobalVars._is_normal_test:
            raise requests.exceptions.HTTPError("Unable to connect to fabric")
class FakeBrcdFCSanLookupService(object):
def get_device_mapping_from_network(self,
initiator_wwn_list,
target_wwn_list):
device_map = {}
initiators = []
targets = []
for i in initiator_wwn_list:
if i in _initiator_ns_map[_fabric_wwn]:
initiators.append(i)
for t in target_wwn_list:
if t in _target_ns_map[_fabric_wwn]:
targets.append(t)
device_map[_fabric_wwn] = {
'initiator_port_wwn_list': initiators,
'target_port_wwn_list': targets}
return device_map
class GlobalVars(object):
global _active_cfg
_active_cfg = {}
global _zone_state
_zone_state = list()
global _is_normal_test
_is_normal_test = True
| 0 |
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import sys, textwrap, traceback, StringIO
from functools import partial
from codeop import CommandCompiler
from PyQt4.Qt import QTextEdit, Qt, QTextFrameFormat, pyqtSignal, \
QApplication, QColor, QPalette, QMenu, QActionGroup, QTimer
from pygments.lexers import PythonLexer, PythonTracebackLexer
from pygments.styles import get_all_styles
from calibre.utils.pyconsole.formatter import Formatter
from calibre.utils.pyconsole.controller import Controller
from calibre.utils.pyconsole.history import History
from calibre.utils.pyconsole import prints, prefs, __appname__, \
__version__, error_dialog, dynamic
class EditBlock(object): # {{{
def __init__(self, cursor):
self.cursor = cursor
def __enter__(self):
self.cursor.beginEditBlock()
return self.cursor
def __exit__(self, *args):
self.cursor.endEditBlock()
# }}}
class Prepender(object): # {{{
'Helper class to insert output before the current prompt'
def __init__(self, console):
self.console = console
def __enter__(self):
c = self.console
self.opos = c.cursor_pos
cur = c.prompt_frame.firstCursorPosition()
cur.movePosition(cur.PreviousCharacter)
c.setTextCursor(cur)
def __exit__(self, *args):
self.console.cursor_pos = self.opos
# }}}
class ThemeMenu(QMenu): # {{{
def __init__(self, parent):
QMenu.__init__(self, _('Choose theme (needs restart)'))
parent.addMenu(self)
self.group = QActionGroup(self)
current = prefs['theme']
alls = list(sorted(get_all_styles()))
if current not in alls:
current = prefs['theme'] = 'default'
self.actions = []
for style in alls:
ac = self.group.addAction(style)
ac.setCheckable(True)
if current == style:
ac.setChecked(True)
self.actions.append(ac)
ac.triggered.connect(partial(self.set_theme, style))
self.addAction(ac)
def set_theme(self, style, *args):
prefs['theme'] = style
# }}}
class Console(QTextEdit):
running = pyqtSignal()
running_done = pyqtSignal()
@property
def doc(self):
return self.document()
@property
def cursor(self):
return self.textCursor()
@property
def root_frame(self):
return self.doc.rootFrame()
def unhandled_exception(self, type, value, tb):
if type == KeyboardInterrupt:
return
try:
sio = StringIO.StringIO()
traceback.print_exception(type, value, tb, file=sio)
fe = sio.getvalue()
prints(fe)
try:
val = unicode(value)
except:
val = repr(value)
msg = '<b>%s</b>:'%type.__name__ + val
error_dialog(self, _('ERROR: Unhandled exception'), msg,
det_msg=fe, show=True)
except BaseException:
pass
def __init__(self,
prompt='>>> ',
continuation='... ',
parent=None):
QTextEdit.__init__(self, parent)
self.shutting_down = False
self.compiler = CommandCompiler()
self.buf = self.old_buf = []
self.history = History([''], dynamic.get('console_history', []))
self.prompt_frame = None
self.allow_output = False
self.prompt_frame_format = QTextFrameFormat()
self.prompt_frame_format.setBorder(1)
self.prompt_frame_format.setBorderStyle(QTextFrameFormat.BorderStyle_Solid)
self.prompt_len = len(prompt)
self.doc.setMaximumBlockCount(int(prefs['scrollback']))
self.lexer = PythonLexer(ensurenl=False)
self.tb_lexer = PythonTracebackLexer()
self.context_menu = cm = QMenu(self) # {{{
cm.theme = ThemeMenu(cm)
# }}}
self.formatter = Formatter(prompt, continuation, style=prefs['theme'])
p = QPalette()
p.setColor(p.Base, QColor(self.formatter.background_color))
p.setColor(p.Text, QColor(self.formatter.color))
self.setPalette(p)
self.key_dispatcher = { # {{{
Qt.Key_Enter : self.enter_pressed,
Qt.Key_Return : self.enter_pressed,
Qt.Key_Up : self.up_pressed,
Qt.Key_Down : self.down_pressed,
Qt.Key_Home : self.home_pressed,
Qt.Key_End : self.end_pressed,
Qt.Key_Left : self.left_pressed,
Qt.Key_Right : self.right_pressed,
Qt.Key_Backspace : self.backspace_pressed,
Qt.Key_Delete : self.delete_pressed,
} # }}}
motd = textwrap.dedent('''\
# Python {0}
# {1} {2}
'''.format(sys.version.splitlines()[0], __appname__,
__version__))
sys.excepthook = self.unhandled_exception
self.controllers = []
QTimer.singleShot(0, self.launch_controller)
with EditBlock(self.cursor):
self.render_block(motd)
def shutdown(self):
dynamic.set('console_history', self.history.serialize())
self.shutting_down = True
for c in self.controllers:
c.kill()
def contextMenuEvent(self, event):
self.context_menu.popup(event.globalPos())
event.accept()
# Controller management {{{
@property
def controller(self):
return self.controllers[-1]
def no_controller_error(self):
error_dialog(self, _('No interpreter'),
_('No active interpreter found. Try restarting the'
' console'), show=True)
def launch_controller(self, *args):
c = Controller(self)
c.write_output.connect(self.show_output, type=Qt.QueuedConnection)
c.show_error.connect(self.show_error, type=Qt.QueuedConnection)
c.interpreter_died.connect(self.interpreter_died,
type=Qt.QueuedConnection)
c.interpreter_done.connect(self.execution_done)
self.controllers.append(c)
def interpreter_died(self, controller, returncode):
if not self.shutting_down and controller.current_command is not None:
error_dialog(self, _('Interpreter died'),
                _('Interpreter died while executing a command. To see '
'the command, click Show details'),
det_msg=controller.current_command, show=True)
def execute(self, prompt_lines):
c = self.root_frame.lastCursorPosition()
self.setTextCursor(c)
self.old_prompt_frame = self.prompt_frame
self.prompt_frame = None
self.old_buf = self.buf
self.buf = []
self.running.emit()
self.controller.runsource('\n'.join(prompt_lines))
def execution_done(self, controller, ret):
if controller is self.controller:
self.running_done.emit()
if ret: # Incomplete command
self.buf = self.old_buf
self.prompt_frame = self.old_prompt_frame
c = self.prompt_frame.lastCursorPosition()
c.insertBlock()
self.setTextCursor(c)
else: # Command completed
try:
self.old_prompt_frame.setFrameFormat(QTextFrameFormat())
except RuntimeError:
# Happens if enough lines of output that the old
# frame was deleted
pass
self.render_current_prompt()
# }}}
# Prompt management {{{
@dynamic_property
def cursor_pos(self):
doc = '''
The cursor position in the prompt has the form (row, col).
row starts at 0 for the first line
col is 0 if the cursor is at the start of the line, 1 if it is after
the first character, n if it is after the nth char.
'''
def fget(self):
if self.prompt_frame is not None:
pos = self.cursor.position()
it = self.prompt_frame.begin()
lineno = 0
while not it.atEnd():
bl = it.currentBlock()
if bl.contains(pos):
return (lineno, pos - bl.position())
it += 1
lineno += 1
return (-1, -1)
def fset(self, val):
row, col = val
if self.prompt_frame is not None:
it = self.prompt_frame.begin()
lineno = 0
while not it.atEnd():
if lineno == row:
c = self.cursor
c.setPosition(it.currentBlock().position())
c.movePosition(c.NextCharacter, n=col)
self.setTextCursor(c)
break
it += 1
lineno += 1
return property(fget=fget, fset=fset, doc=doc)
def move_cursor_to_prompt(self):
if self.prompt_frame is not None and self.cursor_pos[0] < 0:
c = self.prompt_frame.lastCursorPosition()
self.setTextCursor(c)
def prompt(self, strip_prompt_strings=True):
if not self.prompt_frame:
yield u'' if strip_prompt_strings else self.formatter.prompt
else:
it = self.prompt_frame.begin()
while not it.atEnd():
bl = it.currentBlock()
t = unicode(bl.text())
if strip_prompt_strings:
t = t[self.prompt_len:]
yield t
it += 1
def set_prompt(self, lines):
self.render_current_prompt(lines)
def clear_current_prompt(self):
if self.prompt_frame is None:
c = self.root_frame.lastCursorPosition()
self.prompt_frame = c.insertFrame(self.prompt_frame_format)
self.setTextCursor(c)
else:
c = self.prompt_frame.firstCursorPosition()
self.setTextCursor(c)
c.setPosition(self.prompt_frame.lastPosition(), c.KeepAnchor)
c.removeSelectedText()
c.setPosition(self.prompt_frame.firstPosition())
def render_current_prompt(self, lines=None, restore_cursor=False):
row, col = self.cursor_pos
cp = list(self.prompt()) if lines is None else lines
self.clear_current_prompt()
for i, line in enumerate(cp):
start = i == 0
end = i == len(cp) - 1
self.formatter.render_prompt(not start, self.cursor)
self.formatter.render(self.lexer.get_tokens(line), self.cursor)
if not end:
self.cursor.insertBlock()
if row > -1 and restore_cursor:
self.cursor_pos = (row, col)
self.ensureCursorVisible()
# }}}
# Non-prompt Rendering {{{
def render_block(self, text, restore_prompt=True):
self.formatter.render(self.lexer.get_tokens(text), self.cursor)
self.cursor.insertBlock()
self.cursor.movePosition(self.cursor.End)
if restore_prompt:
self.render_current_prompt()
def show_error(self, is_syntax_err, tb, controller=None):
if self.prompt_frame is not None:
# At a prompt, so redirect output
return prints(tb, end='')
try:
self.buf.append(tb)
if is_syntax_err:
self.formatter.render_syntax_error(tb, self.cursor)
else:
self.formatter.render(self.tb_lexer.get_tokens(tb), self.cursor)
except:
prints(tb, end='')
self.ensureCursorVisible()
QApplication.processEvents()
def show_output(self, raw, which='stdout', controller=None):
def do_show():
try:
self.buf.append(raw)
self.formatter.render_raw(raw, self.cursor)
except:
import traceback
prints(traceback.format_exc())
prints(raw, end='')
if self.prompt_frame is not None:
with Prepender(self):
do_show()
else:
do_show()
self.ensureCursorVisible()
QApplication.processEvents()
# }}}
# Keyboard management {{{
def keyPressEvent(self, ev):
text = unicode(ev.text())
key = ev.key()
action = self.key_dispatcher.get(key, None)
if callable(action):
action()
elif key in (Qt.Key_Escape,):
QTextEdit.keyPressEvent(self, ev)
elif text:
self.text_typed(text)
else:
QTextEdit.keyPressEvent(self, ev)
def left_pressed(self):
lineno, pos = self.cursor_pos
if lineno < 0: return
if pos > self.prompt_len:
c = self.cursor
c.movePosition(c.PreviousCharacter)
self.setTextCursor(c)
elif lineno > 0:
c = self.cursor
c.movePosition(c.Up)
c.movePosition(c.EndOfLine)
self.setTextCursor(c)
self.ensureCursorVisible()
def up_pressed(self):
lineno, pos = self.cursor_pos
if lineno < 0: return
if lineno == 0:
b = self.history.back()
if b is not None:
self.set_prompt(b)
else:
c = self.cursor
c.movePosition(c.Up)
self.setTextCursor(c)
self.ensureCursorVisible()
def backspace_pressed(self):
lineno, pos = self.cursor_pos
if lineno < 0: return
if pos > self.prompt_len:
self.cursor.deletePreviousChar()
elif lineno > 0:
c = self.cursor
c.movePosition(c.Up)
c.movePosition(c.EndOfLine)
self.setTextCursor(c)
self.ensureCursorVisible()
def delete_pressed(self):
self.cursor.deleteChar()
self.ensureCursorVisible()
def right_pressed(self):
lineno, pos = self.cursor_pos
if lineno < 0: return
c = self.cursor
cp = list(self.prompt(False))
if pos < len(cp[lineno]):
c.movePosition(c.NextCharacter)
elif lineno < len(cp)-1:
c.movePosition(c.NextCharacter, n=1+self.prompt_len)
self.setTextCursor(c)
self.ensureCursorVisible()
def down_pressed(self):
lineno, pos = self.cursor_pos
if lineno < 0: return
c = self.cursor
cp = list(self.prompt(False))
if lineno >= len(cp) - 1:
b = self.history.forward()
if b is not None:
self.set_prompt(b)
else:
c = self.cursor
c.movePosition(c.Down)
self.setTextCursor(c)
self.ensureCursorVisible()
def home_pressed(self):
if self.prompt_frame is not None:
mods = QApplication.keyboardModifiers()
ctrl = bool(int(mods & Qt.CTRL))
if ctrl:
self.cursor_pos = (0, self.prompt_len)
else:
c = self.cursor
c.movePosition(c.StartOfLine)
c.movePosition(c.NextCharacter, n=self.prompt_len)
self.setTextCursor(c)
self.ensureCursorVisible()
def end_pressed(self):
if self.prompt_frame is not None:
mods = QApplication.keyboardModifiers()
ctrl = bool(int(mods & Qt.CTRL))
if ctrl:
self.cursor_pos = (len(list(self.prompt()))-1, self.prompt_len)
c = self.cursor
c.movePosition(c.EndOfLine)
self.setTextCursor(c)
self.ensureCursorVisible()
def enter_pressed(self):
if self.prompt_frame is None:
return
if not self.controller.is_alive:
return self.no_controller_error()
cp = list(self.prompt())
if cp[0]:
try:
ret = self.compiler('\n'.join(cp))
except:
pass
else:
if ret is None:
c = self.prompt_frame.lastCursorPosition()
c.insertBlock()
self.setTextCursor(c)
self.render_current_prompt()
return
else:
self.history.enter(cp)
self.execute(cp)
def text_typed(self, text):
if self.prompt_frame is not None:
self.move_cursor_to_prompt()
self.cursor.insertText(text)
self.render_current_prompt(restore_cursor=True)
self.history.current = list(self.prompt())
# }}}
| 0.00337 |
from __future__ import print_function
import time
from math import pi
from bokeh.browserlib import view
from bokeh.document import Document
from bokeh.models.glyphs import Line, Quad
from bokeh.models import (
Plot, ColumnDataSource, DataRange1d, FactorRange,
LinearAxis, CategoricalAxis, Grid, Legend,
SingleIntervalTicker
)
from bokeh.sampledata.population import load_population
from bokeh.session import Session
from bokeh.models.widgets import Select, HBox, VBox
document = Document()
session = Session()
session.use_doc('population_server')
session.load_document(document)
df = load_population()
revision = 2012
year = 2010
location = "World"
years = [str(x) for x in sorted(df.Year.unique())]
locations = sorted(df.Location.unique())
source_pyramid = ColumnDataSource(data=dict())
def pyramid():
xdr = DataRange1d(sources=[source_pyramid.columns("male"), source_pyramid.columns("female")])
ydr = DataRange1d(sources=[source_pyramid.columns("groups")])
plot = Plot(title=None, x_range=xdr, y_range=ydr, plot_width=600, plot_height=600)
xaxis = LinearAxis()
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis(ticker=SingleIntervalTicker(interval=5))
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
male_quad = Quad(left="male", right=0, bottom="groups", top="shifted", fill_color="#3B8686")
male_quad_glyph = plot.add_glyph(source_pyramid, male_quad)
female_quad = Quad(left=0, right="female", bottom="groups", top="shifted", fill_color="#CFF09E")
female_quad_glyph = plot.add_glyph(source_pyramid, female_quad)
plot.add_layout(Legend(legends=[("Male", [male_quad_glyph]), ("Female", [female_quad_glyph])]))
return plot
source_known = ColumnDataSource(data=dict(x=[], y=[]))
source_predicted = ColumnDataSource(data=dict(x=[], y=[]))
def population():
xdr = FactorRange(factors=years)
ydr = DataRange1d(sources=[source_known.columns("y"), source_predicted.columns("y")])
plot = Plot(title=None, x_range=xdr, y_range=ydr, plot_width=800, plot_height=200)
plot.add_layout(CategoricalAxis(major_label_orientation=pi/4), 'below')
line_known = Line(x="x", y="y", line_color="violet", line_width=2)
line_known_glyph = plot.add_glyph(source_known, line_known)
line_predicted = Line(x="x", y="y", line_color="violet", line_width=2, line_dash="dashed")
line_predicted_glyph = plot.add_glyph(source_predicted, line_predicted)
plot.add_layout(
Legend(
orientation="bottom_right",
legends=[("known", [line_known_glyph]), ("predicted", [line_predicted_glyph])],
)
)
return plot
def update_pyramid():
pyramid = df[(df.Location == location) & (df.Year == year)]
male = pyramid[pyramid.Sex == "Male"]
female = pyramid[pyramid.Sex == "Female"]
total = male.Value.sum() + female.Value.sum()
male_percent = -male.Value/total
female_percent = female.Value/total
groups = male.AgeGrpStart.tolist()
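    # AgeGrpStart appears to hold the lower edge of 5-year age bins (hence the
    # +5 below), so "shifted" is the upper edge of each pyramid bar.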
shifted = groups[1:] + [groups[-1] + 5]
source_pyramid.data = dict(
groups=groups,
shifted=shifted,
male=male_percent,
female=female_percent,
)
def update_population():
population = df[df.Location == location].groupby(df.Year).Value.sum()
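    # Align the revision year down to the start of its decade,
    # e.g. 2012 // 10 * 10 == 2010; years up to the aligned revision are
    # plotted as "known" and years from it onwards as "predicted"
    # (the two series share that point, so the lines join).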
aligned_revision = revision//10 * 10
known = population[population.index <= aligned_revision]
predicted = population[population.index >= aligned_revision]
source_known.data = dict(x=known.index.map(str), y=known.values)
source_predicted.data = dict(x=predicted.index.map(str), y=predicted.values)
def update_data():
update_population()
update_pyramid()
session.store_document(document)
def on_year_change(obj, attr, old, new):
global year
year = int(new)
update_data()
def on_location_change(obj, attr, old, new):
global location
location = new
update_data()
def layout():
year_select = Select(title="Year:", value="2010", options=years)
location_select = Select(title="Location:", value="World", options=locations)
year_select.on_change('value', on_year_change)
location_select.on_change('value', on_location_change)
controls = HBox(children=[year_select, location_select])
layout = VBox(children=[controls, pyramid(), population()])
return layout
document.add(layout())
update_data()
if __name__ == "__main__":
link = session.object_link(document.context)
print("Please visit %s to see the plots" % link)
view(link)
print("\npress ctrl-C to exit")
session.poll_document(document)
| 0.004492 |
# -*- coding: utf-8 -*-
#
# brunel_delta_nest.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
Random balanced network (delta synapses)
----------------------------------------
This script simulates an excitatory and an inhibitory population on
the basis of the network used in
Brunel N, Dynamics of Sparsely Connected Networks of Excitatory and
Inhibitory Spiking Neurons, Journal of Computational Neuroscience 8,
183–208 (2000).
When connecting the network, customary synapse models are used, which
allow for querying the number of created synapses. Using spike
detectors, the average firing rates of the neurons in the populations
are established. The building as well as the simulation time of the
network are recorded.
'''
'''
Import all necessary modules for simulation, analysis and plotting.
'''
import nest
import nest.raster_plot
import time
from numpy import exp
nest.ResetKernel()
'''
Assigning the current time to a variable in order to determine the
build time of the network.
'''
startbuild = time.time()
'''
Assigning the simulation parameters to variables.
'''
dt = 0.1 # the resolution in ms
simtime = 1000.0 # Simulation time in ms
delay = 1.5 # synaptic delay in ms
'''
Definition of the parameters crucial for asynchronous irregular firing
of the neurons.
'''
g = 5.0 # ratio inhibitory weight/excitatory weight
eta = 2.0 # external rate relative to threshold rate
epsilon = 0.1 # connection probability
'''
Definition of the number of neurons in the network and the number of
neurons recorded from.
'''
order = 2500
NE = 4 * order # number of excitatory neurons
NI = 1 * order # number of inhibitory neurons
N_neurons = NE + NI # number of neurons in total
N_rec = 50 # record from 50 neurons
'''
Definition of connectivity parameter
'''
CE = int(epsilon * NE) # number of excitatory synapses per neuron
CI = int(epsilon * NI) # number of inhibitory synapses per neuron
C_tot = int(CI + CE) # total number of synapses per neuron
'''
Initialization of the parameters of the integrate and fire neuron and
the synapses. The parameters of the neuron are stored in a dictionary.
'''
tauMem = 20.0 # time constant of membrane potential in ms
theta = 20.0 # membrane threshold potential in mV
neuron_params = {"C_m": 1.0,
"tau_m": tauMem,
"t_ref": 2.0,
"E_L": 0.0,
"V_reset": 0.0,
"V_m": 0.0,
"V_th": theta}
J = 0.1 # postsynaptic amplitude in mV
J_ex = J # amplitude of excitatory postsynaptic potential
J_in = -g * J_ex # amplitude of inhibitory postsynaptic potential
'''
Definition of threshold rate, which is the external rate needed to fix
the membrane potential around its threshold, the external firing rate
and the rate of the poisson generator which is multiplied by the
in-degree CE and converted to Hz by multiplication by 1000.
'''
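# With the parameter values defined above (theta = 20.0, J = 0.1,
# CE = int(0.1 * 10000) = 1000, tauMem = 20.0, eta = 2.0) this gives
# nu_th = 20 / (0.1 * 1000 * 20) = 0.01 spikes/ms, nu_ex = 0.02 spikes/ms
# and p_rate = 1000.0 * 0.02 * 1000 = 20000.0 Hz.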
nu_th = theta / (J * CE * tauMem)
nu_ex = eta * nu_th
p_rate = 1000.0 * nu_ex * CE
'''
Configuration of the simulation kernel by the previously defined time
resolution used in the simulation. Setting "print_time" to True prints
the already processed simulation time as well as its percentage of the
total simulation time.
'''
nest.SetKernelStatus({"resolution": dt, "print_time": True,
"overwrite_files": True})
print("Building network")
'''
Configuration of the model `iaf_psc_delta` and `poisson_generator`
using SetDefaults(). This function expects the model to be
inserted as a string and the parameters to be specified in a
dictionary. All instances of these models created after this point
will have the properties specified in the dictionary by default.
'''
nest.SetDefaults("iaf_psc_delta", neuron_params)
nest.SetDefaults("poisson_generator", {"rate": p_rate})
'''
Creation of the nodes using `Create`. We store the returned handles in
variables for later reference. Here the excitatory and inhibitory, as
well as the poisson generator and two spike detectors. The spike
detectors will later be used to record excitatory and inhibitory
spikes.
'''
nodes_ex = nest.Create("iaf_psc_delta", NE)
nodes_in = nest.Create("iaf_psc_delta", NI)
noise = nest.Create("poisson_generator")
espikes = nest.Create("spike_detector")
ispikes = nest.Create("spike_detector")
'''
Configuration of the spike detectors recording excitatory and
inhibitory spikes using `SetStatus`, which expects a list of node
handles and a list of parameter dictionaries. Setting the variable
"to_file" to True ensures that the spikes will be recorded in a .gdf
file starting with the string assigned to label. Setting "withtime"
and "withgid" to True ensures that each spike is saved to file by
stating the gid of the spiking neuron and the spike time in one line.
'''
nest.SetStatus(espikes, [{"label": "brunel-py-ex",
"withtime": True,
"withgid": True,
"to_file": True}])
nest.SetStatus(ispikes, [{"label": "brunel-py-in",
"withtime": True,
"withgid": True,
"to_file": True}])
print("Connecting devices")
'''
Definition of a synapse using `CopyModel`, which expects the model
name of a pre-defined synapse, the name of the customary synapse and
an optional parameter dictionary. The parameters defined in the
dictionary will be the default parameter for the customary
synapse. Here we define one synapse for the excitatory and one for the
inhibitory connections giving the previously defined weights and equal
delays.
'''
nest.CopyModel("static_synapse", "excitatory",
{"weight": J_ex, "delay": delay})
nest.CopyModel("static_synapse", "inhibitory",
{"weight": J_in, "delay": delay})
'''
Connecting the previously defined poisson generator to the excitatory
and inhibitory neurons using the excitatory synapse. Since the poisson
generator is connected to all neurons in the population the default
rule ('all_to_all') of Connect() is used. The synaptic properties are
inserted via syn_spec which expects a dictionary when defining
multiple variables or a string when simply using a pre-defined
synapse.
'''
nest.Connect(noise, nodes_ex, syn_spec="excitatory")
nest.Connect(noise, nodes_in, syn_spec="excitatory")
'''
Connecting the first N_rec nodes of the excitatory and inhibitory
population to the associated spike detectors using excitatory
synapses. Here the same shortcut for the specification of the synapse
as defined above is used.
'''
nest.Connect(nodes_ex[:N_rec], espikes, syn_spec="excitatory")
nest.Connect(nodes_in[:N_rec], ispikes, syn_spec="excitatory")
print("Connecting network")
print("Excitatory connections")
'''
Connecting the excitatory population to all neurons using the
pre-defined excitatory synapse. Beforehand, the connection parameters
are defined in a dictionary. Here we use the connection rule
'fixed_indegree', which requires the definition of the indegree. Since
the synapse specification is reduced to assigning the pre-defined
excitatory synapse it suffices to insert a string.
'''
conn_params_ex = {'rule': 'fixed_indegree', 'indegree': CE}
nest.Connect(nodes_ex, nodes_ex + nodes_in, conn_params_ex, "excitatory")
print("Inhibitory connections")
'''
Connecting the inhibitory population to all neurons using the
pre-defined inhibitory synapse. The connection parameters as well as
the synapse parameters are defined analogously to the connection from
the excitatory population defined above.
'''
conn_params_in = {'rule': 'fixed_indegree', 'indegree': CI}
nest.Connect(nodes_in, nodes_ex + nodes_in, conn_params_in, "inhibitory")
'''
Storage of the time point after the buildup of the network in a
variable.
'''
endbuild = time.time()
'''
Simulation of the network.
'''
print("Simulating")
nest.Simulate(simtime)
'''
Storage of the time point after the simulation of the network in a
variable.
'''
endsimulate = time.time()
'''
Reading out the total number of spikes received from the spike
detector connected to the excitatory population and the inhibitory
population.
'''
events_ex = nest.GetStatus(espikes, "n_events")[0]
events_in = nest.GetStatus(ispikes, "n_events")[0]
'''
Calculation of the average firing rate of the excitatory and the
inhibitory neurons by dividing the total number of recorded spikes by
the number of neurons recorded from and the simulation time. The
multiplication by 1000.0 converts the unit 1/ms to 1/s=Hz.
'''
rate_ex = events_ex / simtime * 1000.0 / N_rec
rate_in = events_in / simtime * 1000.0 / N_rec
'''
Reading out the number of connections established using the excitatory
and inhibitory synapse model. The numbers are summed up resulting in
the total number of synapses.
'''
num_synapses = (nest.GetDefaults("excitatory")["num_connections"] +
nest.GetDefaults("inhibitory")["num_connections"])
'''
Establishing the time it took to build and simulate the network by
taking the difference of the pre-defined time variables.
'''
build_time = endbuild - startbuild
sim_time = endsimulate - endbuild
'''
Printing the network properties, firing rates and building times.
'''
print("Brunel network simulation (Python)")
print("Number of neurons : {0}".format(N_neurons))
print("Number of synapses: {0}".format(num_synapses))
print(" Exitatory : {0}".format(int(CE * N_neurons) + N_neurons))
print(" Inhibitory : {0}".format(int(CI * N_neurons)))
print("Excitatory rate : %.2f Hz" % rate_ex)
print("Inhibitory rate : %.2f Hz" % rate_in)
print("Building time : %.2f s" % build_time)
print("Simulation time : %.2f s" % sim_time)
'''
Plot a raster of the excitatory neurons and a histogram.
'''
nest.raster_plot.from_device(espikes, hist=True)
| 0.000384 |
# (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.utils.unicode import to_unicode
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
convert_data_p = kwargs.get('convert_data', True)
ret = []
for term in terms:
display.debug("File lookup term: %s" % term)
lookupfile = self.find_file_in_search_path(variables, 'templates', term)
display.vvvv("File lookup using %s as file" % lookupfile)
if lookupfile:
with open(lookupfile, 'r') as f:
template_data = to_unicode(f.read())
# set jinja2 internal search path for includes
if 'ansible_search_path' in variables:
searchpath = variables['ansible_search_path']
else:
searchpath = [self._loader._basedir, os.path.dirname(lookupfile)]
self._templar.environment.loader.searchpath = searchpath
# do the templating
                res = self._templar.template(template_data, preserve_trailing_newlines=True, convert_data=convert_data_p)
ret.append(res)
else:
raise AnsibleError("the template file %s could not be found for the lookup" % term)
return ret
| 0.002146 |
###########################################
# Suppress matplotlib user warnings
# Necessary for newer version of matplotlib
import warnings
warnings.filterwarnings("ignore", category = UserWarning, module = "matplotlib")
#
# Display inline matplotlib plots with IPython
from IPython import get_ipython
get_ipython().run_line_magic('matplotlib', 'inline')
###########################################
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pandas as pd
import numpy as np
def pca_results(good_data, pca):
'''
Create a DataFrame of the PCA results
Includes dimension feature weights and explained variance
Visualizes the PCA results
'''
# Dimension indexing
    dimensions = ['Dimension {}'.format(i) for i in range(1, len(pca.components_) + 1)]
# PCA components
components = pd.DataFrame(np.round(pca.components_, 4), columns = good_data.keys())
components.index = dimensions
# PCA explained variance
ratios = pca.explained_variance_ratio_.reshape(len(pca.components_), 1)
variance_ratios = pd.DataFrame(np.round(ratios, 4), columns = ['Explained Variance'])
variance_ratios.index = dimensions
# Create a bar plot visualization
fig, ax = plt.subplots(figsize = (14,8))
# Plot the feature weights as a function of the components
components.plot(ax = ax, kind = 'bar');
ax.set_ylabel("Feature Weights")
ax.set_xticklabels(dimensions, rotation=0)
# Display the explained variance ratios
for i, ev in enumerate(pca.explained_variance_ratio_):
ax.text(i-0.40, ax.get_ylim()[1] + 0.05, "Explained Variance\n %.4f"%(ev))
# Return a concatenated DataFrame
return pd.concat([variance_ratios, components], axis = 1)
def cluster_results(reduced_data, preds, centers, pca_samples):
'''
Visualizes the PCA-reduced cluster data in two dimensions
Adds cues for cluster centers and student-selected sample data
'''
predictions = pd.DataFrame(preds, columns = ['Cluster'])
plot_data = pd.concat([predictions, reduced_data], axis = 1)
# Generate the cluster plot
fig, ax = plt.subplots(figsize = (14,8))
# Color map
cmap = cm.get_cmap('gist_rainbow')
# Color the points based on assigned cluster
for i, cluster in plot_data.groupby('Cluster'):
cluster.plot(ax = ax, kind = 'scatter', x = 'Dimension 1', y = 'Dimension 2', \
color = cmap((i)*1.0/(len(centers)-1)), label = 'Cluster %i'%(i), s=30);
# Plot centers with indicators
for i, c in enumerate(centers):
ax.scatter(x = c[0], y = c[1], color = 'white', edgecolors = 'black', \
alpha = 1, linewidth = 2, marker = 'o', s=200);
ax.scatter(x = c[0], y = c[1], marker='$%d$'%(i), alpha = 1, s=100);
# Plot transformed sample points
ax.scatter(x = pca_samples[:,0], y = pca_samples[:,1], \
s = 150, linewidth = 4, color = 'black', marker = 'x');
# Set plot title
ax.set_title("Cluster Learning on PCA-Reduced Data - Centroids Marked by Number\nTransformed Sample Data Marked by Black Cross");
def biplot(good_data, reduced_data, pca):
'''
Produce a biplot that shows a scatterplot of the reduced
data and the projections of the original features.
good_data: original data, before transformation.
Needs to be a pandas dataframe with valid column names
reduced_data: the reduced data (the first two dimensions are plotted)
pca: pca object that contains the components_ attribute
return: a matplotlib AxesSubplot object (for any additional customization)
This procedure is inspired by the script:
https://github.com/teddyroland/python-biplot
'''
fig, ax = plt.subplots(figsize = (14,8))
# scatterplot of the reduced data
ax.scatter(x=reduced_data.loc[:, 'Dimension 1'], y=reduced_data.loc[:, 'Dimension 2'],
facecolors='b', edgecolors='b', s=70, alpha=0.5)
feature_vectors = pca.components_.T
# we use scaling factors to make the arrows easier to see
arrow_size, text_pos = 7.0, 8.0,
# projections of the original features
for i, v in enumerate(feature_vectors):
ax.arrow(0, 0, arrow_size*v[0], arrow_size*v[1],
head_width=0.2, head_length=0.2, linewidth=2, color='red')
ax.text(v[0]*text_pos, v[1]*text_pos, good_data.columns[i], color='black',
ha='center', va='center', fontsize=18)
ax.set_xlabel("Dimension 1", fontsize=14)
ax.set_ylabel("Dimension 2", fontsize=14)
ax.set_title("PC plane with original feature projections.", fontsize=16);
return ax
def channel_results(reduced_data, outliers, pca_samples):
'''
Visualizes the PCA-reduced cluster data in two dimensions using the full dataset
Data is labeled by "Channel" and cues added for student-selected sample data
'''
# Check that the dataset is loadable
try:
full_data = pd.read_csv("customers.csv")
except:
print "Dataset could not be loaded. Is the file missing?"
return False
# Create the Channel DataFrame
channel = pd.DataFrame(full_data['Channel'], columns = ['Channel'])
channel = channel.drop(channel.index[outliers]).reset_index(drop = True)
labeled = pd.concat([reduced_data, channel], axis = 1)
# Generate the cluster plot
fig, ax = plt.subplots(figsize = (14,8))
# Color map
cmap = cm.get_cmap('gist_rainbow')
# Color the points based on assigned Channel
labels = ['Hotel/Restaurant/Cafe', 'Retailer']
grouped = labeled.groupby('Channel')
for i, channel in grouped:
channel.plot(ax = ax, kind = 'scatter', x = 'Dimension 1', y = 'Dimension 2', \
color = cmap((i-1)*1.0/2), label = labels[i-1], s=30);
# Plot transformed sample points
for i, sample in enumerate(pca_samples):
ax.scatter(x = sample[0], y = sample[1], \
s = 200, linewidth = 3, color = 'black', marker = 'o', facecolors = 'none');
ax.scatter(x = sample[0]+0.25, y = sample[1]+0.3, marker='$%d$'%(i), alpha = 1, s=125);
# Set plot title
ax.set_title("PCA-Reduced Data Labeled by 'Channel'\nTransformed Sample Data Circled"); | 0.045477 |
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0401,W0614
from telemetry import story
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
class SkiaDesktopPage(page_module.Page):
def __init__(self, url, page_set):
super(SkiaDesktopPage, self).__init__(
url=url,
name=url,
page_set=page_set,
shared_page_state_class=shared_page_state.SharedDesktopPageState)
self.archive_data_file = 'data/skia_googleimagesearch_desktop.json'
def RunNavigateSteps(self, action_runner):
action_runner.Navigate(self.url)
action_runner.Wait(15)
class SkiaGoogleimagesearchDesktopPageSet(story.StorySet):
""" Pages designed to represent the median, not highly optimized web """
def __init__(self):
super(SkiaGoogleimagesearchDesktopPageSet, self).__init__(
archive_data_file='data/skia_googleimagesearch_desktop.json')
urls_list = [
# go/skia-skps-3-2019
'https://www.google.com/search?q=cats&tbm=isch',
]
for url in urls_list:
      self.AddStory(SkiaDesktopPage(url, self))
| 0.004886 |
#
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010 by Joel Andersson, Moritz Diehl, K.U.Leuven. All rights reserved.
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
#! Linear solvers
#! =================
#!
#! We demonstrate solving a dense system A.x=b by using different linear solvers.
#!
from casadi import *
from numpy import *
import time
n=100
#$ We generate $A \in \mathbf{R}^{n \times n}$, $x \in \mathbf{R}^{n}$ with $n=100$
A=DMatrix([[cos(i*j)-sin(i) for i in range(n)] for j in range(n)])
x=DMatrix([tan(i) for i in range(n)])
#! We generate the b vector:
b=mul(A,x)
#! We demonstrate the LinearSolver API with CSparse:
s = CSparse(A.sparsity())
s.init()
#! Give it the matrix A
s.input(0).set(A)
#! Do the LU factorization
s.prepare()
#! Give it the matrix b
s.input(1).set(b)
#! And we are off to find x...
s.solve()
x_ = s.output()
#! By looking at the residuals between the x we knew in advance and the computed x, we see that the CSparse solver works
print "Sum of residuals = %.2e" % sumAll(fabs(x-x_))
#! Comparison of different linear solvers
#! ======================================
for name, solver in [("LapackLUDense",LapackLUDense),("LapackQRDense",LapackQRDense),("CSparse",CSparse)]:
s = solver(A.sparsity()) # We create a solver
s.init()
s.input(0).set(A) # Give it the matrix A
t0 = time.time()
for i in range(100):
s.prepare() # Do the LU factorization
pt = (time.time()-t0)/100
s.input(1).set(b) # Give it the matrix b
t0 = time.time()
for i in range(100):
s.solve()
st = (time.time()-t0)/100
x_ = s.output()
print ""
print name
print "=" * 10
print "Sum of residuals = %.2e" % sumAll(fabs(x-x_))
print "Preparation time = %0.2f ms" % (pt*1000)
print "Solve time = %0.2f ms" % (st*1000)
assert(sumAll(fabs(x-x_))<1e-9)
#! Note that these
| 0.023828 |
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from typing import Optional
import pytest
import ibis.expr.types as ir
from ibis.backends.base.sql import SQLClient
from ibis.backends.base.sql.alchemy import (
AlchemyCompiler,
AlchemyTable,
table_from_schema,
)
from ibis.expr.schema import Schema
from ibis.expr.typing import TimeContext
class MockConnection(SQLClient, metaclass=abc.ABCMeta):
def __init__(self):
self.executed_queries = []
_tables = {
'alltypes': [
('a', 'int8'),
('b', 'int16'),
('c', 'int32'),
('d', 'int64'),
('e', 'float'),
('f', 'double'),
('g', 'string'),
('h', 'boolean'),
('i', 'timestamp'),
('j', 'date'),
('k', 'time'),
],
'star1': [
('c', 'int32'),
('f', 'double'),
('foo_id', 'string'),
('bar_id', 'string'),
],
'star2': [
('foo_id', 'string'),
('value1', 'double'),
('value3', 'double'),
],
'star3': [('bar_id', 'string'), ('value2', 'double')],
'test1': [('c', 'int32'), ('f', 'double'), ('g', 'string')],
'test2': [('key', 'string'), ('value', 'double')],
'tpch_region': [
('r_regionkey', 'int16'),
('r_name', 'string'),
('r_comment', 'string'),
],
'tpch_nation': [
('n_nationkey', 'int16'),
('n_name', 'string'),
('n_regionkey', 'int16'),
('n_comment', 'string'),
],
'tpch_lineitem': [
('l_orderkey', 'int64'),
('l_partkey', 'int64'),
('l_suppkey', 'int64'),
('l_linenumber', 'int32'),
('l_quantity', 'decimal(12,2)'),
('l_extendedprice', 'decimal(12,2)'),
('l_discount', 'decimal(12,2)'),
('l_tax', 'decimal(12,2)'),
('l_returnflag', 'string'),
('l_linestatus', 'string'),
('l_shipdate', 'string'),
('l_commitdate', 'string'),
('l_receiptdate', 'string'),
('l_shipinstruct', 'string'),
('l_shipmode', 'string'),
('l_comment', 'string'),
],
'tpch_customer': [
('c_custkey', 'int64'),
('c_name', 'string'),
('c_address', 'string'),
('c_nationkey', 'int16'),
('c_phone', 'string'),
('c_acctbal', 'decimal'),
('c_mktsegment', 'string'),
('c_comment', 'string'),
],
'tpch_orders': [
('o_orderkey', 'int64'),
('o_custkey', 'int64'),
('o_orderstatus', 'string'),
('o_totalprice', 'decimal(12,2)'),
('o_orderdate', 'string'),
('o_orderpriority', 'string'),
('o_clerk', 'string'),
('o_shippriority', 'int32'),
('o_comment', 'string'),
],
'functional_alltypes': [
('id', 'int32'),
('bool_col', 'boolean'),
('tinyint_col', 'int8'),
('smallint_col', 'int16'),
('int_col', 'int32'),
('bigint_col', 'int64'),
('float_col', 'float'),
('double_col', 'double'),
('date_string_col', 'string'),
('string_col', 'string'),
('timestamp_col', 'timestamp'),
('year', 'int32'),
('month', 'int32'),
],
'airlines': [
('year', 'int32'),
('month', 'int32'),
('day', 'int32'),
('dayofweek', 'int32'),
('dep_time', 'int32'),
('crs_dep_time', 'int32'),
('arr_time', 'int32'),
('crs_arr_time', 'int32'),
('carrier', 'string'),
('flight_num', 'int32'),
('tail_num', 'int32'),
('actual_elapsed_time', 'int32'),
('crs_elapsed_time', 'int32'),
('airtime', 'int32'),
('arrdelay', 'int32'),
('depdelay', 'int32'),
('origin', 'string'),
('dest', 'string'),
('distance', 'int32'),
('taxi_in', 'int32'),
('taxi_out', 'int32'),
('cancelled', 'int32'),
('cancellation_code', 'string'),
('diverted', 'int32'),
('carrier_delay', 'int32'),
('weather_delay', 'int32'),
('nas_delay', 'int32'),
('security_delay', 'int32'),
('late_aircraft_delay', 'int32'),
],
'tpcds_customer': [
('c_customer_sk', 'int64'),
('c_customer_id', 'string'),
('c_current_cdemo_sk', 'int32'),
('c_current_hdemo_sk', 'int32'),
('c_current_addr_sk', 'int32'),
('c_first_shipto_date_sk', 'int32'),
('c_first_sales_date_sk', 'int32'),
('c_salutation', 'string'),
('c_first_name', 'string'),
('c_last_name', 'string'),
('c_preferred_cust_flag', 'string'),
('c_birth_day', 'int32'),
('c_birth_month', 'int32'),
('c_birth_year', 'int32'),
('c_birth_country', 'string'),
('c_login', 'string'),
('c_email_address', 'string'),
('c_last_review_date', 'string'),
],
'tpcds_customer_address': [
('ca_address_sk', 'bigint'),
('ca_address_id', 'string'),
('ca_street_number', 'string'),
('ca_street_name', 'string'),
('ca_street_type', 'string'),
('ca_suite_number', 'string'),
('ca_city', 'string'),
('ca_county', 'string'),
('ca_state', 'string'),
('ca_zip', 'string'),
('ca_country', 'string'),
('ca_gmt_offset', 'decimal(5,2)'),
('ca_location_type', 'string'),
],
'tpcds_customer_demographics': [
('cd_demo_sk', 'bigint'),
('cd_gender', 'string'),
('cd_marital_status', 'string'),
('cd_education_status', 'string'),
('cd_purchase_estimate', 'int'),
('cd_credit_rating', 'string'),
('cd_dep_count', 'int'),
('cd_dep_employed_count', 'int'),
('cd_dep_college_count', 'int'),
],
'tpcds_date_dim': [
('d_date_sk', 'bigint'),
('d_date_id', 'string'),
('d_date', 'string'),
('d_month_seq', 'int'),
('d_week_seq', 'int'),
('d_quarter_seq', 'int'),
('d_year', 'int'),
('d_dow', 'int'),
('d_moy', 'int'),
('d_dom', 'int'),
('d_qoy', 'int'),
('d_fy_year', 'int'),
('d_fy_quarter_seq', 'int'),
('d_fy_week_seq', 'int'),
('d_day_name', 'string'),
('d_quarter_name', 'string'),
('d_holiday', 'string'),
('d_weekend', 'string'),
('d_following_holiday', 'string'),
('d_first_dom', 'int'),
('d_last_dom', 'int'),
('d_same_day_ly', 'int'),
('d_same_day_lq', 'int'),
('d_current_day', 'string'),
('d_current_week', 'string'),
('d_current_month', 'string'),
('d_current_quarter', 'string'),
('d_current_year', 'string'),
],
'tpcds_household_demographics': [
('hd_demo_sk', 'bigint'),
('hd_income_band_sk', 'int'),
('hd_buy_potential', 'string'),
('hd_dep_count', 'int'),
('hd_vehicle_count', 'int'),
],
'tpcds_item': [
('i_item_sk', 'bigint'),
('i_item_id', 'string'),
('i_rec_start_date', 'string'),
('i_rec_end_date', 'string'),
('i_item_desc', 'string'),
('i_current_price', 'decimal(7,2)'),
('i_wholesale_cost', 'decimal(7,2)'),
('i_brand_id', 'int'),
('i_brand', 'string'),
('i_class_id', 'int'),
('i_class', 'string'),
('i_category_id', 'int'),
('i_category', 'string'),
('i_manufact_id', 'int'),
('i_manufact', 'string'),
('i_size', 'string'),
('i_formulation', 'string'),
('i_color', 'string'),
('i_units', 'string'),
('i_container', 'string'),
('i_manager_id', 'int'),
('i_product_name', 'string'),
],
'tpcds_promotion': [
('p_promo_sk', 'bigint'),
('p_promo_id', 'string'),
('p_start_date_sk', 'int'),
('p_end_date_sk', 'int'),
('p_item_sk', 'int'),
('p_cost', 'decimal(15,2)'),
('p_response_target', 'int'),
('p_promo_name', 'string'),
('p_channel_dmail', 'string'),
('p_channel_email', 'string'),
('p_channel_catalog', 'string'),
('p_channel_tv', 'string'),
('p_channel_radio', 'string'),
('p_channel_press', 'string'),
('p_channel_event', 'string'),
('p_channel_demo', 'string'),
('p_channel_details', 'string'),
('p_purpose', 'string'),
('p_discount_active', 'string'),
],
'tpcds_store': [
('s_store_sk', 'bigint'),
('s_store_id', 'string'),
('s_rec_start_date', 'string'),
('s_rec_end_date', 'string'),
('s_closed_date_sk', 'int'),
('s_store_name', 'string'),
('s_number_employees', 'int'),
('s_floor_space', 'int'),
('s_hours', 'string'),
('s_manager', 'string'),
('s_market_id', 'int'),
('s_geography_class', 'string'),
('s_market_desc', 'string'),
('s_market_manager', 'string'),
('s_division_id', 'int'),
('s_division_name', 'string'),
('s_company_id', 'int'),
('s_company_name', 'string'),
('s_street_number', 'string'),
('s_street_name', 'string'),
('s_street_type', 'string'),
('s_suite_number', 'string'),
('s_city', 'string'),
('s_county', 'string'),
('s_state', 'string'),
('s_zip', 'string'),
('s_country', 'string'),
('s_gmt_offset', 'decimal(5,2)'),
('s_tax_precentage', 'decimal(5,2)'),
],
'tpcds_store_sales': [
('ss_sold_time_sk', 'bigint'),
('ss_item_sk', 'bigint'),
('ss_customer_sk', 'bigint'),
('ss_cdemo_sk', 'bigint'),
('ss_hdemo_sk', 'bigint'),
('ss_addr_sk', 'bigint'),
('ss_store_sk', 'bigint'),
('ss_promo_sk', 'bigint'),
('ss_ticket_number', 'int'),
('ss_quantity', 'int'),
('ss_wholesale_cost', 'decimal(7,2)'),
('ss_list_price', 'decimal(7,2)'),
('ss_sales_price', 'decimal(7,2)'),
('ss_ext_discount_amt', 'decimal(7,2)'),
('ss_ext_sales_price', 'decimal(7,2)'),
('ss_ext_wholesale_cost', 'decimal(7,2)'),
('ss_ext_list_price', 'decimal(7,2)'),
('ss_ext_tax', 'decimal(7,2)'),
('ss_coupon_amt', 'decimal(7,2)'),
('ss_net_paid', 'decimal(7,2)'),
('ss_net_paid_inc_tax', 'decimal(7,2)'),
('ss_net_profit', 'decimal(7,2)'),
('ss_sold_date_sk', 'bigint'),
],
'tpcds_time_dim': [
('t_time_sk', 'bigint'),
('t_time_id', 'string'),
('t_time', 'int'),
('t_hour', 'int'),
('t_minute', 'int'),
('t_second', 'int'),
('t_am_pm', 'string'),
('t_shift', 'string'),
('t_sub_shift', 'string'),
('t_meal_time', 'string'),
],
}
def fetch_from_cursor(self, cursor, schema):
pass
def get_schema(self, name):
name = name.replace('`', '')
return Schema.from_tuples(self._tables[name])
def execute(self, expr, limit=None, params=None, **kwargs):
ast = self.compiler.to_ast_ensure_limit(expr, limit, params=params)
for query in ast.queries:
self.executed_queries.append(query.compile())
return None
def compile(
self,
expr,
limit=None,
params=None,
timecontext: Optional[TimeContext] = None,
):
ast = self.compiler.to_ast_ensure_limit(expr, limit, params=params)
queries = [q.compile() for q in ast.queries]
return queries[0] if len(queries) == 1 else queries
class MockAlchemyConnection(MockConnection):
compiler = AlchemyCompiler
def __init__(self):
super().__init__()
sa = pytest.importorskip('sqlalchemy')
self.meta = sa.MetaData()
def table(self, name, database=None):
schema = self.get_schema(name)
return self._inject_table(name, schema)
def _inject_table(self, name, schema):
if name in self.meta.tables:
table = self.meta.tables[name]
else:
table = table_from_schema(name, self.meta, schema)
node = AlchemyTable(table, self)
return ir.TableExpr(node)
GEO_TABLE = {
'geo': [
('id', 'int32'),
('geo_point', 'point'),
('geo_linestring', 'linestring'),
('geo_polygon', 'polygon'),
('geo_multipolygon', 'multipolygon'),
]
}
class GeoMockConnectionPostGIS(MockAlchemyConnection):
_tables = GEO_TABLE
def __init__(self):
super().__init__()
self.executed_queries = []
def get_schema(self, name):
return Schema.from_tuples(self._tables[name])
class GeoMockConnectionOmniSciDB(SQLClient):
_tables = GEO_TABLE
def __init__(self):
super().__init__()
self.executed_queries = []
def get_schema(self, name):
return Schema.from_tuples(self._tables[name])
| 0 |
#!/usr/bin/env python
# *-* coding: UTF-8 *-*
"""În laboratorul lui Tuxy toți cercetătorii au asignat un id
de utilizator.
Pentru fiecare cercetător se salvează într-o listă de fiecare
dată când a deschis ușa (fie pentru a intra, fie pentru a ieși).
Tuxy suspectează că cineva rămâne tot timpul după program și
ar dori să scrie un script care să îi verifice teoria, dar
nu a reușit pentru că algoritmul său era prea costisitor pentru
sistem.
Cerințe:
I. Găsește cercetătorul ce stă peste program după o singură
parcurgere a listei
II. Găsește cercetătorul ce stă peste program după o singură
parcurgere a listei și fără a aloca memorie suplimentară.
"""
def gaseste_unic(istoric):
"""Găsește elementul unic.
Funcția primește o listă cu elemente numerice și trebuie
să returneze elementul care nu este duplicat.
Exemple:
1 2 3 2 1 - 3
1 1 1 2 2 - 1
"""
singur = 0
for numar in istoric:
singur = singur ^ numar
return singur
if __name__ == "__main__":
assert gaseste_unic([1, 2, 3, 2, 1]) == 3
assert gaseste_unic([1, 1, 1, 2, 2]) == 1
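    # The single pass works because XOR cancels pairs: x ^ x == 0 and
    # x ^ 0 == x, so every id that appears an even number of times drops
    # out and only the unduplicated id survives, with O(1) extra memory.
    assert (5 ^ 5) == 0 and (5 ^ 0) == 5
    assert gaseste_unic([7, 3, 7]) == 3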
| 0 |
#!/usr/bin/env python
import coloreffect
class C(coloreffect.ColorEffect):
def __init__(self):
coloreffect.ColorEffect.__init__(self)
self.OptionParser.add_option("--r",
action="store", type="string",
dest="rFunction", default="r",
help="red channel function")
self.OptionParser.add_option("--g",
action="store", type="string",
dest="gFunction", default="g",
help="green channel function")
self.OptionParser.add_option("--b",
action="store", type="string",
dest="bFunction", default="b",
help="blue channel function")
self.OptionParser.add_option("--tab",
action="store", type="string",
dest="tab",
help="The selected UI-tab when OK was pressed")
self.OptionParser.add_option("--scale",
action="store", type="string",
dest="scale",
help="The input (r,g,b) range")
def normalize(self, v):
if v<0:
return 0.0
if v > float(self.options.scale):
return float(self.options.scale)
return v
def _hexstr(self,r,g,b):
return '%02x%02x%02x' % (int(round(r)),int(round(g)),int(round(b)))
def colmod(self,_r,_g,_b):
factor = 255.0/float(self.options.scale)
r=float(_r)/factor
g=float(_g)/factor
b=float(_b)/factor
# add stuff to be accessible from within the custom color function here.
safeenv = {'__builtins__':{},'r':r,'g':g,'b':b}
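        # Only r, g and b are visible to the eval() calls below, so custom
        # channel functions are limited to arithmetic on those names. A
        # minimal sketch of how extra helpers could be exposed (assuming
        # "import math" were added at the top of this file):
        #   safeenv['sqrt'] = math.sqrt   # would allow e.g. "sqrt(r*g)"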
try:
r2=self.normalize(eval(self.options.rFunction,safeenv))
g2=self.normalize(eval(self.options.gFunction,safeenv))
b2=self.normalize(eval(self.options.bFunction,safeenv))
except:
return self._hexstr(255.0,0.0,0.0)
return self._hexstr(r2*factor,g2*factor,b2*factor)
c = C()
c.affect()
| 0.024806 |
from __future__ import unicode_literals
import json
import time
from .common import InfoExtractor
from ..compat import compat_urllib_parse_urlencode
from ..utils import (
ExtractorError,
sanitized_Request,
)
class HypemIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?hypem\.com/track/(?P<id>[^/]+)/'
_TEST = {
'url': 'http://hypem.com/track/1v6ga/BODYWORK+-+TAME',
'md5': 'b9cc91b5af8995e9f0c1cee04c575828',
'info_dict': {
'id': '1v6ga',
'ext': 'mp3',
'title': 'Tame',
'uploader': 'BODYWORK',
}
}
def _real_extract(self, url):
track_id = self._match_id(url)
data = {'ax': 1, 'ts': time.time()}
request = sanitized_Request(url + '?' + compat_urllib_parse_urlencode(data))
response, urlh = self._download_webpage_handle(
request, track_id, 'Downloading webpage with the url')
html_tracks = self._html_search_regex(
r'(?ms)<script type="application/json" id="displayList-data">(.+?)</script>',
response, 'tracks')
try:
track_list = json.loads(html_tracks)
track = track_list['tracks'][0]
except ValueError:
raise ExtractorError('Hypemachine contained invalid JSON.')
key = track['key']
track_id = track['id']
title = track['song']
request = sanitized_Request(
'http://hypem.com/serve/source/%s/%s' % (track_id, key),
'', {'Content-Type': 'application/json'})
song_data = self._download_json(request, track_id, 'Downloading metadata')
final_url = song_data['url']
artist = track.get('artist')
return {
'id': track_id,
'url': final_url,
'ext': 'mp3',
'title': title,
'uploader': artist,
}
| 0.00159 |
# -*- coding: utf-8 -*-
"""
sphinx.jinja2glue
~~~~~~~~~~~~~~~~~
Glue code for the jinja2 templating engine.
:copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from os import path
from pprint import pformat
from jinja2 import FileSystemLoader, BaseLoader, TemplateNotFound, \
contextfunction
from jinja2.utils import open_if_exists
from jinja2.sandbox import SandboxedEnvironment
from sphinx.application import TemplateBridge
from sphinx.util.osutil import mtimes_of_files
def _tobool(val):
if isinstance(val, str):
return val.lower() in ('true', '1', 'yes', 'on')
return bool(val)
def _toint(val):
try:
return int(val)
except ValueError:
return 0
def accesskey(context, key):
"""Helper to output each access key only once."""
if '_accesskeys' not in context:
context.vars['_accesskeys'] = {}
if key and key not in context.vars['_accesskeys']:
context.vars['_accesskeys'][key] = 1
return 'accesskey="%s"' % key
return ''
class idgen(object):
def __init__(self):
self.id = 0
def current(self):
return self.id
def __next__(self):
self.id += 1
return self.id
next = __next__ # Python 2/Jinja compatibility
class SphinxFileSystemLoader(FileSystemLoader):
"""
FileSystemLoader subclass that is not so strict about '..' entries in
template names.
"""
def get_source(self, environment, template):
for searchpath in self.searchpath:
filename = path.join(searchpath, template)
f = open_if_exists(filename)
if f is None:
continue
try:
contents = f.read().decode(self.encoding)
finally:
f.close()
mtime = path.getmtime(filename)
def uptodate():
try:
return path.getmtime(filename) == mtime
except OSError:
return False
return contents, filename, uptodate
raise TemplateNotFound(template)
class BuiltinTemplateLoader(TemplateBridge, BaseLoader):
"""
Interfaces the rendering environment of jinja2 for use in Sphinx.
"""
# TemplateBridge interface
def init(self, builder, theme=None, dirs=None):
# create a chain of paths to search
if theme:
# the theme's own dir and its bases' dirs
chain = theme.get_dirchain()
# then the theme parent paths
chain.extend(theme.themepath)
elif dirs:
chain = list(dirs)
else:
chain = []
# prepend explicit template paths
self.templatepathlen = len(builder.config.templates_path)
if builder.config.templates_path:
chain[0:0] = [path.join(builder.confdir, tp)
for tp in builder.config.templates_path]
# store it for use in newest_template_mtime
self.pathchain = chain
# make the paths into loaders
self.loaders = list(map(SphinxFileSystemLoader, chain))
use_i18n = builder.app.translator is not None
extensions = use_i18n and ['jinja2.ext.i18n'] or []
self.environment = SandboxedEnvironment(loader=self,
extensions=extensions)
self.environment.filters['tobool'] = _tobool
self.environment.filters['toint'] = _toint
self.environment.globals['debug'] = contextfunction(pformat)
self.environment.globals['accesskey'] = contextfunction(accesskey)
self.environment.globals['idgen'] = idgen
if use_i18n:
self.environment.install_gettext_translations(
builder.app.translator)
def render(self, template, context):
return self.environment.get_template(template).render(context)
def render_string(self, source, context):
return self.environment.from_string(source).render(context)
def newest_template_mtime(self):
return max(mtimes_of_files(self.pathchain, '.html'))
# Loader interface
def get_source(self, environment, template):
loaders = self.loaders
# exclamation mark starts search from theme
if template.startswith('!'):
loaders = loaders[self.templatepathlen:]
template = template[1:]
for loader in loaders:
try:
return loader.get_source(environment, template)
except TemplateNotFound:
pass
raise TemplateNotFound(template)
| 0.001502 |
#!/usr/bin/env python
'''
convert a MAVLink tlog file to a MATLab mfile
'''
import sys, os
import re
from pymavlink import mavutil
def process_tlog(filename):
'''convert a tlog to a .m file'''
print("Processing %s" % filename)
mlog = mavutil.mavlink_connection(filename, dialect=args.dialect, zero_time_base=True)
# first walk the entire file, grabbing all messages into a hash of lists,
#and the first message of each type into a hash
msg_types = {}
msg_lists = {}
types = args.types
if types is not None:
types = types.split(',')
# note that Octave doesn't like any extra '.', '*', '-', characters in the filename
(head, tail) = os.path.split(filename)
basename = '.'.join(tail.split('.')[:-1])
    mfilename = re.sub(r'[\.\-\+\*]', '_', basename) + '.m'
# Octave also doesn't like files that don't start with a letter
    if re.match('^[a-zA-Z]', mfilename) is None:
mfilename = 'm_' + mfilename
if head is not None:
mfilename = os.path.join(head, mfilename)
print("Creating %s" % mfilename)
f = open(mfilename, "w")
type_counters = {}
while True:
m = mlog.recv_match(condition=args.condition)
if m is None:
break
if types is not None and m.get_type() not in types:
continue
if m.get_type() == 'BAD_DATA':
continue
fieldnames = m._fieldnames
mtype = m.get_type()
if mtype in ['FMT', 'PARM']:
continue
if mtype not in type_counters:
type_counters[mtype] = 0
f.write("%s.columns = {'timestamp'" % mtype)
for field in fieldnames:
val = getattr(m, field)
if not isinstance(val, str):
if type(val) is not list:
f.write(",'%s'" % field)
else:
for i in range(0, len(val)):
f.write(",'%s%d'" % (field, i + 1))
f.write("};\n")
type_counters[mtype] += 1
f.write("%s.data(%u,:) = [%f" % (mtype, type_counters[mtype], m._timestamp))
for field in m._fieldnames:
val = getattr(m, field)
if not isinstance(val, str):
if type(val) is not list:
f.write(",%.20g" % val)
else:
for i in range(0, len(val)):
f.write(",%.20g" % val[i])
f.write("];\n")
f.close()
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("--condition", default=None, help="select packets by condition")
parser.add_argument("-o", "--output", default=None, help="output filename")
parser.add_argument("--types", default=None, help="types of messages (comma separated)")
parser.add_argument("--dialect", default="ardupilotmega", help="MAVLink dialect")
parser.add_argument("logs", metavar="LOG", nargs="+")
args = parser.parse_args()
for filename in args.logs:
process_tlog(filename)
| 0.005536 |
"""SCons.Tool.mslib
Tool-specific initialization for lib (MicroSoft library archiver).
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mslib.py 2014/03/02 14:18:15 garyo"
import SCons.Defaults
import SCons.Tool
import SCons.Tool.msvs
import SCons.Tool.msvc
import SCons.Util
from MSCommon import msvc_exists, msvc_setup_env_once
def generate(env):
"""Add Builders and construction variables for lib to an Environment."""
SCons.Tool.createStaticLibBuilder(env)
# Set-up ms tools paths
msvc_setup_env_once(env)
env['AR'] = 'lib'
env['ARFLAGS'] = SCons.Util.CLVar('/nologo')
env['ARCOM'] = "${TEMPFILE('$AR $ARFLAGS /OUT:$TARGET $SOURCES')}"
env['LIBPREFIX'] = ''
env['LIBSUFFIX'] = '.lib'
def exists(env):
return msvc_exists()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 0.003573 |
"""
DogStatsd is a Python client for DogStatsd, a Statsd fork for Datadog.
"""
import logging
from random import random
from time import time
import socket
from functools import wraps
try:
from itertools import imap
except ImportError:
imap = map
log = logging.getLogger('dogstatsd')
class DogStatsd(object):
OK, WARNING, CRITICAL, UNKNOWN = (0, 1, 2, 3)
def __init__(self, host='localhost', port=8125, max_buffer_size = 50):
"""
Initialize a DogStatsd object.
>>> statsd = DogStatsd()
:param host: the host of the DogStatsd server.
:param port: the port of the DogStatsd server.
        :param max_buffer_size: Maximum number of metrics to buffer before sending to the server when sending metrics in batches
"""
self._host = None
self._port = None
self.socket = None
self.max_buffer_size = max_buffer_size
self._send = self._send_to_server
self.connect(host, port)
self.encoding = 'utf-8'
def get_socket(self):
'''
Return a connected socket
'''
if not self.socket:
self.connect(self._host, self._port)
return self.socket
def __enter__(self):
self.open_buffer(self.max_buffer_size)
return self
def __exit__(self, type, value, traceback):
self.close_buffer()
def open_buffer(self, max_buffer_size=50):
'''
Open a buffer to send a batch of metrics in one packet
You can also use this as a context manager.
>>> with DogStatsd() as batch:
>>> batch.gauge('users.online', 123)
>>> batch.gauge('active.connections', 1001)
'''
self.max_buffer_size = max_buffer_size
        self.buffer = []
self._send = self._send_to_buffer
def close_buffer(self):
'''
Flush the buffer and switch back to single metric packets
'''
self._send = self._send_to_server
self._flush_buffer()
def connect(self, host, port):
"""
Connect to the statsd server on the given host and port.
"""
self._host = host
self._port = int(port)
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.connect((self._host, self._port))
def gauge(self, metric, value, tags=None, sample_rate=1):
"""
Record the value of a gauge, optionally setting a list of tags and a
sample rate.
>>> statsd.gauge('users.online', 123)
>>> statsd.gauge('active.connections', 1001, tags=["protocol:http"])
"""
return self._report(metric, 'g', value, tags, sample_rate)
def increment(self, metric, value=1, tags=None, sample_rate=1):
"""
Increment a counter, optionally setting a value, tags and a sample
rate.
>>> statsd.increment('page.views')
>>> statsd.increment('files.transferred', 124)
"""
self._report(metric, 'c', value, tags, sample_rate)
def decrement(self, metric, value=1, tags=None, sample_rate=1):
"""
Decrement a counter, optionally setting a value, tags and a sample
rate.
>>> statsd.decrement('files.remaining')
>>> statsd.decrement('active.connections', 2)
"""
self._report(metric, 'c', -value, tags, sample_rate)
def histogram(self, metric, value, tags=None, sample_rate=1):
"""
Sample a histogram value, optionally setting tags and a sample rate.
>>> statsd.histogram('uploaded.file.size', 1445)
>>> statsd.histogram('album.photo.count', 26, tags=["gender:female"])
"""
self._report(metric, 'h', value, tags, sample_rate)
def timing(self, metric, value, tags=None, sample_rate=1):
"""
Record a timing, optionally setting tags and a sample rate.
>>> statsd.timing("query.response.time", 1234)
"""
self._report(metric, 'ms', value, tags, sample_rate)
def timed(self, metric, tags=None, sample_rate=1):
"""
A decorator that will measure the distribution of a function's run
time. Optionally specify a list of tag or a sample rate.
::
@statsd.timed('user.query.time', sample_rate=0.5)
def get_user(user_id):
# Do what you need to ...
pass
# Is equivalent to ...
start = time.time()
try:
get_user(user_id)
finally:
statsd.timing('user.query.time', time.time() - start)
"""
def wrapper(func):
@wraps(func)
def wrapped(*args, **kwargs):
start = time()
result = func(*args, **kwargs)
self.timing(metric, time() - start, tags=tags,
sample_rate=sample_rate)
return result
return wrapped
return wrapper
def set(self, metric, value, tags=None, sample_rate=1):
"""
Sample a set value.
>>> statsd.set('visitors.uniques', 999)
"""
self._report(metric, 's', value, tags, sample_rate)
def _report(self, metric, metric_type, value, tags, sample_rate):
if sample_rate != 1 and random() > sample_rate:
return
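        # The lines below assemble a plain-text dogstatsd datagram of the form
        # "metric.name:value|type" with optional "|@sample_rate" and
        # "|#tag1,tag2" sections, e.g. "page.views:1|c" or
        # "query.time:123|ms|@0.5|#env:prod".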
payload = [metric, ":", value, "|", metric_type]
if sample_rate != 1:
payload.extend(["|@", sample_rate])
if tags:
payload.extend(["|#", ",".join(tags)])
encoded = "".join(imap(str, payload))
self._send(encoded)
def _send_to_server(self, packet):
try:
self.socket.send(packet.encode(self.encoding))
except socket.error:
log.info("Error submitting metric, will try refreshing the socket")
self.connect(self._host, self._port)
try:
self.socket.send(packet.encode(self.encoding))
except socket.error:
log.exception("Failed to send packet with a newly binded socket")
def _send_to_buffer(self, packet):
self.buffer.append(packet)
if len(self.buffer) >= self.max_buffer_size:
self._flush_buffer()
def _flush_buffer(self):
self._send_to_server("\n".join(self.buffer))
        self.buffer = []
def _escape_event_content(self, string):
return string.replace('\n', '\\n')
def _escape_service_check_message(self, string):
return string.replace('\n', '\\n').replace('m:', 'm\:')
def event(self, title, text, alert_type=None, aggregation_key=None,
source_type_name=None, date_happened=None, priority=None,
tags=None, hostname=None):
"""
Send an event. Attributes are the same as the Event API.
http://docs.datadoghq.com/api/
>>> statsd.event('Man down!', 'This server needs assistance.')
>>> statsd.event('The web server restarted', 'The web server is up again', alert_type='success') # NOQA
"""
title = self._escape_event_content(title)
text = self._escape_event_content(text)
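        # The event datagram built below has the form
        # "_e{<title length>,<text length>}:<title>|<text>", followed by the
        # optional "|d:", "|h:", "|k:", "|p:", "|s:", "|t:" and "|#" sections
        # corresponding to the keyword arguments of this method.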
string = u'_e{%d,%d}:%s|%s' % (len(title), len(text), title, text)
if date_happened:
string = '%s|d:%d' % (string, date_happened)
if hostname:
string = '%s|h:%s' % (string, hostname)
if aggregation_key:
string = '%s|k:%s' % (string, aggregation_key)
if priority:
string = '%s|p:%s' % (string, priority)
if source_type_name:
string = '%s|s:%s' % (string, source_type_name)
if alert_type:
string = '%s|t:%s' % (string, alert_type)
if tags:
string = '%s|#%s' % (string, ','.join(tags))
if len(string) > 8 * 1024:
            raise Exception(u'Event "%s" payload is too big (more than 8KB), '
                            'event discarded' % title)
try:
self.socket.send(string.encode(self.encoding))
except Exception:
log.exception(u'Error submitting event "%s"' % title)
def service_check(self, check_name, status, tags=None, timestamp=None,
hostname=None, message=None):
"""
Send a service check run.
>>> statsd.service_check('my_service.check_name', DogStatsd.WARNING)
"""
message = self._escape_service_check_message(message) if message is not None else ''
string = u'_sc|{0}|{1}'.format(check_name, status)
if timestamp:
string = u'{0}|d:{1}'.format(string, timestamp)
if hostname:
string = u'{0}|h:{1}'.format(string, hostname)
if tags:
string = u'{0}|#{1}'.format(string, ','.join(tags))
if message:
string = u'{0}|m:{1}'.format(string, message)
try:
self.socket.send(string.encode(self.encoding))
except Exception:
log.exception(u'Error submitting service check "{0}"'.format(check_name))
statsd = DogStatsd()
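# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of exercising the module-level client above, assuming a
# statsd/dogstatsd agent is listening on the configured host and port. The
# metric names are hypothetical.
if __name__ == '__main__':
    @statsd.timed('example.nap.time')
    def nap():
        from time import sleep
        sleep(0.1)
    statsd.increment('example.page.views')
    statsd.histogram('example.upload.size', 1445, tags=['env:dev'])
    nap()
    statsd.event('Example event', 'Something happened.', alert_type='info')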
| 0.00099 |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
from QuantConnect.Data.Custom.Tiingo import *
### <summary>
### Look for positive and negative words in the news article description
### and trade based on the sum of the sentiment
### </summary>
class TiingoNewsAlgorithm(QCAlgorithm):
def Initialize(self):
# Predefine a dictionary of words with scores to scan for in the description
# of the Tiingo news article
self.words = {
"bad": -0.5, "good": 0.5,
"negative": -0.5, "great": 0.5,
"growth": 0.5, "fail": -0.5,
"failed": -0.5, "success": 0.5, "nailed": 0.5,
"beat": 0.5, "missed": -0.5,
}
self.SetStartDate(2019, 6, 10)
self.SetEndDate(2019, 10, 3)
self.SetCash(100000)
aapl = self.AddEquity("AAPL", Resolution.Hour).Symbol
self.aaplCustom = self.AddData(TiingoNews, aapl).Symbol
# Request underlying equity data.
ibm = self.AddEquity("IBM", Resolution.Minute).Symbol
# Add news data for the underlying IBM asset
news = self.AddData(TiingoNews, ibm).Symbol
# Request 60 days of history with the TiingoNews IBM Custom Data Symbol
history = self.History(TiingoNews, news, 60, Resolution.Daily)
# Count the number of items we get from our history request
self.Debug(f"We got {len(history)} items from our history request")
def OnData(self, data):
# Confirm that the data is in the collection
if not data.ContainsKey(self.aaplCustom):
return
# Gets the data from the slice
article = data[self.aaplCustom]
# Article descriptions come in all caps. Lower and split by word
descriptionWords = article.Description.lower().split(" ")
# Take the intersection of predefined words and the words in the
# description to get a list of matching words
intersection = set(self.words.keys()).intersection(descriptionWords)
# Get the sum of the article's sentiment, and go long or short
# depending if it's a positive or negative description
sentiment = sum([self.words[i] for i in intersection])
self.SetHoldings(article.Symbol.Underlying, sentiment)
| 0.002723 |
# Django settings for Userena demo project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
import os
import sys
import django
settings_dir = os.path.dirname(__file__)
PROJECT_ROOT = os.path.abspath(settings_dir)
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_ROOT, 'private/development.db'),
}
}
# Internationalization
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
ugettext = lambda s: s
LANGUAGES = (
('en', ugettext('English')),
('nl', ugettext('Dutch')),
('fr', ugettext('French')),
('pl', ugettext('Polish')),
    ('pt', ugettext('Portuguese')),
('pt-br', ugettext('Brazilian Portuguese')),
('es', ugettext('Spanish')),
('el', ugettext('Greek')),
)
LOCALE_PATHS = (
os.path.join(PROJECT_ROOT, 'locale'),
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'public/media/')
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'public/static/')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'demo/static/'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '_g-js)o8z#8=9pr1&05h^1_#)91sbo-)g^(*=-+epxmt4kc9m#'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.locale.LocaleMiddleware',
'userena.middleware.UserenaLocaleMiddleware',
)
# Add the Guardian and userena authentication backends
AUTHENTICATION_BACKENDS = (
'userena.backends.UserenaAuthenticationBackend',
'guardian.backends.ObjectPermissionBackend',
'django.contrib.auth.backends.ModelBackend',
)
# Settings used by Userena
LOGIN_REDIRECT_URL = '/accounts/%(username)s/'
LOGIN_URL = '/accounts/signin/'
LOGOUT_URL = '/accounts/signout/'
AUTH_PROFILE_MODULE = 'profiles.Profile'
USERENA_DISABLE_PROFILE_LIST = True
USERENA_MUGSHOT_SIZE = 140
ROOT_URLCONF = 'urls'
WSGI_APPLICATION = 'demo.wsgi.application'
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT, 'templates/'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'guardian',
'userena',
'userena.contrib.umessages',
'userena.tests.profiles',
)
if django.VERSION < (1, 7, 0):
# only older versions of django require south migrations
INSTALLED_APPS += ('south',)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Needed for Django guardian
ANONYMOUS_USER_ID = -1
USERENA_USE_HTTPS = False
| 0.000938 |
import os
from django.core.checks import register, Warning
from django.utils.lru_cache import lru_cache
from willow.image import Image
@lru_cache()
def has_jpeg_support():
wagtail_jpg = os.path.join(os.path.dirname(__file__), 'check_files', 'wagtail.jpg')
succeeded = True
with open(wagtail_jpg, 'rb') as f:
try:
Image.open(f)
except (IOError, Image.LoaderError):
succeeded = False
return succeeded
@lru_cache()
def has_png_support():
wagtail_png = os.path.join(os.path.dirname(__file__), 'check_files', 'wagtail.png')
succeeded = True
with open(wagtail_png, 'rb') as f:
try:
Image.open(f)
except (IOError, Image.LoaderError):
succeeded = False
return succeeded
@register()
def image_library_check(app_configs, **kwargs):
errors = []
if not has_jpeg_support():
errors.append(
Warning(
'JPEG image support is not available',
hint="Check that the 'libjpeg' library is installed, then reinstall Pillow."
)
)
if not has_png_support():
errors.append(
Warning(
'PNG image support is not available',
hint="Check that the 'zlib' library is installed, then reinstall Pillow."
)
)
return errors
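# --- Illustrative usage sketch (not part of the original module) ---
# These checks are normally invoked through Django's system check framework
# (for example via ``manage.py check``); calling the check directly might
# look like this, assuming Django settings are already configured.
if __name__ == '__main__':
    for message in image_library_check(None):
        print(message.msg)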
| 0.002915 |
from __future__ import print_function, division, absolute_import
#
# Copyright (c) 2014 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import fnmatch
import re
import logging
import six
from subscription_manager.unicode_width import textual_width as utf8_width
from subscription_manager.utils import get_terminal_width
from subscription_manager.i18n import ugettext as _
log = logging.getLogger(__name__)
FONT_BOLD = '\033[1m'
FONT_RED = '\033[31m'
FONT_NORMAL = '\033[0m'
def ljust_wide(in_str, padding):
return in_str + ' ' * (padding - utf8_width(in_str))
def columnize(caption_list, callback, *args, **kwargs):
"""
Take a list of captions and values and columnize the output so that
shorter captions are padded to be the same length as the longest caption.
For example:
Foo: Bar
Something Else: Baz
This function also takes a callback which is used to render the final line.
The callback gives us the ability to do things like replacing None values
with the string "None" (see none_wrap_columnize_callback()).
"""
indent = kwargs.get('indent', 0)
caption_list = [" " * indent + caption for caption in caption_list]
columns = get_terminal_width()
padding = sorted(map(utf8_width, caption_list))[-1] + 1
if columns:
padding = min(padding, int(columns / 2))
padded_list = []
for caption in caption_list:
lines = format_name(caption, indent, padding - 1).split('\n')
lines[-1] = ljust_wide(lines[-1], padding) + '%s'
fixed_caption = '\n'.join(lines)
padded_list.append(fixed_caption)
lines = list(zip(padded_list, args))
output = []
for (caption, value) in lines:
kwargs['caption'] = caption
if isinstance(value, dict):
value = [val for val in value.values()]
if isinstance(value, list):
if value:
# Put the first value on the same line as the caption
formatted_arg = format_name(value[0], padding, columns)
output.append(callback(caption, formatted_arg, **kwargs))
for val in value[1:]:
formatted_arg = format_name(val, padding, columns)
output.append(callback((" " * padding) + "%s", formatted_arg, **kwargs))
else:
# Degenerate case of an empty list
output.append(callback(caption, "", **kwargs))
else:
formatted_arg = format_name(value, padding, columns)
output.append(callback(caption, formatted_arg, **kwargs))
return '\n'.join(output)
def format_name(name, indent, max_length):
"""
Formats a potentially long name for multi-line display, giving
it a columned effect. Assumes the first line is already
properly indented.
"""
if not name or not max_length or (max_length - indent) <= 2 or not isinstance(name, six.string_types):
return name
if not isinstance(name, six.text_type):
name = name.decode("utf-8")
words = name.split()
lines = []
    # handle empty names
if not words:
return name
# Preserve leading whitespace in front of the first word
leading_space = len(name) - len(name.lstrip())
words[0] = name[0:leading_space] + words[0]
# If there is leading whitespace, we've already indented the word and don't
# want to double count.
current = indent - leading_space
if current < 0:
current = 0
def add_line():
lines.append(' '.join(line))
line = []
# Split here and build it back up by word, this way we get word wrapping
while words:
word = words.pop(0)
if current + utf8_width(word) <= max_length:
current += utf8_width(word) + 1 # Have to account for the extra space
line.append(word)
else:
if line:
add_line()
# If the word will not fit, break it
if indent + utf8_width(word) > max_length:
split_index = 0
while(utf8_width(word[:split_index + 1]) + indent <= max_length):
split_index += 1
words.insert(0, word[split_index:])
word = word[:split_index]
line = [word]
if indent and lines:
line.insert(0, ' ' * (indent - 1))
current = indent + utf8_width(word) + 1
add_line()
return '\n'.join(lines)
def highlight_by_filter_string_columnize_cb(template_str, *args, **kwargs):
"""
Takes a template string and arguments and highlights word matches
when the value contains a match to the filter_string.This occurs
only when the row caption exists in the match columns. Mainly this
is a callback meant to be used by columnize().
"""
filter_string = kwargs.get('filter_string')
match_columns = kwargs.get('match_columns')
is_atty = kwargs.get('is_atty')
caption = kwargs.get('caption').split(':')[0] + ':'
p = None
# wildcard only disrupts the markup
if filter_string and filter_string.replace('*', ' ').replace('?', ' ').strip() == '':
filter_string = None
if is_atty and filter_string and caption in match_columns:
try:
p = re.compile(fnmatch.translate(filter_string), re.IGNORECASE)
except Exception as e:
log.error("Cannot compile search regex '%s'. %s", filter_string, e)
arglist = []
if args:
for arg in args:
if arg is None:
arg = _("None")
elif p:
for match in p.findall(arg.strip()):
replacer = FONT_BOLD + FONT_RED + match + FONT_NORMAL
arg = arg.replace(match, replacer)
arglist.append(arg)
return template_str % tuple(arglist)
def none_wrap_columnize_callback(template_str, *args, **kwargs):
"""
Takes a template string and arguments and replaces any None arguments
with the word "None" before rendering the template. Mainly this is
a callback meant to be used by columnize().
"""
arglist = []
for arg in args:
if arg is None:
arg = _("None")
arglist.append(arg)
return template_str % tuple(arglist)
def echo_columnize_callback(template_str, *args, **kwargs):
"""
Just takes a template string and arguments and renders it. Mainly
this is a callback meant to be used by columnize().
"""
return template_str % tuple(args)
# from http://farmdev.com/talks/unicode/
def to_unicode_or_bust(obj, encoding='utf-8'):
if isinstance(obj, six.string_types):
if not isinstance(obj, six.text_type):
obj = six.text_type(obj, encoding)
return obj
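# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of columnize() with the callbacks defined above; captions
# and values are made up, and running it requires the subscription_manager
# package this module normally lives in.
if __name__ == '__main__':
    print(columnize(
        ["Name:", "Status:", "Provides:"],
        none_wrap_columnize_callback,
        "example-product",
        None,
        ["tag-a", "tag-b"],
    ))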
| 0.000684 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""Model zoo for pre-trained models."""
from __future__ import print_function
__all__ = ['get_model_file', 'purge']
import os
import zipfile
from ..utils import download, check_sha1
_model_sha1 = {name: checksum for checksum, name in [
('44335d1f0046b328243b32a26a4fbd62d9057b45', 'alexnet'),
('f27dbf2dbd5ce9a80b102d89c7483342cd33cb31', 'densenet121'),
('b6c8a95717e3e761bd88d145f4d0a214aaa515dc', 'densenet161'),
('2603f878403c6aa5a71a124c4a3307143d6820e9', 'densenet169'),
('1cdbc116bc3a1b65832b18cf53e1cb8e7da017eb', 'densenet201'),
('ed47ec45a937b656fcc94dabde85495bbef5ba1f', 'inceptionv3'),
('d2b128fa89477c2e20061607a53a8d9f66ce239d', 'resnet101_v1'),
('6562166cd597a6328a32a0ce47bb651df80b3bbb', 'resnet152_v1'),
('38d6d423c22828718ec3397924b8e116a03e6ac0', 'resnet18_v1'),
('4dc2c2390a7c7990e0ca1e53aeebb1d1a08592d1', 'resnet34_v1'),
('2a903ab21260c85673a78fe65037819a843a1f43', 'resnet50_v1'),
('8aacf80ff4014c1efa2362a963ac5ec82cf92d5b', 'resnet18_v2'),
('0ed3cd06da41932c03dea1de7bc2506ef3fb97b3', 'resnet34_v2'),
('eb7a368774aa34a12ed155126b641ae7556dad9d', 'resnet50_v2'),
('264ba4970a0cc87a4f15c96e25246a1307caf523', 'squeezenet1.0'),
('33ba0f93753c83d86e1eb397f38a667eaf2e9376', 'squeezenet1.1'),
('dd221b160977f36a53f464cb54648d227c707a05', 'vgg11'),
('ee79a8098a91fbe05b7a973fed2017a6117723a8', 'vgg11_bn'),
('6bc5de58a05a5e2e7f493e2d75a580d83efde38c', 'vgg13'),
('7d97a06c3c7a1aecc88b6e7385c2b373a249e95e', 'vgg13_bn'),
('649467530119c0f78c4859999e264e7bf14471a9', 'vgg16'),
('6b9dbe6194e5bfed30fd7a7c9a71f7e5a276cb14', 'vgg16_bn'),
('f713436691eee9a20d70a145ce0d53ed24bf7399', 'vgg19'),
('9730961c9cea43fd7eeefb00d792e386c45847d6', 'vgg19_bn')]}
_url_format = 'https://{bucket}.s3.amazonaws.com/gluon/models/{file_name}.zip'
bucket = 'apache-mxnet'
def short_hash(name):
if name not in _model_sha1:
raise ValueError('Pretrained model for {name} is not available.'.format(name=name))
return _model_sha1[name][:8]
def get_model_file(name, local_dir=os.path.expanduser('~/.mxnet/models/')):
r"""Return location for the pretrained on local file system.
This function will download from online model zoo when model cannot be found or has mismatch.
The local_dir directory will be created if it doesn't exist.
Parameters
----------
name : str
Name of the model.
local_dir : str, default '~/.mxnet/models'
Location for keeping the model parameters.
Returns
-------
file_path
Path to the requested pretrained model file.
"""
file_name = '{name}-{short_hash}'.format(name=name,
short_hash=short_hash(name))
file_path = os.path.join(local_dir, file_name+'.params')
sha1_hash = _model_sha1[name]
if os.path.exists(file_path):
if check_sha1(file_path, sha1_hash):
return file_path
else:
print('Mismatch in the content of model file detected. Downloading again.')
else:
print('Model file is not found. Downloading.')
if not os.path.exists(local_dir):
os.makedirs(local_dir)
zip_file_path = os.path.join(local_dir, file_name+'.zip')
download(_url_format.format(bucket=bucket,
file_name=file_name),
path=zip_file_path,
overwrite=True)
with zipfile.ZipFile(zip_file_path) as zf:
zf.extractall(local_dir)
os.remove(zip_file_path)
if check_sha1(file_path, sha1_hash):
return file_path
else:
raise ValueError('Downloaded file has different hash. Please try again.')
def purge(local_dir=os.path.expanduser('~/.mxnet/models/')):
r"""Purge all pretrained model files in local file store.
Parameters
----------
local_dir : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
files = os.listdir(local_dir)
for f in files:
if f.endswith(".params"):
os.remove(os.path.join(local_dir, f))
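# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of fetching parameters for one of the known model names
# listed in _model_sha1; the target directory is hypothetical.
if __name__ == '__main__':
    params_path = get_model_file('resnet18_v1', local_dir='/tmp/mxnet-models/')
    print('parameters stored at', params_path)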
| 0.001431 |
from django.test import TestCase
from django.contrib.auth.models import User
class BaseTestCase(TestCase):
def assertObject(self, instance, **params):
"""
Asserts that object have given attributes of given values.
Sample usage:
# Fetches user from DB and asserts that username=='bob'
self.assertObject(user, username='bob')
"""
        for attr, expected in params.items():
            attr_name, _, comparator = attr.partition('__')
            comparator = comparator or 'eq'
            value = getattr(instance, attr_name)
            if isinstance(value, property):
                # Resolve property objects by invoking their getter on the instance.
                value = value.fget(instance)
            if comparator == 'eq':
                self.assertEqual(expected, value, 'Failed assertion on %s: %s should equal %s' % (attr_name, value, expected))
elif comparator == 'gte':
self.assertGreaterEqual(value, expected, 'Failed assertion on %s: %s should be greater or equal to %s' % (attr_name, value, expected))
else:
raise ValueError('Unknown comparator: %s' % comparator)
def assertModel(self, model, pk, **params):
"""
        Fetches the object of the given model with pk=pk, then asserts that
        its attributes match the given values.
Sample usage:
# Fetches user from DB and asserts that username=='bob'
self.assertModel(User, 1, username='bob')
"""
self.assertObject(model.objects.get(pk=pk), **params)
def assertObjectUpdated(self, old_instance, **params):
"""
        Re-fetches the given instance from the database, then asserts that
        its attributes have been updated to the given values.
Sample usage:
# Fetches user_instance from DB again and asserts username=='john'
self.assertObjectUpdated(user_instance, username='john')
"""
self.assertModel(old_instance.__class__, old_instance.pk, **params)
def login_user(self, username='username', password='password'):
user = User.objects.create_user(username=username, password=password)
self.client.login(username=username, password=password)
return user
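# --- Illustrative usage sketch (not part of the original module) ---
# A hypothetical test case built on the helpers above; field values are made
# up for the example.
class ExampleUserTests(BaseTestCase):
    def test_login_user_creates_account(self):
        user = self.login_user(username='bob', password='secret')
        self.assertObject(user, username='bob', pk__gte=1)
        self.assertModel(User, user.pk, username='bob')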
| 0.001396 |
# -*- test-case-name: twisted.conch.test.test_ckeygen -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implementation module for the `ckeygen` command.
"""
import sys, os, getpass, socket
if getpass.getpass == getpass.unix_getpass:
try:
import termios # hack around broken termios
termios.tcgetattr, termios.tcsetattr
except (ImportError, AttributeError):
sys.modules['termios'] = None
reload(getpass)
from twisted.conch.ssh import keys
from twisted.python import filepath, log, usage, randbytes
class GeneralOptions(usage.Options):
synopsis = """Usage: ckeygen [options]
"""
longdesc = "ckeygen manipulates public/private keys in various ways."
optParameters = [['bits', 'b', 1024, 'Number of bits in the key to create.'],
['filename', 'f', None, 'Filename of the key file.'],
['type', 't', None, 'Specify type of key to create.'],
['comment', 'C', None, 'Provide new comment.'],
['newpass', 'N', None, 'Provide new passphrase.'],
['pass', 'P', None, 'Provide old passphrase']]
optFlags = [['fingerprint', 'l', 'Show fingerprint of key file.'],
['changepass', 'p', 'Change passphrase of private key file.'],
['quiet', 'q', 'Quiet.'],
['showpub', 'y', 'Read private key file and print public key.']]
compData = usage.Completions(
optActions={"type": usage.CompleteList(["rsa", "dsa"])})
def run():
options = GeneralOptions()
try:
options.parseOptions(sys.argv[1:])
except usage.UsageError, u:
print 'ERROR: %s' % u
options.opt_help()
sys.exit(1)
log.discardLogs()
log.deferr = handleError # HACK
if options['type']:
if options['type'] == 'rsa':
generateRSAkey(options)
elif options['type'] == 'dsa':
generateDSAkey(options)
else:
sys.exit('Key type was %s, must be one of: rsa, dsa' % options['type'])
elif options['fingerprint']:
printFingerprint(options)
elif options['changepass']:
changePassPhrase(options)
elif options['showpub']:
displayPublicKey(options)
else:
options.opt_help()
sys.exit(1)
def handleError():
    from twisted.internet import reactor
    from twisted.python import failure
    global exitStatus
    exitStatus = 2
    log.err(failure.Failure())
    reactor.stop()
    raise
def generateRSAkey(options):
from Crypto.PublicKey import RSA
print 'Generating public/private rsa key pair.'
key = RSA.generate(int(options['bits']), randbytes.secureRandom)
_saveKey(key, options)
def generateDSAkey(options):
from Crypto.PublicKey import DSA
print 'Generating public/private dsa key pair.'
key = DSA.generate(int(options['bits']), randbytes.secureRandom)
_saveKey(key, options)
def printFingerprint(options):
if not options['filename']:
filename = os.path.expanduser('~/.ssh/id_rsa')
options['filename'] = raw_input('Enter file in which the key is (%s): ' % filename)
if os.path.exists(options['filename']+'.pub'):
options['filename'] += '.pub'
try:
key = keys.Key.fromFile(options['filename'])
obj = key.keyObject
string = key.blob()
print '%s %s %s' % (
obj.size() + 1,
key.fingerprint(),
os.path.basename(options['filename']))
except:
sys.exit('bad key')
def changePassPhrase(options):
if not options['filename']:
filename = os.path.expanduser('~/.ssh/id_rsa')
options['filename'] = raw_input('Enter file in which the key is (%s): ' % filename)
try:
key = keys.Key.fromFile(options['filename']).keyObject
except keys.BadKeyError, e:
if e.args[0] != 'encrypted key with no passphrase':
raise
else:
if not options['pass']:
options['pass'] = getpass.getpass('Enter old passphrase: ')
key = keys.Key.fromFile(
options['filename'], passphrase = options['pass']).keyObject
if not options['newpass']:
while 1:
p1 = getpass.getpass('Enter new passphrase (empty for no passphrase): ')
p2 = getpass.getpass('Enter same passphrase again: ')
if p1 == p2:
break
print 'Passphrases do not match. Try again.'
options['newpass'] = p1
open(options['filename'], 'w').write(
keys.Key(key).toString(passphrase=options['newpass']))
print 'Your identification has been saved with the new passphrase.'
def displayPublicKey(options):
if not options['filename']:
filename = os.path.expanduser('~/.ssh/id_rsa')
options['filename'] = raw_input('Enter file in which the key is (%s): ' % filename)
try:
key = keys.Key.fromFile(options['filename']).keyObject
except keys.BadKeyError, e:
if e.args[0] != 'encrypted key with no passphrase':
raise
else:
if not options['pass']:
options['pass'] = getpass.getpass('Enter passphrase: ')
key = keys.Key.fromFile(
options['filename'], passphrase = options['pass']).keyObject
print keys.Key(key).public().toString()
def _saveKey(key, options):
if not options['filename']:
kind = keys.objectType(key)
kind = {'ssh-rsa':'rsa','ssh-dss':'dsa'}[kind]
filename = os.path.expanduser('~/.ssh/id_%s'%kind)
options['filename'] = raw_input('Enter file in which to save the key (%s): '%filename).strip() or filename
if os.path.exists(options['filename']):
print '%s already exists.' % options['filename']
yn = raw_input('Overwrite (y/n)? ')
if yn[0].lower() != 'y':
sys.exit()
if not options['pass']:
while 1:
p1 = getpass.getpass('Enter passphrase (empty for no passphrase): ')
p2 = getpass.getpass('Enter same passphrase again: ')
if p1 == p2:
break
print 'Passphrases do not match. Try again.'
options['pass'] = p1
keyObj = keys.Key(key)
comment = '%s@%s' % (getpass.getuser(), socket.gethostname())
filepath.FilePath(options['filename']).setContent(
keyObj.toString('openssh', options['pass']))
os.chmod(options['filename'], 33152)
filepath.FilePath(options['filename'] + '.pub').setContent(
keyObj.public().toString('openssh', comment))
print 'Your identification has been saved in %s' % options['filename']
print 'Your public key has been saved in %s.pub' % options['filename']
print 'The key fingerprint is:'
print keyObj.fingerprint()
if __name__ == '__main__':
run()
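# --- Illustrative invocations (not part of the original module) ---
# Example command lines this script is meant to handle; the flags map to the
# GeneralOptions declared above and the paths are hypothetical.
#   ckeygen -t rsa -b 2048 -f ~/.ssh/id_rsa
#   ckeygen -l -f ~/.ssh/id_rsa.pub
#   ckeygen -p -f ~/.ssh/id_rsa -P oldpass -N newpass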
| 0.004106 |
import wx
import wx.richtext
import gettext
t = gettext.translation('astrotortilla', 'locale', fallback=True)
_ = t.gettext
def create(parent):
return DlgHelpAbout(parent)
[wxID_DLGHELPABOUT, wxID_DLGHELPABOUTCLOSE, wxID_DLGHELPABOUTRICHTEXTCTRL1,
wxID_DLGHELPABOUTSTATICTEXT1,
] = [wx.NewId() for _init_ctrls in range(4)]
class DlgHelpAbout(wx.Dialog):
def _init_ctrls(self, prnt):
# generated method, don't edit
wx.Dialog.__init__(self, id=wxID_DLGHELPABOUT, name='', parent=prnt,
pos=wx.Point(568, 131), size=wx.Size(292, 315),
style=wx.DEFAULT_DIALOG_STYLE, title=_('About AstroTortilla'))
self.SetClientSize(wx.Size(284, 281))
self.close = wx.Button(id=wxID_DLGHELPABOUTCLOSE, label=_('Close'),
name='close', parent=self, pos=wx.Point(104, 240),
size=wx.Size(75, 23), style=0)
self.close.Center(wx.HORIZONTAL)
self.close.SetToolTipString('Close')
self.close.SetHelpText('Close')
self.close.Bind(wx.EVT_BUTTON, self.OnCloseButton,
id=wxID_DLGHELPABOUTCLOSE)
self.staticText1 = wx.StaticText(id=wxID_DLGHELPABOUTSTATICTEXT1,
label='AstroTortilla ', name='staticText1', parent=self,
pos=wx.Point(16, 16), size=wx.Size(61, 13), style=0)
self.richTextCtrl1 = wx.richtext.RichTextCtrl(
id=wxID_DLGHELPABOUTRICHTEXTCTRL1,
parent=self, pos=wx.Point(16, 32), size=wx.Size(248, 200),
style=wx.richtext.RE_MULTILINE,
value=_(
'AstroTortilla is aimed for automating telescope GoTo corrections using plate-solving. It uses a simple wrapper library for telescope control, image capture and plate-solving. See the files README and LICENSE for details.\n\nAstroTortilla and the library are licensed under the GNU General Public License v2.\n\nAstroTortilla and the library use:\n') +
' * Python\n * PyWX\n * win32all for Python\n * Python Win32 GUI Automation\n * Python Imaging Library (PIL)\n * Astrometry.net astrometrical plate-solving SW\n * CygWin (for executing Astrometry.net)\n\n' +
'http://astrotortilla.sf.net/\n\n' +
'Copyright 2012-2014 AstroTortilla team <[email protected]>\n' +
'Copyright 2010-2011 Antti Kuntsi')
self.richTextCtrl1.SetLabel('text')
def __init__(self, parent):
self._init_ctrls(parent)
self.staticText1.SetLabel("AstroTortilla %s" % parent.engine.version)
self.staticText1.SetSize(wx.Size(-1, 13))
def OnCloseButton(self, event):
self.Close()
| 0.00286 |
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import hashlib
import itertools
import json
import logging
import os
from collections import OrderedDict, defaultdict
from urllib import parse
from pants.backend.jvm.subsystems.jar_dependency_management import (
JarDependencyManagement,
PinnedJarArtifactSet,
)
from pants.backend.jvm.subsystems.resolve_subsystem import JvmResolveSubsystem
from pants.backend.jvm.targets.exportable_jvm_library import ExportableJvmLibrary
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.backend.jvm.tasks.classpath_products import ClasspathProducts
from pants.backend.jvm.tasks.coursier.coursier_subsystem import CoursierSubsystem
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.backend.jvm.tasks.resolve_shared import JvmResolverBase
from pants.base.exceptions import TaskError
from pants.base.fingerprint_strategy import FingerprintStrategy
from pants.base.revision import Revision
from pants.base.workunit import WorkUnitLabel
from pants.invalidation.cache_manager import VersionedTargetSet
from pants.java import util
from pants.java.distribution.distribution import DistributionLocator
from pants.java.executor import Executor, SubprocessExecutor
from pants.java.jar.exclude import Exclude
from pants.java.jar.jar_dependency_utils import M2Coordinate, ResolvedJar
from pants.util.contextutil import temporary_file
from pants.util.dirutil import safe_mkdir
from pants.util.fileutil import safe_hardlink_or_copy
logger = logging.getLogger(__name__)
class CoursierResultNotFound(Exception):
pass
class ConflictingDepsError(Exception):
"""Indicates two or more declared dependencies conflict."""
pass
class CoursierMixin(JvmResolverBase):
"""Experimental 3rdparty resolver using coursier.
TODO(wisechengyi):
1. Add relative url support
"""
RESULT_FILENAME = "result"
@classmethod
def implementation_version(cls):
return super().implementation_version() + [("CoursierMixin", 2)]
@classmethod
def subsystem_dependencies(cls):
return super().subsystem_dependencies() + (
CoursierSubsystem,
DistributionLocator,
JarDependencyManagement,
)
@classmethod
def register_options(cls, register):
super().register_options(register)
register(
"--allow-global-excludes",
type=bool,
advanced=False,
fingerprint=True,
default=True,
help="Whether global excludes are allowed.",
)
register(
"--report",
type=bool,
advanced=False,
default=False,
help="Show the resolve output. This would also force a resolve even if the resolve task is validated.",
)
@staticmethod
def _compute_jars_to_resolve_and_pin(raw_jars, artifact_set, manager):
"""This method provides settled lists of jar dependencies and coordinates based on conflict
management.
:param raw_jars: a collection of `JarDependencies`
:param artifact_set: PinnedJarArtifactSet
:param manager: JarDependencyManagement
:return: (list of settled `JarDependency`, set of pinned `M2Coordinate`)
"""
if artifact_set is None:
artifact_set = PinnedJarArtifactSet()
untouched_pinned_artifact = {M2Coordinate.create(x) for x in artifact_set}
jar_list = list(raw_jars)
for i, dep in enumerate(jar_list):
direct_coord = M2Coordinate.create(dep)
# Portion to manage pinned jars in case of conflict
if direct_coord in artifact_set:
managed_coord = artifact_set[direct_coord]
untouched_pinned_artifact.remove(managed_coord)
if direct_coord.rev != managed_coord.rev:
# It may be necessary to actually change the version number of the jar we want to resolve
# here, because overrides do not apply directly (they are exclusively transitive). This is
# actually a good thing, because it gives us more control over what happens.
coord = manager.resolve_version_conflict(
managed_coord, direct_coord, force=dep.force
)
# Once a version is settled, we force it anyway
jar_list[i] = dep.copy(rev=coord.rev, force=True)
return jar_list, untouched_pinned_artifact
def resolve(self, targets, compile_classpath, sources, javadoc, executor):
"""This is the core function for coursier resolve.
Validation strategy:
1. All targets are going through the `invalidated` to get fingerprinted in the target level.
No cache is fetched at this stage because it is disabled.
2. Once each target is fingerprinted, we combine them into a `VersionedTargetSet` where they
are fingerprinted together, because each run of 3rdparty resolve is context sensitive.
        Artifacts are stored in `VersionedTargetSet`'s results_dir; the contents are the aggregation of
        each coursier run that happened within that context.
Caching: (TODO): https://github.com/pantsbuild/pants/issues/5187
Currently it is disabled due to absolute paths in the coursier results.
:param targets: a collection of targets to do 3rdparty resolve against
:param compile_classpath: classpath product that holds the resolution result. IMPORTANT: this parameter will be changed.
:param sources: if True, fetch sources for 3rdparty
:param javadoc: if True, fetch javadoc for 3rdparty
:param executor: An instance of `pants.java.executor.Executor`. If None, a subprocess executor will be assigned.
:return: n/a
"""
manager = JarDependencyManagement.global_instance()
jar_targets = manager.targets_by_artifact_set(targets)
executor = executor or SubprocessExecutor(DistributionLocator.cached())
if not isinstance(executor, Executor):
raise ValueError(
"The executor argument must be an Executor instance, given {} of type {}".format(
executor, type(executor)
)
)
for artifact_set, target_subset in jar_targets.items():
raw_jar_deps, global_excludes = calculate_classpath(target_subset)
confs_for_fingerprint = ["sources"] * sources + ["javadoc"] * javadoc
fp_strategy = CoursierResolveFingerprintStrategy(confs_for_fingerprint)
compile_classpath.add_excludes_for_targets(target_subset)
with self.invalidated(
target_subset,
invalidate_dependents=False,
silent=False,
fingerprint_strategy=fp_strategy,
) as invalidation_check:
if not invalidation_check.all_vts:
continue
resolve_vts = VersionedTargetSet.from_versioned_targets(invalidation_check.all_vts)
vt_set_results_dir = self._prepare_vts_results_dir(resolve_vts)
pants_jar_base_dir = self._prepare_workdir()
coursier_cache_dir = CoursierSubsystem.global_instance().get_options().cache_dir
# If a report is requested, do not proceed with loading validated result.
if not self.get_options().report:
# Check each individual target without context first
# If the individuals are valid, check them as a VersionedTargetSet
# The order of 'or' statement matters, because checking for cache is more expensive.
if resolve_vts.valid or (
self.artifact_cache_reads_enabled()
and len(self.check_artifact_cache([resolve_vts])[0])
== len(resolve_vts.targets)
):
# Load up from the results dir
success = self._load_from_results_dir(
compile_classpath,
vt_set_results_dir,
coursier_cache_dir,
invalidation_check,
pants_jar_base_dir,
)
if success:
resolve_vts.update()
return
jars_to_resolve, pinned_coords = self._compute_jars_to_resolve_and_pin(
raw_jar_deps, artifact_set, manager
)
results = self._get_result_from_coursier(
jars_to_resolve,
global_excludes,
pinned_coords,
coursier_cache_dir,
sources,
javadoc,
executor,
)
for conf, result_list in results.items():
for result in result_list:
self._load_json_result(
conf,
compile_classpath,
coursier_cache_dir,
invalidation_check,
pants_jar_base_dir,
result,
self._override_classifiers_for_conf(conf),
)
self._populate_results_dir(vt_set_results_dir, results)
resolve_vts.update()
if self.artifact_cache_writes_enabled():
self.update_artifact_cache([(resolve_vts, [vt_set_results_dir])])
@staticmethod
def _override_classifiers_for_conf(conf):
# TODO Encapsulate this in the result from coursier instead of here.
# https://github.com/coursier/coursier/issues/803
if conf == "src_doc":
return ["sources", "javadoc"]
else:
return None
def _prepare_vts_results_dir(self, vts):
"""Given a `VergetTargetSet`, prepare its results dir."""
vt_set_results_dir = os.path.join(self.versioned_workdir, "results", vts.cache_key.hash)
safe_mkdir(vt_set_results_dir)
return vt_set_results_dir
def _prepare_workdir(self):
"""Prepare the location in our task workdir to store all the hardlinks to coursier cache
dir."""
pants_jar_base_dir = os.path.join(self.versioned_workdir, "cache")
safe_mkdir(pants_jar_base_dir)
return pants_jar_base_dir
def _get_result_from_coursier(
self,
jars_to_resolve,
global_excludes,
pinned_coords,
coursier_cache_path,
sources,
javadoc,
executor,
):
"""Calling coursier and return the result per invocation.
If coursier was called once for classifier '' and once for classifier 'tests', then the return value
would be: {'default': [<first coursier output>, <second coursier output>]}
:param jars_to_resolve: List of `JarDependency`s to resolve
:param global_excludes: List of `M2Coordinate`s to exclude globally
:param pinned_coords: List of `M2Coordinate`s that need to be pinned.
:param coursier_cache_path: path to where coursier cache is stored.
:param executor: An instance of `pants.java.executor.Executor`
:return: The aggregation of results by conf from coursier. Each coursier call could return
the following:
{
"conflict_resolution": {
"org:name:version" (requested): "org:name:version" (reconciled)
},
"dependencies": [
{
"coord": "orgA:nameA:versionA",
"file": <path>,
"dependencies": [ // coodinates for its transitive dependencies
<orgX:nameX:versionX>,
<orgY:nameY:versionY>,
]
},
{
"coord": "orgB:nameB:jar:classifier:versionB",
"file": <path>,
"dependencies": [ // coodinates for its transitive dependencies
<orgX:nameX:versionX>,
<orgZ:nameZ:versionZ>,
]
},
... // more about orgX:nameX:versionX, orgY:nameY:versionY, orgZ:nameZ:versionZ
]
}
Hence the aggregation of the results will be in the following format, for example when default classifier
and sources are fetched:
{
'default': [<result from coursier call with default conf with classifier X>,
<result from coursier call with default conf with classifier Y>],
'src_doc': [<result from coursier call with --sources and/or --javadoc>],
}
"""
# Prepare coursier args
coursier_subsystem_instance = CoursierSubsystem.global_instance()
coursier_jar = coursier_subsystem_instance.select(self.context)
repos = coursier_subsystem_instance.get_options().repos
# make [repoX, repoY] -> ['-r', repoX, '-r', repoY]
repo_args = list(itertools.chain(*list(zip(["-r"] * len(repos), repos))))
artifact_types_arg = [
"-A",
",".join(coursier_subsystem_instance.get_options().artifact_types),
]
advanced_options = coursier_subsystem_instance.get_options().fetch_options
common_args = (
[
"fetch",
# Print the resolution tree
"-t",
"--cache",
coursier_cache_path,
]
+ repo_args
+ artifact_types_arg
+ advanced_options
)
coursier_work_temp_dir = os.path.join(self.versioned_workdir, "tmp")
safe_mkdir(coursier_work_temp_dir)
results_by_conf = self._get_default_conf_results(
common_args,
coursier_jar,
global_excludes,
jars_to_resolve,
coursier_work_temp_dir,
pinned_coords,
executor,
)
if sources or javadoc:
non_default_conf_results = self._get_non_default_conf_results(
common_args,
coursier_jar,
global_excludes,
jars_to_resolve,
coursier_work_temp_dir,
pinned_coords,
sources,
javadoc,
executor,
)
results_by_conf.update(non_default_conf_results)
return results_by_conf
def _get_default_conf_results(
self,
common_args,
coursier_jar,
global_excludes,
jars_to_resolve,
coursier_work_temp_dir,
pinned_coords,
executor,
):
# Variable to store coursier result each run.
results = defaultdict(list)
with temporary_file(coursier_work_temp_dir, cleanup=False) as f:
output_fn = f.name
cmd_args = self._construct_cmd_args(
jars_to_resolve,
common_args,
global_excludes if self.get_options().allow_global_excludes else [],
pinned_coords,
coursier_work_temp_dir,
output_fn,
)
results["default"].append(self._call_coursier(cmd_args, coursier_jar, output_fn, executor))
return results
def _get_non_default_conf_results(
self,
common_args,
coursier_jar,
global_excludes,
jars_to_resolve,
coursier_work_temp_dir,
pinned_coords,
sources,
javadoc,
executor,
):
# To prevent improper api usage during development. User should not see this anyway.
if not sources and not javadoc:
raise TaskError("sources or javadoc has to be True.")
with temporary_file(coursier_work_temp_dir, cleanup=False) as f:
output_fn = f.name
results = defaultdict(list)
new_pinned_coords = []
new_jars_to_resolve = []
special_args = []
if not sources and not javadoc:
new_pinned_coords = pinned_coords
new_jars_to_resolve = jars_to_resolve
if sources:
special_args.append("--sources")
new_pinned_coords.extend(c.copy(classifier="sources") for c in pinned_coords)
new_jars_to_resolve.extend(c.copy(classifier="sources") for c in jars_to_resolve)
if javadoc:
special_args.append("--javadoc")
new_pinned_coords.extend(c.copy(classifier="javadoc") for c in pinned_coords)
new_jars_to_resolve.extend(c.copy(classifier="javadoc") for c in jars_to_resolve)
cmd_args = self._construct_cmd_args(
new_jars_to_resolve,
common_args,
global_excludes if self.get_options().allow_global_excludes else [],
new_pinned_coords,
coursier_work_temp_dir,
output_fn,
)
cmd_args.extend(special_args)
# sources and/or javadoc share the same conf
results["src_doc"] = [self._call_coursier(cmd_args, coursier_jar, output_fn, executor)]
return results
def _call_coursier(self, cmd_args, coursier_jar, output_fn, executor):
runner = executor.runner(
classpath=[coursier_jar],
main="coursier.cli.Coursier",
jvm_options=self.get_options().jvm_options,
args=cmd_args,
)
labels = [WorkUnitLabel.COMPILER] if self.get_options().report else [WorkUnitLabel.TOOL]
return_code = util.execute_runner(runner, self.context.new_workunit, "coursier", labels)
if return_code:
raise TaskError(f"The coursier process exited non-zero: {return_code}")
with open(output_fn, "r") as f:
return json.loads(f.read())
@staticmethod
def _construct_cmd_args(
jars, common_args, global_excludes, pinned_coords, coursier_workdir, json_output_path
):
# Make a copy, so there is no side effect or others using `common_args`
cmd_args = list(common_args)
cmd_args.extend(["--json-output-file", json_output_path])
# Dealing with intransitivity and forced versions.
for j in jars:
if not j.rev:
raise TaskError(
'Undefined revs for jars unsupported by Coursier. "{}"'.format(
repr(j.coordinate).replace("M2Coordinate", "jar")
)
)
module = j.coordinate.simple_coord
if j.coordinate.classifier:
module += f",classifier={j.coordinate.classifier}"
if j.get_url():
jar_url = j.get_url()
module += f",url={parse.quote_plus(jar_url)}"
if j.intransitive:
cmd_args.append("--intransitive")
cmd_args.append(module)
# Force requires specifying the coord again with -V
if j.force:
cmd_args.append("-V")
cmd_args.append(j.coordinate.simple_coord)
# Force pinned coordinates
for m2coord in pinned_coords:
cmd_args.append("-V")
cmd_args.append(m2coord.simple_coord)
# Local exclusions
local_exclude_args = []
for jar in jars:
for ex in jar.excludes:
# `--` means exclude. See --local-exclude-file in `coursier fetch --help`
# If ex.name does not exist, that means the whole org needs to be excluded.
ex_arg = f"{jar.org}:{jar.name}--{ex.org}:{ex.name or '*'}"
local_exclude_args.append(ex_arg)
if local_exclude_args:
with temporary_file(coursier_workdir, cleanup=False) as f:
exclude_file = f.name
with open(exclude_file, "w") as ex_f:
ex_f.write("\n".join(local_exclude_args))
cmd_args.append("--local-exclude-file")
cmd_args.append(exclude_file)
for ex in global_excludes:
cmd_args.append("-E")
cmd_args.append(f"{ex.org}:{ex.name or '*'}")
return cmd_args
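    # --- Illustrative note (not part of the original code) ---
    # For a single forced jar "org.example:lib:1.2" with a "tests" classifier
    # and one global exclude on "org.bad", the argument list built above might
    # look roughly like the following (values hypothetical):
    #   fetch -t --cache <cache> -r <repo> -A <artifact-types>
    #     --json-output-file <tmp>/out.json
    #     org.example:lib:1.2,classifier=tests -V org.example:lib:1.2
    #     -E org.bad:*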
def _load_json_result(
self,
conf,
compile_classpath,
coursier_cache_path,
invalidation_check,
pants_jar_path_base,
result,
override_classifiers=None,
):
"""Given a coursier run result, load it into compile_classpath by target.
:param compile_classpath: `ClasspathProducts` that will be modified
:param coursier_cache_path: cache location that is managed by coursier
:param invalidation_check: InvalidationCheck
:param pants_jar_path_base: location under pants workdir that contains all the hardlinks to coursier cache
:param result: result dict converted from the json produced by one coursier run
:return: n/a
"""
# Parse the coursier result
flattened_resolution = self._extract_dependencies_by_root(result)
coord_to_resolved_jars = self._map_coord_to_resolved_jars(
result, coursier_cache_path, pants_jar_path_base
)
# Construct a map from org:name to the reconciled org:name:version coordinate
        # This is used when there won't be a conflict_resolution entry because the conflict
# was resolved in pants.
org_name_to_org_name_rev = {}
for coord in coord_to_resolved_jars.keys():
org_name_to_org_name_rev[f"{coord.org}:{coord.name}"] = coord
jars_per_target = []
for vt in invalidation_check.all_vts:
t = vt.target
jars_to_digest = []
if isinstance(t, JarLibrary):
def get_transitive_resolved_jars(my_coord, resolved_jars):
transitive_jar_path_for_coord = []
coord_str = str(my_coord)
if coord_str in flattened_resolution and my_coord in resolved_jars:
transitive_jar_path_for_coord.append(resolved_jars[my_coord])
for c in flattened_resolution[coord_str]:
j = resolved_jars.get(self.to_m2_coord(c))
if j:
transitive_jar_path_for_coord.append(j)
return transitive_jar_path_for_coord
for jar in t.jar_dependencies:
# if there are override classifiers, then force use of those.
if override_classifiers:
coord_candidates = [
jar.coordinate.copy(classifier=c) for c in override_classifiers
]
else:
coord_candidates = [jar.coordinate]
# if conflict resolution entries, then update versions to the resolved ones.
if jar.coordinate.simple_coord in result["conflict_resolution"]:
parsed_conflict = self.to_m2_coord(
result["conflict_resolution"][jar.coordinate.simple_coord]
)
coord_candidates = [
c.copy(rev=parsed_conflict.rev) for c in coord_candidates
]
elif f"{jar.coordinate.org}:{jar.coordinate.name}" in org_name_to_org_name_rev:
parsed_conflict = org_name_to_org_name_rev[
f"{jar.coordinate.org}:{jar.coordinate.name}"
]
coord_candidates = [
c.copy(rev=parsed_conflict.rev) for c in coord_candidates
]
for coord in coord_candidates:
transitive_resolved_jars = get_transitive_resolved_jars(
coord, coord_to_resolved_jars
)
if transitive_resolved_jars:
for tjar in transitive_resolved_jars:
jars_to_digest.append(tjar)
jars_per_target.append((t, jars_to_digest))
for target, jars_to_add in self.add_directory_digests_for_jars(jars_per_target):
if override_classifiers is not None:
for jar in jars_to_add:
compile_classpath.add_jars_for_targets(
[target], jar.coordinate.classifier, [jar]
)
else:
compile_classpath.add_jars_for_targets([target], conf, jars_to_add)
def _populate_results_dir(self, vts_results_dir, results):
with open(os.path.join(vts_results_dir, self.RESULT_FILENAME), "w") as f:
json.dump(results, f)
def _load_from_results_dir(
self,
compile_classpath,
vts_results_dir,
coursier_cache_path,
invalidation_check,
pants_jar_path_base,
):
"""Given vts_results_dir, load the results which can be from multiple runs of coursier into
compile_classpath.
:return: True if success; False if any of the classpath is not valid anymore.
"""
result_file_path = os.path.join(vts_results_dir, self.RESULT_FILENAME)
if not os.path.exists(result_file_path):
return
with open(result_file_path, "r") as f:
results = json.load(f)
for conf, result_list in results.items():
for result in result_list:
try:
self._load_json_result(
conf,
compile_classpath,
coursier_cache_path,
invalidation_check,
pants_jar_path_base,
result,
self._override_classifiers_for_conf(conf),
)
except CoursierResultNotFound:
return False
return True
@classmethod
def _extract_dependencies_by_root(cls, result):
"""Only extracts the transitive dependencies for the given coursier resolve. Note the
"dependencies" field is already transitive.
Example:
{
"conflict_resolution": {},
"dependencies": [
{
"coord": "a",
"dependencies": ["b", "c"]
"file": ...
},
{
"coord": "b",
"dependencies": []
"file": ...
},
{
"coord": "c",
"dependencies": []
"file": ...
}
]
}
Should return { "a": ["b", "c"], "b": [], "c": [] }
:param result: coursier result like the example.
:return: a simplified view with the top artifact as the roots.
"""
flat_result = defaultdict(list)
for artifact in result["dependencies"]:
flat_result[artifact["coord"]].extend(artifact["dependencies"])
return flat_result
@classmethod
def _map_coord_to_resolved_jars(cls, result, coursier_cache_path, pants_jar_path_base):
"""Map resolved files to each org:name:version.
Example:
{
"conflict_resolution": {},
"dependencies": [
{
"coord": "a",
"dependencies": ["b", "c"],
"file": "a.jar"
},
{
"coord": "b",
"dependencies": [],
"file": "b.jar"
},
{
"coord": "c",
"dependencies": [],
"file": "c.jar"
},
{
"coord": "a:sources",
"dependencies": ["b", "c"],
"file": "a-sources.jar"
},
]
}
Should return:
{
M2Coordinate("a", ...): ResolvedJar(classifier='', path/cache_path="a.jar"),
M2Coordinate("a", ..., classifier="sources"): ResolvedJar(classifier='sources', path/cache_path="a-sources.jar"),
M2Coordinate("b", ...): ResolvedJar(classifier='', path/cache_path="b.jar"),
M2Coordinate("c", ...): ResolvedJar(classifier='', path/cache_path="c.jar"),
}
:param result: coursier json output
:param coursier_cache_path: coursier cache location
:param pants_jar_path_base: location under pants workdir to store the hardlink to the coursier cache
:return: a map from maven coordinate to a resolved jar.
"""
coord_to_resolved_jars = dict()
for dep in result["dependencies"]:
coord = dep["coord"]
jar_path = dep.get("file", None)
if not jar_path:
# NB: Not all coordinates will have associated files.
# This is fine. Some coordinates will just have dependencies.
continue
if not os.path.exists(jar_path):
raise CoursierResultNotFound(f"Jar path not found: {jar_path}")
pants_path = cls._get_path_to_jar(coursier_cache_path, pants_jar_path_base, jar_path)
if not os.path.exists(pants_path):
safe_mkdir(os.path.dirname(pants_path))
safe_hardlink_or_copy(jar_path, pants_path)
coord = cls.to_m2_coord(coord)
resolved_jar = ResolvedJar(coord, cache_path=jar_path, pants_path=pants_path)
coord_to_resolved_jars[coord] = resolved_jar
return coord_to_resolved_jars
@classmethod
def to_m2_coord(cls, coord_str):
return M2Coordinate.from_string(coord_str)
@classmethod
def _get_path_to_jar(cls, coursier_cache_path, pants_jar_path_base, jar_path):
"""Create the path to the jar that will live in .pants.d.
:param coursier_cache_path: coursier cache location
:param pants_jar_path_base: location under pants workdir to store the hardlink to the coursier cache
:param jar_path: path of the jar
:return:
"""
if os.path.abspath(coursier_cache_path) not in os.path.abspath(jar_path):
# Appending the string 'absolute' to the jar_path and joining that is a hack to work around
# python's os.path.join behavior of throwing away all components that come before an
# absolute path. See https://docs.python.org/3.3/library/os.path.html#os.path.join
return os.path.join(pants_jar_path_base, os.path.normpath("absolute/" + jar_path))
else:
return os.path.join(
pants_jar_path_base, "relative", os.path.relpath(jar_path, coursier_cache_path)
)
class CoursierResolve(CoursierMixin, NailgunTask):
@classmethod
def subsystem_dependencies(cls):
return super().subsystem_dependencies() + (JvmResolveSubsystem,)
@classmethod
def product_types(cls):
return ["compile_classpath", "resolve_sources_signal", "resolve_javadocs_signal"]
@classmethod
def prepare(cls, options, round_manager):
super().prepare(options, round_manager)
# Codegen may inject extra resolvable deps, so make sure we have a product dependency
# on relevant codegen tasks, if any.
round_manager.optional_data("java")
round_manager.optional_data("scala")
@classmethod
def register_options(cls, register):
super().register_options(register)
@classmethod
def implementation_version(cls):
return super().implementation_version() + [("CoursierResolve", 2)]
def execute(self):
"""Resolves the specified confs for the configured targets and returns an iterator over
tuples of (conf, jar path)."""
classpath_products = self.context.products.get_data(
"compile_classpath",
init_func=ClasspathProducts.init_func(self.get_options().pants_workdir),
)
executor = self.create_java_executor()
self.resolve(
self.context.targets(),
classpath_products,
sources=self.context.products.is_required_data("resolve_sources_signal"),
javadoc=self.context.products.is_required_data("resolve_javadocs_signal"),
executor=executor,
)
def check_artifact_cache_for(self, invalidation_check):
# Coursier resolution is an output dependent on the entire target set, and is not divisible
# by target. So we can only cache it keyed by the entire target set.
global_vts = VersionedTargetSet.from_versioned_targets(invalidation_check.all_vts)
return [global_vts]
class CoursierResolveFingerprintStrategy(FingerprintStrategy):
def __init__(self, confs):
super().__init__()
self._confs = sorted(confs or [])
def compute_fingerprint(self, target):
hash_elements_for_target = []
if isinstance(target, JarLibrary):
managed_jar_artifact_set = JarDependencyManagement.global_instance().for_target(target)
if managed_jar_artifact_set:
hash_elements_for_target.append(str(managed_jar_artifact_set.id))
hash_elements_for_target.append(target.payload.fingerprint())
elif isinstance(target, JvmTarget) and target.payload.excludes:
hash_elements_for_target.append(target.payload.fingerprint(field_keys=("excludes",)))
else:
pass
if not hash_elements_for_target:
return None
hasher = hashlib.sha1()
hasher.update(target.payload.fingerprint().encode())
for conf in self._confs:
hasher.update(conf.encode())
for element in hash_elements_for_target:
hasher.update(element.encode())
# Just in case so we do not collide with ivy cache
hasher.update(b"coursier")
return hasher.hexdigest()
def __hash__(self):
return hash((type(self), "-".join(self._confs)))
def __eq__(self, other):
return type(self) == type(other) and self._confs == other._confs
def calculate_classpath(targets):
"""Creates a consistent classpath and list of excludes for the passed targets.
It also modifies the JarDependency objects' excludes to contain all the jars excluded by
provides.
:param iterable targets: List of targets to collect JarDependencies and excludes from.
:returns: A pair of a list of JarDependencies, and a set of excludes to apply globally.
"""
jars = OrderedDict()
global_excludes = set()
provide_excludes = set()
targets_processed = set()
# Support the ivy force concept when we sanely can for internal dep conflicts.
# TODO(John Sirois): Consider supporting / implementing the configured ivy revision picking
# strategy generally.
def add_jar(jr):
# TODO(John Sirois): Maven allows for depending on an artifact at one rev and one of its
        # attachments (classified artifacts) at another. Ivy does not allow this; the dependency
# can carry only 1 rev and that hosts multiple artifacts for that rev. This conflict
# resolution happens at the classifier level, allowing skew in a
# multi-artifact/multi-classifier dependency. We only find out about the skew later in
# `_generate_jar_template` below which will blow up with a conflict. Move this logic closer
# together to get a more clear validate, then emit ivy.xml then resolve flow instead of the
# spread-out validations happening here.
# See: https://github.com/pantsbuild/pants/issues/2239
coord = (jr.org, jr.name, jr.classifier)
existing = jars.get(coord)
jars[coord] = jr if not existing else _resolve_conflict(existing=existing, proposed=jr)
def collect_jars(tgt):
if isinstance(tgt, JarLibrary):
for jr in tgt.jar_dependencies:
add_jar(jr)
def collect_excludes(tgt):
target_excludes = tgt.payload.get_field_value("excludes")
if target_excludes:
global_excludes.update(target_excludes)
def collect_provide_excludes(tgt):
if not (isinstance(tgt, ExportableJvmLibrary) and tgt.provides):
return
logger.debug(
"Automatically excluding jar {}.{}, which is provided by {}".format(
tgt.provides.org, tgt.provides.name, tgt
)
)
provide_excludes.add(Exclude(org=tgt.provides.org, name=tgt.provides.name))
def collect_elements(tgt):
targets_processed.add(tgt)
collect_jars(tgt)
collect_excludes(tgt)
collect_provide_excludes(tgt)
for target in targets:
target.walk(collect_elements, predicate=lambda tgt: tgt not in targets_processed)
# If a source dep is exported (i.e., has a provides clause), it should always override
# remote/binary versions of itself, i.e. "round trip" dependencies.
# TODO: Move back to applying provides excludes as target-level excludes when they are no
# longer global.
if provide_excludes:
additional_excludes = tuple(provide_excludes)
new_jars = OrderedDict()
for coordinate, jar in jars.items():
new_jars[coordinate] = jar.copy(excludes=jar.excludes + additional_excludes)
jars = new_jars
return list(jars.values()), global_excludes
def _resolve_conflict(existing, proposed):
if existing.rev is None:
return proposed
if proposed.rev is None:
return existing
if proposed == existing:
if proposed.force:
return proposed
return existing
elif existing.force and proposed.force:
raise ConflictingDepsError(
"Cannot force {}#{};{} to both rev {} and {}".format(
proposed.org, proposed.name, proposed.classifier or "", existing.rev, proposed.rev,
)
)
elif existing.force:
logger.debug(
"Ignoring rev {} for {}#{};{} already forced to {}".format(
proposed.rev, proposed.org, proposed.name, proposed.classifier or "", existing.rev,
)
)
return existing
elif proposed.force:
logger.debug(
"Forcing {}#{};{} from {} to {}".format(
proposed.org, proposed.name, proposed.classifier or "", existing.rev, proposed.rev,
)
)
return proposed
else:
if Revision.lenient(proposed.rev) > Revision.lenient(existing.rev):
logger.debug(
"Upgrading {}#{};{} from rev {} to {}".format(
proposed.org,
proposed.name,
proposed.classifier or "",
existing.rev,
proposed.rev,
)
)
return proposed
else:
return existing
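# Illustrative sketch (not part of the original module): exercising _resolve_conflict with
# lightweight stand-ins for JarDependency. Only the attributes the function reads
# (org, name, classifier, rev, force) are modelled; real callers pass JarDependency
# objects, so this exists only to make the precedence rules concrete.
from collections import namedtuple
_FakeJar = namedtuple("_FakeJar", ["org", "name", "classifier", "rev", "force"])
def _demo_resolve_conflict():
    older = _FakeJar("org.example", "lib", None, "1.0.0", False)
    newer = _FakeJar("org.example", "lib", None, "2.0.0", False)
    pinned = _FakeJar("org.example", "lib", None, "1.0.0", True)
    # With no force flags, the higher revision wins.
    assert _resolve_conflict(existing=older, proposed=newer) is newer
    # A forced revision beats a newer, unforced proposal.
    assert _resolve_conflict(existing=pinned, proposed=newer) is pinned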
import os, shutil, mimetypes, boto3
from pprint import PrettyPrinter
from abc import ABCMeta, abstractmethod
from botocore import exceptions as bx
from shiftmedia import exceptions as x
class Backend(metaclass=ABCMeta):
"""
Abstract backend
This defines methods your backend must implement in order to
work with media storage
"""
@abstractmethod
def __init__(self, url='http://localhost'):
"""
Backend constructor
Requires a base storage url to build links.
:param url: string - base storage url
"""
self._url = url
def get_url(self):
"""
Get URL
Returns base URL of storage
"""
return self._url
@abstractmethod
def put(self, src, id, force=False):
"""
Put file to storage
Does not require a filename as it will be extracted from the provided id.
Will raise an exception on an attempt to overwrite an existing file, which
you can suppress with the force flag.
"""
pass
@abstractmethod
def put_variant(self, src, id, filename, force=False):
"""
Put file variant to storage
Save a local file in storage under the given id and filename. Will raise an
exception on an attempt to overwrite an existing file, which you can
suppress with the force flag.
"""
pass
@abstractmethod
def retrieve_original(self, id, local_path):
"""
Retrieve original
Download file original from storage and put to local temp path
"""
pass
@abstractmethod
def delete(self, id):
"""
Delete
Remove file from storage by id
"""
pass
@abstractmethod
def parse_url(self, url):
"""
Parse url
Processes url to return a tuple of id and filename. This is being used
when we create dynamic image resizes to retrieve the original based on
resize filename.
:param url: string - resize url
:return: tuple - id and filename
"""
pass
@abstractmethod
def clear_variants(self):
"""
Clear variants
Iterates through storage and removes all files that are not originals.
This is a good way to clear residual files not being used and
regenerate the ones in use.
:return: Bool
"""
pass
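# Illustrative sketch (not part of the library): the smallest possible concrete backend,
# keeping files in an in-memory dict. It exists only to show which methods a custom
# backend must provide; the id/filename convention mirrored from the local backend below
# (uuid parts joined by dashes, with the file name appended) is an assumption, not an
# API guarantee.
class BackendMemory(Backend):
    def __init__(self, url='http://localhost'):
        super().__init__(url)
        self._files = {}  # maps (id, filename) -> bytes
    def put(self, src, id, force=False):
        filename = '-'.join(id.split('-')[5:])
        return self.put_variant(src, id, filename, force)
    def put_variant(self, src, id, filename, force=False):
        if not force and (id, filename) in self._files:
            raise x.FileExists('File exists, use force option to overwrite')
        with open(src, 'rb') as handle:
            self._files[(id, filename)] = handle.read()
        return id
    def retrieve_original(self, id, local_path):
        filename = '-'.join(id.split('-')[5:])
        os.makedirs(local_path, exist_ok=True)
        dst = os.path.join(local_path, filename)
        with open(dst, 'wb') as handle:
            handle.write(self._files[(id, filename)])
        return dst
    def delete(self, id):
        for key in [k for k in self._files if k[0] == id]:
            del self._files[key]
        return True
    def parse_url(self, url):
        url = url.replace(self._url, '').strip('/').lower().split('/')
        return '-'.join(url[:-1]), url[-1]
    def clear_variants(self):
        # keep only entries whose filename equals the tail of the id (the originals)
        self._files = {k: v for k, v in self._files.items()
                       if k[1] == '-'.join(k[0].split('-')[5:])}
        return True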
class BackendLocal(Backend):
"""
Local backend
Stores file locally in a directory without transferring to remote storage
"""
def __init__(self, local_path=None, url='http://localhost'):
"""
Backend constructor
Requires a local storage path and base storage url.
:param local_path: string - path to local temp dir
:param url: string - base storage url
"""
super().__init__(url)
self._path = local_path
@property
def path(self):
"""
Get path
Returns path to local storage and creates one if necessary
"""
if not os.path.exists(self._path):
os.makedirs(self._path)
return self._path
def id_to_path(self, id):
"""
Id to path
Returns a list of directories extracted from id
:param id: string - object id
:return: list
"""
parts = id.lower().split('-')[0:5]
tail = id[len('-'.join(parts)) + 1:]
parts.append(tail)
return parts
def parse_url(self, url):
"""
Parse url
Processes url to return a tuple of id and filename. This is being used
when we create dynamic image resizes to retrieve the original based on
resize filename.
:param url: string - resize url
:return: tuple - id and filename
"""
url = url.replace(self._url, '')
url = url.strip('/').lower()
url = url.split('/')
id = '-'.join(url[:-1])
filename = url[-1]
return id, filename
def put(self, src, id, force=False):
"""
Put file to storage
Does not require a filename as it will be extracted from the provided id.
The resulting path will have the following structure:
3c72aedc/ba25/11e6/569/406c8f413974/file.jpg/file.jpg
:param src: string - path to source file
:param id: string - generated id
:param force: bool - whether to overwrite existing
:return: string - generated id
"""
filename = '-'.join(id.split('-')[5:])
return self.put_variant(src, id, filename, force)
def put_variant(self, src, id, filename, force=False):
"""
Put file variant to storage
Save a local file in storage under the given id and filename. Will raise an
exception on an attempt to overwrite an existing file, which you can
suppress with the force flag.
"""
if not os.path.exists(src):
msg = 'Unable to find local file [{}]'
raise x.LocalFileNotFound(msg.format(src))
parts = self.id_to_path(id)
dir = os.path.join(self.path, *parts)
os.makedirs(dir, exist_ok=True)
dst = os.path.join(self.path, *parts, filename)
if not force and os.path.exists(dst):
msg = 'File [' + filename + '] exists under [' + id + ']. '
msg += 'Use force option to overwrite.'
raise x.FileExists(msg)
shutil.copyfile(src, dst)
return id
def delete(self, id):
"""
Delete
Remove file from storage by id
"""
id = str(id)
path = os.path.join(self.path, *id.split('-')[0:5])
shutil.rmtree(path)
return True
def retrieve_original(self, id, local_path):
"""
Retrieve original
Download file from storage and put to local temp path
"""
path = self.id_to_path(id)
filename = path[5]
src = os.path.join(self.path, *path, filename)
dst_dir = os.path.join(local_path, id)
dst = os.path.join(dst_dir, filename)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir, exist_ok=True)
shutil.copyfile(src, dst)
return dst
def clear_variants(self):
"""
Clear variants
Iterates through storage and removes all files that are not originals.
This is a good way to clear residual files not being used and
regenerate the ones in use.
:return: Bool
"""
for subdir, dirs, files in os.walk(self.path):
for file in files:
if subdir.endswith(file): continue # skip originals
path = os.path.join(subdir, file)
os.remove(path)
return True
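# Illustrative usage sketch (not part of the library). Ids are assumed to follow the
# "uuid-parts-plus-filename" convention implied by id_to_path above; the paths and the id
# below are hypothetical and reuse the example from the put() docstring.
def _demo_local_backend(tmp='/tmp/shiftmedia-demo'):
    backend = BackendLocal(local_path=tmp, url='http://localhost')
    id = '3c72aedc-ba25-11e6-569-406c8f413974-file.jpg'
    # create a throwaway source file so put() has something to copy
    os.makedirs(tmp, exist_ok=True)
    src = os.path.join(tmp, 'source.jpg')
    with open(src, 'wb') as f:
        f.write(b'not really a jpeg')
    backend.put(src, id, force=True)
    # the original can be pulled back into a scratch directory
    local = backend.retrieve_original(id, os.path.join(tmp, 'downloads'))
    # and a public URL can be mapped back to (id, filename)
    print(backend.parse_url(
        'http://localhost/3c72aedc/ba25/11e6/569/406c8f413974/file.jpg/file.jpg'))
    return local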
class BackendS3(Backend):
"""
Amazon S3 backend
Stores files in an amazon s3 bucket
"""
def __init__(
self,
key_id,
access_secret,
bucket,
region,
url='http://localhost'
):
"""
S3 Backend constructor
Creates an instance of s3 backend, requires credentials to access
amazon s3 and bucket name.
:param key_id: string - AWS IAM Key id
:param access_secret: string - AWS IAM Access secret
:param bucket: string - AWS S3 bucket name, e.g. 'test-bucket'
:param region: string - AWS S3 bucket region, e.g. 'eu-west-1'
:param url: string - base storage url
"""
self.bucket_name = bucket
self.bucket_region = region
self.credentials = dict(
aws_access_key_id=key_id,
aws_secret_access_key=access_secret,
region_name=region, # a bit weird that region goes here
)
super().__init__(url)
def pp(self, what):
""" Pretty-prints an object"""
printer = PrettyPrinter(indent=2)
printer.pprint(what)
def id_to_path(self, id):
"""
Id to path
Returns a list of directories extracted from id
:param id: string, - object id
:return: list
"""
parts = id.lower().split('-')[0:5]
tail = id[len('-'.join(parts)) + 1:]
parts.append(tail)
return parts
def parse_url(self, url):
"""
Parse url
Processes url to return a tuple of id and filename. This is being used
when we create dynamic image resizes to retrieve the original based on
resize filename.
:param url: string - resize url
:return: tuple - id and filename
"""
url = url.replace(self._url, '')
url = url.strip('/').lower()
url = url.split('/')
id = '-'.join(url[:-1])
filename = url[-1]
return id, filename
def exists(self, object):
"""
Exists
Checks whether a file or directory key (ending with '/') exists and returns
a boolean result. Be careful with directories - they might appear to exist
when in fact they don't, e.g. the existence of /some/path/file.txt doesn't
necessarily mean /some/path/ exists as a key. Thus it is more reliable to
check for file existence, i.e. the FULL key.
:param object: string - file or directory/
:return: bool
"""
try:
resource = boto3.resource('s3', **self.credentials)
resource.Object(self.bucket_name, object).load()
except bx.ClientError as e:
if e.response['Error']['Code'] == '404': return False
else: raise e
return True
def recursive_delete(self, path=None):
"""
Recursive delete
Deletes all objects recursively under given path. If path is not
provided, will delete every object in the bucket. Be careful!
:param path: string - objects starting with this will be deleted
:return: None
"""
client = boto3.client('s3', **self.credentials)
paginator = client.get_paginator('list_objects_v2')
params = dict(Bucket=self.bucket_name)
if path: params['Prefix'] = path
pages = paginator.paginate(**params)
delete_us = dict(Objects=[])
bucket = self.bucket_name
for item in pages.search('Contents'):
if not item: continue
delete_us['Objects'].append(dict(Key=item['Key']))
# flush full page
if len(delete_us['Objects']) >= 999:
client.delete_objects(Bucket=bucket, Delete=delete_us)
delete_us = dict(Objects=[])
# flush last page
if len(delete_us['Objects']):
client.delete_objects(Bucket=bucket, Delete=delete_us)
def put(self, src, id, force=False, content_type=None, encoding=None):
"""
Put file to storage
Does not require a filename as it will be extracted from the provided id.
The resulting path will have the following structure:
3c72aedc/ba25/11e6/569/406c8f413974/file.jpg/file.jpg
:param src: string - path to source file
:param id: string - generated id
:param force: bool - whether to overwrite existing
:param content_type: string - content/type, guessed if none
:param encoding: string - content encoding, guessed if none
:return: string - generated id
"""
filename = '-'.join(id.split('-')[5:])
return self.put_variant(
src,
id,
filename,
force,
content_type,
encoding
)
def put_variant(
self,
src,
id,
filename,
force=False,
content_type=None,
encoding=None):
"""
Put file variant to storage
Save a local file in storage under the given id and filename. Will raise an
exception on an attempt to overwrite an existing file, which you can
suppress with the force flag. By default content-type and content-encoding
are guessed from the file extension; you can override either to set your own.
:param src: string - path to local file
:param id: string - storage object id
:param filename: string - variant filename
:param force: bool - whether to overwrite if exists
:param content_type: string - content/type, guessed if none
:param encoding: string - content encoding, guessed if none
:return: string put object id
"""
if not os.path.exists(src):
msg = 'Unable to find local file [{}]'
raise x.LocalFileNotFound(msg.format(src))
path = '/'.join(self.id_to_path(id)) + '/' + filename
if not force and self.exists(path):
msg = 'File [' + filename + '] exists under [' + id + ']. '
msg += 'Use force option to overwrite.'
raise x.FileExists(msg)
if not content_type or not encoding:
guessed = mimetypes.guess_type(src)
content_type = content_type if content_type else guessed[0]
encoding = encoding if encoding else guessed[1]
client = boto3.client('s3', **self.credentials)
with open(src, 'rb') as src:
params = dict(
ACL='public-read',
Bucket=self.bucket_name,
Key=path,
Body=src
)
if content_type: params['ContentType'] = content_type
if encoding: params['ContentEncoding'] = encoding
client.put_object(**params)
return id
def delete(self, id):
"""
Delete
Remove file from storage by id. Since it searches for the keys
starting (!) with the id, it can accept nonexistent ids.
"""
path = '/'.join(self.id_to_path(id))
self.recursive_delete(path)
def retrieve_original(self, id, local_path):
"""
Retrieve original
Download file from storage and put to local temp path
:param id: string - storage object id
:param local_path: string - local path to download to
:return: path to local download
"""
path = self.id_to_path(id)
filename = path[5]
dst_dir = os.path.join(local_path, id)
dst = os.path.join(dst_dir, filename)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir, exist_ok=True)
filename = '-'.join(id.split('-')[5:])
src = '/'.join(self.id_to_path(id)) + '/' + filename
client = boto3.client('s3', **self.credentials)
with open(dst, 'wb') as data:
client.download_fileobj(
Bucket=self.bucket_name,
Key=src,
Fileobj=data
)
return dst
def clear_variants(self):
"""
Clear variants
Iterates through storage and removes all files that are not originals.
This is a good way to clear residual files not being used and
regenerate the ones in use.
Please also consider configuring a bucket lifecycle expiration policy
in order to remove older images.
:return: Bool
"""
client = boto3.client('s3', **self.credentials)
paginator = client.get_paginator('list_objects_v2')
pages = paginator.paginate(Bucket=self.bucket_name)
delete_us = dict(Objects=[])
bucket = self.bucket_name
for item in pages.search('Contents'):
if not item: continue
key = str(item['Key'])
# skip dirs
if key.endswith('/'):
continue
# skip originals
parts = key.split('/')
length = len(parts)
if parts[length-1] == parts[length-2]: continue
delete_us['Objects'].append(dict(Key=item['Key']))
# flush full page
if len(delete_us['Objects']) >= 999:
client.delete_objects(Bucket=bucket, Delete=delete_us)
delete_us = dict(Objects=[])
# flush last page
if len(delete_us['Objects']):
client.delete_objects(Bucket=bucket, Delete=delete_us)
return True
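# Illustrative usage sketch (not part of the library): wiring up the S3 backend. The
# credentials, bucket, region, url and source path below are placeholders; the calls that
# reach AWS are only meaningful against a real bucket with valid IAM credentials.
def _demo_s3_backend(key_id, access_secret):
    backend = BackendS3(
        key_id=key_id,
        access_secret=access_secret,
        bucket='my-media-bucket',                       # placeholder bucket name
        region='eu-west-1',
        url='https://my-media-bucket.s3.amazonaws.com'  # placeholder base url
    )
    id = '3c72aedc-ba25-11e6-569-406c8f413974-file.jpg'
    # content type / encoding are guessed from the extension unless given explicitly;
    # '/tmp/source.jpg' must exist locally for the upload to work
    backend.put('/tmp/source.jpg', id, force=True)
    return backend.exists('/'.join(backend.id_to_path(id)) + '/file.jpg')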
"""
Event parser and human readable log generator.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/logbook/
"""
import asyncio
import logging
from datetime import timedelta
from itertools import groupby
import voluptuous as vol
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
from homeassistant.components import sun
from homeassistant.components.frontend import register_built_in_panel
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP, EVENT_STATE_CHANGED,
STATE_NOT_HOME, STATE_OFF, STATE_ON,
ATTR_HIDDEN, HTTP_BAD_REQUEST)
from homeassistant.core import State, split_entity_id, DOMAIN as HA_DOMAIN
DOMAIN = "logbook"
DEPENDENCIES = ['recorder', 'frontend']
_LOGGER = logging.getLogger(__name__)
CONF_EXCLUDE = 'exclude'
CONF_INCLUDE = 'include'
CONF_ENTITIES = 'entities'
CONF_DOMAINS = 'domains'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
CONF_EXCLUDE: vol.Schema({
vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
vol.Optional(CONF_DOMAINS, default=[]): vol.All(cv.ensure_list,
[cv.string])
}),
CONF_INCLUDE: vol.Schema({
vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
vol.Optional(CONF_DOMAINS, default=[]): vol.All(cv.ensure_list,
[cv.string])
})
}),
}, extra=vol.ALLOW_EXTRA)
EVENT_LOGBOOK_ENTRY = 'logbook_entry'
GROUP_BY_MINUTES = 15
ATTR_NAME = 'name'
ATTR_MESSAGE = 'message'
ATTR_DOMAIN = 'domain'
ATTR_ENTITY_ID = 'entity_id'
LOG_MESSAGE_SCHEMA = vol.Schema({
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_MESSAGE): cv.template,
vol.Optional(ATTR_DOMAIN): cv.slug,
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
})
def log_entry(hass, name, message, domain=None, entity_id=None):
"""Add an entry to the logbook."""
hass.add_job(async_log_entry, hass, name, message, domain, entity_id)
def async_log_entry(hass, name, message, domain=None, entity_id=None):
"""Add an entry to the logbook."""
data = {
ATTR_NAME: name,
ATTR_MESSAGE: message
}
if domain is not None:
data[ATTR_DOMAIN] = domain
if entity_id is not None:
data[ATTR_ENTITY_ID] = entity_id
hass.bus.async_fire(EVENT_LOGBOOK_ENTRY, data)
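# Illustrative sketch (not part of Home Assistant): how another component might push a
# custom line into the logbook via the helper above. `hass` is assumed to be a running
# HomeAssistant instance; the name, message and entity_id are hypothetical.
def _example_custom_entry(hass):
    log_entry(
        hass,
        name='Irrigation',
        message='started the morning cycle',
        domain='switch',
        entity_id='switch.garden_irrigation',
    )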
def setup(hass, config):
"""Listen for download events to download files."""
@callback
def log_message(service):
"""Handle sending notification message service calls."""
message = service.data[ATTR_MESSAGE]
name = service.data[ATTR_NAME]
domain = service.data.get(ATTR_DOMAIN)
entity_id = service.data.get(ATTR_ENTITY_ID)
message.hass = hass
message = message.async_render()
async_log_entry(hass, name, message, domain, entity_id)
hass.http.register_view(LogbookView(config.get(DOMAIN, {})))
register_built_in_panel(hass, 'logbook', 'Logbook',
'mdi:format-list-bulleted-type')
hass.services.register(DOMAIN, 'log', log_message,
schema=LOG_MESSAGE_SCHEMA)
return True
class LogbookView(HomeAssistantView):
"""Handle logbook view requests."""
url = '/api/logbook'
name = 'api:logbook'
extra_urls = ['/api/logbook/{datetime}']
def __init__(self, config):
"""Initilalize the logbook view."""
self.config = config
@asyncio.coroutine
def get(self, request, datetime=None):
"""Retrieve logbook entries."""
if datetime:
datetime = dt_util.parse_datetime(datetime)
if datetime is None:
return self.json_message('Invalid datetime', HTTP_BAD_REQUEST)
else:
datetime = dt_util.start_of_local_day()
start_day = dt_util.as_utc(datetime)
end_day = start_day + timedelta(days=1)
hass = request.app['hass']
events = yield from hass.loop.run_in_executor(
None, _get_events, hass, start_day, end_day)
events = _exclude_events(events, self.config)
return self.json(humanify(events))
class Entry(object):
"""A human readable version of the log."""
def __init__(self, when=None, name=None, message=None, domain=None,
entity_id=None):
"""Initialize the entry."""
self.when = when
self.name = name
self.message = message
self.domain = domain
self.entity_id = entity_id
def as_dict(self):
"""Convert entry to a dict to be used within JSON."""
return {
'when': self.when,
'name': self.name,
'message': self.message,
'domain': self.domain,
'entity_id': self.entity_id,
}
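# Illustrative sketch (not part of Home Assistant): the JSON-facing shape of a single
# logbook line. The name, message and entity_id are fabricated for the example.
def _example_entry_dict():
    entry = Entry(
        when=dt_util.utcnow(),
        name='Front door',
        message='is at home',
        domain='device_tracker',
        entity_id='device_tracker.front_door',
    )
    # -> {'when': ..., 'name': 'Front door', 'message': 'is at home',
    #     'domain': 'device_tracker', 'entity_id': 'device_tracker.front_door'}
    return entry.as_dict()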
def humanify(events):
"""Generator that converts a list of events into Entry objects.
Will try to group events if possible:
- if 2+ sensor updates occur within GROUP_BY_MINUTES, show only the last
- if a Home Assistant stop and start happen in the same minute, call it restarted
"""
# Group events in batches of GROUP_BY_MINUTES
for _, g_events in groupby(
events,
lambda event: event.time_fired.minute // GROUP_BY_MINUTES):
events_batch = list(g_events)
# Keep track of last sensor states
last_sensor_event = {}
# Group HA start/stop events
# Maps minute of event to 1: stop, 2: stop + start
start_stop_events = {}
# Process events
for event in events_batch:
if event.event_type == EVENT_STATE_CHANGED:
entity_id = event.data.get('entity_id')
if entity_id is None:
continue
if entity_id.startswith('sensor.'):
last_sensor_event[entity_id] = event
elif event.event_type == EVENT_HOMEASSISTANT_STOP:
if event.time_fired.minute in start_stop_events:
continue
start_stop_events[event.time_fired.minute] = 1
elif event.event_type == EVENT_HOMEASSISTANT_START:
if event.time_fired.minute not in start_stop_events:
continue
start_stop_events[event.time_fired.minute] = 2
# Yield entries
for event in events_batch:
if event.event_type == EVENT_STATE_CHANGED:
to_state = State.from_dict(event.data.get('new_state'))
# If last_changed != last_updated, only attributes have changed;
# we do not report on that yet. Also filter auto groups.
if not to_state or \
to_state.last_changed != to_state.last_updated or \
to_state.domain == 'group' and \
to_state.attributes.get('auto', False):
continue
domain = to_state.domain
# Skip all but the last sensor state
if domain == 'sensor' and \
event != last_sensor_event[to_state.entity_id]:
continue
# Don't show continuous sensor value changes in the logbook
if domain == 'sensor' and \
to_state.attributes.get('unit_of_measurement'):
continue
yield Entry(
event.time_fired,
name=to_state.name,
message=_entry_message_from_state(domain, to_state),
domain=domain,
entity_id=to_state.entity_id)
elif event.event_type == EVENT_HOMEASSISTANT_START:
if start_stop_events.get(event.time_fired.minute) == 2:
continue
yield Entry(
event.time_fired, "Home Assistant", "started",
domain=HA_DOMAIN)
elif event.event_type == EVENT_HOMEASSISTANT_STOP:
if start_stop_events.get(event.time_fired.minute) == 2:
action = "restarted"
else:
action = "stopped"
yield Entry(
event.time_fired, "Home Assistant", action,
domain=HA_DOMAIN)
elif event.event_type == EVENT_LOGBOOK_ENTRY:
domain = event.data.get(ATTR_DOMAIN)
entity_id = event.data.get(ATTR_ENTITY_ID)
if domain is None and entity_id is not None:
try:
domain = split_entity_id(str(entity_id))[0]
except IndexError:
pass
yield Entry(
event.time_fired, event.data.get(ATTR_NAME),
event.data.get(ATTR_MESSAGE), domain,
entity_id)
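# Illustrative sketch (not part of Home Assistant): feeding humanify() a single fabricated
# logbook_entry event. Real callers pass recorder event rows; the tiny stand-in below only
# carries the three attributes humanify reads (event_type, data, time_fired).
def _example_humanify():
    class _FakeEvent(object):
        def __init__(self, event_type, data, time_fired):
            self.event_type = event_type
            self.data = data
            self.time_fired = time_fired
    event = _FakeEvent(
        EVENT_LOGBOOK_ENTRY,
        {ATTR_NAME: 'Mailbox', ATTR_MESSAGE: 'received new mail',
         ATTR_ENTITY_ID: 'binary_sensor.mailbox'},
        dt_util.utcnow(),
    )
    # yields one Entry whose domain is derived from the entity_id
    return [entry.as_dict() for entry in humanify([event])]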
def _get_events(hass, start_day, end_day):
"""Get events for a period of time."""
from homeassistant.components.recorder.models import Events
from homeassistant.components.recorder.util import (
execute, session_scope)
with session_scope(hass=hass) as session:
query = session.query(Events).order_by(
Events.time_fired).filter(
(Events.time_fired > start_day) &
(Events.time_fired < end_day))
return execute(query)
def _exclude_events(events, config):
"""Get lists of excluded entities and platforms."""
excluded_entities = []
excluded_domains = []
included_entities = []
included_domains = []
exclude = config.get(CONF_EXCLUDE)
if exclude:
excluded_entities = exclude[CONF_ENTITIES]
excluded_domains = exclude[CONF_DOMAINS]
include = config.get(CONF_INCLUDE)
if include:
included_entities = include[CONF_ENTITIES]
included_domains = include[CONF_DOMAINS]
filtered_events = []
for event in events:
domain, entity_id = None, None
if event.event_type == EVENT_STATE_CHANGED:
to_state = State.from_dict(event.data.get('new_state'))
# Do not report on new entities
if event.data.get('old_state') is None:
continue
# Do not report on entity removal
if not to_state:
continue
# exclude entities which are customized as hidden
hidden = to_state.attributes.get(ATTR_HIDDEN, False)
if hidden:
continue
domain = to_state.domain
entity_id = to_state.entity_id
elif event.event_type == EVENT_LOGBOOK_ENTRY:
domain = event.data.get(ATTR_DOMAIN)
entity_id = event.data.get(ATTR_ENTITY_ID)
if domain or entity_id:
# filter if only excluded is configured for this domain
if excluded_domains and domain in excluded_domains and \
not included_domains:
if (included_entities and entity_id not in included_entities) \
or not included_entities:
continue
# filter if only included is configured for this domain
elif not excluded_domains and included_domains and \
domain not in included_domains:
if (included_entities and entity_id not in included_entities) \
or not included_entities:
continue
# filter if included and excluded is configured for this domain
elif excluded_domains and included_domains and \
(domain not in included_domains or
domain in excluded_domains):
if (included_entities and entity_id not in included_entities) \
or not included_entities or domain in excluded_domains:
continue
# filter if only included is configured for this entity
elif not excluded_domains and not included_domains and \
included_entities and entity_id not in included_entities:
continue
# check if logbook entry is excluded for this entity
if entity_id in excluded_entities:
continue
filtered_events.append(event)
return filtered_events
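# Illustrative sketch (not part of Home Assistant): the shape of the filter configuration
# _exclude_events() expects, i.e. the validated contents of the `logbook:` section. The
# entities and domains listed here are hypothetical.
def _example_filter_config():
    config = {
        CONF_EXCLUDE: {
            CONF_ENTITIES: ['sensor.noisy_temperature'],
            CONF_DOMAINS: ['sun'],
        },
        CONF_INCLUDE: {
            CONF_ENTITIES: [],
            CONF_DOMAINS: ['light', 'switch'],
        },
    }
    # with no events there is nothing to filter, but the call shows the contract
    return _exclude_events([], config)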
# pylint: disable=too-many-return-statements
def _entry_message_from_state(domain, state):
"""Convert a state to a message for the logbook."""
# We pass domain in so we don't have to split entity_id again
if domain == 'device_tracker':
if state.state == STATE_NOT_HOME:
return 'is away'
else:
return 'is at {}'.format(state.state)
elif domain == 'sun':
if state.state == sun.STATE_ABOVE_HORIZON:
return 'has risen'
else:
return 'has set'
elif state.state == STATE_ON:
# Future: combine groups and their entity entries?
return "turned on"
elif state.state == STATE_OFF:
return "turned off"
return "changed to {}".format(state.state)
# This file is part of Invenio.
# Copyright (C) 2007, 2008, 2010, 2011, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import print_function
__revision__ = "$Id$"
__lastupdated__ = "$Date$"
import calendar, commands, datetime, time, os, cPickle, random, cgi
from operator import itemgetter
from invenio.config import CFG_TMPDIR, \
CFG_SITE_URL, \
CFG_SITE_NAME, \
CFG_BINDIR, \
CFG_CERN_SITE, \
CFG_BIBCIRCULATION_ITEM_STATUS_CANCELLED, \
CFG_BIBCIRCULATION_ITEM_STATUS_CLAIMED, \
CFG_BIBCIRCULATION_ITEM_STATUS_IN_PROCESS, \
CFG_BIBCIRCULATION_ITEM_STATUS_NOT_ARRIVED, \
CFG_BIBCIRCULATION_ITEM_STATUS_ON_LOAN, \
CFG_BIBCIRCULATION_ITEM_STATUS_ON_ORDER, \
CFG_BIBCIRCULATION_ITEM_STATUS_ON_SHELF, \
CFG_BIBCIRCULATION_ITEM_STATUS_OPTIONAL, \
CFG_BIBCIRCULATION_REQUEST_STATUS_DONE, \
CFG_BIBCIRCULATION_ILL_STATUS_CANCELLED
from invenio.modules.indexer.tokenizers.BibIndexJournalTokenizer import CFG_JOURNAL_TAG
from invenio.utils.url import redirect_to_url
from invenio.legacy.search_engine import perform_request_search, \
get_collection_reclist, \
get_most_popular_field_values, \
search_pattern
from invenio.legacy.bibrecord import get_fieldvalues
from invenio.legacy.dbquery import run_sql, \
wash_table_column_name
from invenio.legacy.websubmit.admin_dblayer import get_docid_docname_alldoctypes
from invenio.legacy.bibcirculation.utils import book_title_from_MARC, \
book_information_from_MARC
from invenio.legacy.bibcirculation.db_layer import get_id_bibrec, \
get_borrower_data
CFG_CACHE_LAST_UPDATED_TIMESTAMP_FILE = None
from invenio.utils.date import convert_datetext_to_datestruct, convert_datestruct_to_dategui
from invenio.legacy.bibsched.bibtask import get_modified_records_since
WEBSTAT_SESSION_LENGTH = 48 * 60 * 60 # seconds
WEBSTAT_GRAPH_TOKENS = '-=#+@$%&XOSKEHBC'
# KEY EVENT TREND SECTION
def get_keyevent_trend_collection_population(args, return_sql=False):
"""
Returns the quantity of documents in Invenio for
the given timestamp range.
@param args['collection']: A collection name
@type args['collection']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# collect action dates
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
if args.get('collection', 'All') == 'All':
sql_query_g = _get_sql_query("creation_date", args['granularity'],
"bibrec")
sql_query_i = "SELECT COUNT(id) FROM bibrec WHERE creation_date < %s"
initial_quantity = run_sql(sql_query_i, (lower, ))[0][0]
return _get_keyevent_trend(args, sql_query_g, initial_quantity=initial_quantity,
return_sql=return_sql, sql_text=
"Previous count: %s<br />Current count: %%s" % (sql_query_i),
acumulative=True)
else:
ids = get_collection_reclist(args['collection'])
if len(ids) == 0:
return []
g = get_keyevent_trend_new_records(args, return_sql, True)
sql_query_i = "SELECT id FROM bibrec WHERE creation_date < %s"
if return_sql:
return "Previous count: %s<br />Current count: %s" % (sql_query_i % lower, g)
initial_quantity = len(filter(lambda x: x[0] in ids, run_sql(sql_query_i, (lower, ))))
return _get_trend_from_actions(g, initial_quantity, args['t_start'],
args['t_end'], args['granularity'], args['t_format'], acumulative=True)
def get_keyevent_trend_new_records(args, return_sql=False, only_action=False):
"""
Returns the number of new records uploaded during the given timestamp range.
@param args['collection']: A collection name
@type args['collection']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
if args.get('collection', 'All') == 'All':
return _get_keyevent_trend(args, _get_sql_query("creation_date", args['granularity'],
"bibrec"),
return_sql=return_sql)
else:
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
ids = get_collection_reclist(args['collection'])
if len(ids) == 0:
return []
sql = _get_sql_query("creation_date", args["granularity"], "bibrec",
extra_select=", id", group_by=False, count=False)
if return_sql:
return sql % (lower, upper)
recs = run_sql(sql, (lower, upper))
if recs:
def add_count(i_list, element):
""" Reduce function to create a dictionary with the count of ids
for each date """
if i_list and element == i_list[-1][0]:
i_list[-1][1] += 1
else:
i_list.append([element, 1])
return i_list
action_dates = reduce(add_count,
map(lambda x: x[0], filter(lambda x: x[1] in ids, recs)),
[])
else:
action_dates = []
if only_action:
return action_dates
return _get_trend_from_actions(action_dates, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
def get_keyevent_trend_search_frequency(args, return_sql=False):
"""
Returns the number of searches (of any kind) carried out
during the given timestamp range.
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
return _get_keyevent_trend(args, _get_sql_query("date", args["granularity"],
"query INNER JOIN user_query ON id=id_query"),
return_sql=return_sql)
def get_keyevent_trend_comments_frequency(args, return_sql=False):
"""
Returns the number of comments (of any kind) carried out
during the given timestamp range.
@param args['collection']: A collection name
@type args['collection']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
if args.get('collection', 'All') == 'All':
sql = _get_sql_query("date_creation", args["granularity"],
"cmtRECORDCOMMENT")
else:
sql = _get_sql_query("date_creation", args["granularity"],
"cmtRECORDCOMMENT", conditions=
_get_collection_recids_for_sql_query(args['collection']))
return _get_keyevent_trend(args, sql, return_sql=return_sql)
def get_keyevent_trend_search_type_distribution(args, return_sql=False):
"""
Returns the number of searches carried out during the given
timestamp range, partitioned by type: Simple and Advanced.
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# SQL to determine all simple searches:
simple = _get_sql_query("date", args["granularity"],
"query INNER JOIN user_query ON id=id_query",
conditions="urlargs LIKE '%%p=%%'")
# SQL to determine all advanced searches:
advanced = _get_sql_query("date", args["granularity"],
"query INNER JOIN user_query ON id=id_query",
conditions="urlargs LIKE '%%as=1%%'")
# Compute the trend for both types
s_trend = _get_keyevent_trend(args, simple,
return_sql=return_sql, sql_text="Simple: %s")
a_trend = _get_keyevent_trend(args, advanced,
return_sql=return_sql, sql_text="Advanced: %s")
# Assemble, according to return type
if return_sql:
return "%s <br /> %s" % (s_trend, a_trend)
return [(s_trend[i][0], (s_trend[i][1], a_trend[i][1]))
for i in range(len(s_trend))]
def get_keyevent_trend_download_frequency(args, return_sql=False):
"""
Returns the number of full text downloads carried out
during the given timestamp range.
@param args['collection']: A collection name
@type args['collection']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# Collect list of timestamps of insertion in the specific collection
if args.get('collection', 'All') == 'All':
return _get_keyevent_trend(args, _get_sql_query("download_time",
args["granularity"], "rnkDOWNLOADS"), return_sql=return_sql)
else:
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
ids = get_collection_reclist(args['collection'])
if len(ids) == 0:
return []
sql = _get_sql_query("download_time", args["granularity"], "rnkDOWNLOADS",
extra_select=", GROUP_CONCAT(id_bibrec)")
if return_sql:
return sql % (lower, upper)
action_dates = []
for result in run_sql(sql, (lower, upper)):
count = result[1]
for id in result[2].split(","):
if id == '' or not int(id) in ids:
count -= 1
action_dates.append((result[0], count))
return _get_trend_from_actions(action_dates, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
def get_keyevent_trend_number_of_loans(args, return_sql=False):
"""
Returns the number of loans carried out
during the given timestamp range.
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
return _get_keyevent_trend(args, _get_sql_query("loaned_on",
args["granularity"], "crcLOAN"), return_sql=return_sql)
def get_keyevent_trend_web_submissions(args, return_sql=False):
"""
Returns the quantity of websubmissions in Invenio for
the given timestamp range.
@param args['doctype']: A doctype name
@type args['doctype']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
if args['doctype'] == 'all':
sql = _get_sql_query("cd", args["granularity"], "sbmSUBMISSIONS",
conditions="action='SBI' AND status='finished'")
res = _get_keyevent_trend(args, sql, return_sql=return_sql)
else:
sql = _get_sql_query("cd", args["granularity"], "sbmSUBMISSIONS",
conditions="doctype=%s AND action='SBI' AND status='finished'")
res = _get_keyevent_trend(args, sql, extra_param=[args['doctype']],
return_sql=return_sql)
return res
def get_keyevent_loan_statistics(args, return_sql=False):
"""
Data:
- Number of documents (=records) loaned
- Number of items loaned on the total number of items
- Number of items never loaned on the total number of items
- Average time between the date of the record creation and the date of the first loan
Filter by
- in a specified time span
- by UDC (see MARC field 080__a - list to be submitted)
- by item status (available, missing)
- by date of publication (MARC field 260__c)
- by date of the record creation in the database
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['item_status']: available, missing...
@type args['item_status']: str
@param args['publication_date']: MARC field 260__c
@type args['publication_date']: str
@param args['creation_date']: date of the record creation in the database
@type args['creation_date']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# collect action dates
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOAN l "
sql_where = "WHERE loaned_on > %s AND loaned_on < %s "
param = [lower, upper]
if 'udc' in args and args['udc'] != '':
sql_where += "AND l." + _check_udc_value_where()
param.append(_get_udc_truncated(args['udc']))
if 'item_status' in args and args['item_status'] != '':
sql_from += ", crcITEM i "
sql_where += "AND l.barcode = i.barcode AND i.status = %s "
param.append(args['item_status'])
if 'publication_date' in args and args['publication_date'] != '':
sql_where += "AND l.id_bibrec IN ( SELECT brb.id_bibrec \
FROM bibrec_bib26x brb, bib26x b WHERE brb.id_bibxxx = b.id AND tag='260__c' \
AND value LIKE %s)"
param.append('%%%s%%' % args['publication_date'])
if 'creation_date' in args and args['creation_date'] != '':
sql_from += ", bibrec br "
sql_where += "AND br.id=l.id_bibrec AND br.creation_date LIKE %s "
param.append('%%%s%%' % args['creation_date'])
param = tuple(param)
# Number of loans:
loans_sql = "SELECT COUNT(DISTINCT l.id_bibrec) " + sql_from + sql_where
items_loaned_sql = "SELECT COUNT(DISTINCT l.barcode) " + sql_from + sql_where
# Only the CERN site wants the items of the collection "Books & Proceedings"
if CFG_CERN_SITE:
items_in_book_coll = _get_collection_recids_for_sql_query("Books & Proceedings")
if items_in_book_coll == "":
total_items_sql = 0
else:
total_items_sql = "SELECT COUNT(*) FROM crcITEM WHERE %s" % \
items_in_book_coll
else: # The rest take all the items
total_items_sql = "SELECT COUNT(*) FROM crcITEM"
# Average time between the date of the record creation and the date of the first loan
avg_sql = "SELECT AVG(DATEDIFF(loaned_on, br.creation_date)) " + sql_from
if not ('creation_date' in args and args['creation_date'] != ''):
avg_sql += ", bibrec br "
avg_sql += sql_where
if not ('creation_date' in args and args['creation_date'] != ''):
avg_sql += "AND br.id=l.id_bibrec "
if return_sql:
return "<ol><li>%s</li><li>Items loaned * 100 / Number of items <ul><li>\
Items loaned: %s </li><li>Number of items: %s</li></ul></li><li>100 - Items \
loaned on total number of items</li><li>%s</li></ol>" % \
(loans_sql % param, items_loaned_sql % param, total_items_sql, avg_sql % param)
loans = run_sql(loans_sql, param)[0][0]
items_loaned = run_sql(items_loaned_sql, param)[0][0]
if total_items_sql:
total_items = run_sql(total_items_sql)[0][0]
else:
total_items = 0
if total_items == 0:
loaned_on_total = 0
never_loaned_on_total = 0
else:
# Number of items loaned on the total number of items:
loaned_on_total = float(items_loaned) * 100 / float(total_items)
# Number of items never loaned on the total number of items:
never_loaned_on_total = 100L - loaned_on_total
avg = run_sql(avg_sql, param)[0][0]
if avg:
avg = float(avg)
else:
avg = 0L
return ((loans, ), (loaned_on_total, ), (never_loaned_on_total, ), (avg, ))
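# Illustrative sketch (not part of the original module): the args dictionary most
# key-event functions expect, used here with return_sql=True so the generated SQL can be
# inspected without executing it (on a CERN-configured site the item-count branch still
# needs collection access). The '%Y-%m-%d' format string is an assumption about the
# webstat date convention.
def _example_loan_statistics_sql():
    args = {
        't_start': '2014-01-01',
        't_end': '2014-12-31',
        't_format': '%Y-%m-%d',
        'udc': '',
        'item_status': '',
        'publication_date': '',
        'creation_date': '',
    }
    return get_keyevent_loan_statistics(args, return_sql=True)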
def get_keyevent_loan_lists(args, return_sql=False, limit=50):
"""
Lists:
- List of documents (= records) never loaned
- List of most loaned documents (columns: number of loans,
number of copies and the creation date of the record, in
order to calculate the number of loans by copy), sorted
by decreasing order (50 items)
Filter by
- in a specified time span
- by UDC (see MARC field 080__a - list to be submitted)
- by loan period (4 week loan, one week loan...)
- by a certain number of loans
- by date of publication (MARC field 260__c)
- by date of the record creation in the database
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['loan_period']: 4 week loan, one week loan...
@type args['loan_period']: str
@param args['min_loan']: minimum number of loans
@type args['min_loan']: int
@param args['max_loan']: maximum number of loans
@type args['max_loan']: int
@param args['publication_date']: MARC field 260__c
@type args['publication_date']: str
@param args['creation_date']: date of the record creation in the database
@type args['creation_date']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_where = []
param = []
sql_from = ""
if 'udc' in args and args['udc'] != '':
sql_where.append("i." + _check_udc_value_where())
param.append(_get_udc_truncated(args['udc']))
if 'loan_period' in args and args['loan_period'] != '':
sql_where.append("loan_period = %s")
param.append(args['loan_period'])
if 'publication_date' in args and args['publication_date'] != '':
sql_where.append("i.id_bibrec IN ( SELECT brb.id_bibrec \
FROM bibrec_bib26x brb, bib26x b WHERE brb.id_bibxxx = b.id AND tag='260__c' \
AND value LIKE %s)")
param.append('%%%s%%' % args['publication_date'])
if 'creation_date' in args and args['creation_date'] != '':
sql_from += ", bibrec br"
sql_where.append("br.id=i.id_bibrec AND br.creation_date LIKE %s")
param.append('%%%s%%' % args['creation_date'])
if sql_where:
sql_where = "WHERE %s AND" % " AND ".join(sql_where)
else:
sql_where = "WHERE"
param = tuple(param + [lower, upper])
# SQL for both queries
check_num_loans = "HAVING "
if 'min_loans' in args and args['min_loans'] != '':
check_num_loans += "COUNT(*) >= %s" % args['min_loans']
if 'max_loans' in args and args['max_loans'] != '' and args['max_loans'] != 0:
if check_num_loans != "HAVING ":
check_num_loans += " AND "
check_num_loans += "COUNT(*) <= %s" % args['max_loans']
# Optimized to get all the data in only one query (not call get_fieldvalues several times)
mldocs_sql = "SELECT i.id_bibrec, COUNT(*) \
FROM crcLOAN l, crcITEM i%s %s l.barcode=i.barcode AND type = 'normal' AND \
loaned_on > %%s AND loaned_on < %%s GROUP BY i.id_bibrec %s" % \
(sql_from, sql_where, check_num_loans)
limit_n = ""
if limit > 0:
limit_n = "LIMIT %d" % limit
nldocs_sql = "SELECT id_bibrec, COUNT(*) FROM crcITEM i%s %s \
barcode NOT IN (SELECT id_bibrec FROM crcLOAN WHERE loaned_on > %%s AND \
loaned_on < %%s AND type = 'normal') GROUP BY id_bibrec ORDER BY COUNT(*) DESC %s" % \
(sql_from, sql_where, limit_n)
items_sql = "SELECT id_bibrec, COUNT(*) items FROM crcITEM GROUP BY id_bibrec"
creation_date_sql = "SELECT creation_date FROM bibrec WHERE id=%s"
authors_sql = "SELECT bx.value FROM bib10x bx, bibrec_bib10x bibx \
WHERE bx.id = bibx.id_bibxxx AND bx.tag LIKE '100__a' AND bibx.id_bibrec=%s"
title_sql = "SELECT GROUP_CONCAT(bx.value SEPARATOR ' ') value FROM bib24x bx, bibrec_bib24x bibx \
WHERE bx.id = bibx.id_bibxxx AND bx.tag LIKE %s AND bibx.id_bibrec=%s GROUP BY bibx.id_bibrec"
edition_sql = "SELECT bx.value FROM bib25x bx, bibrec_bib25x AS bibx \
WHERE bx.id = bibx.id_bibxxx AND bx.tag LIKE '250__a' AND bibx.id_bibrec=%s"
if return_sql:
return "Most loaned: %s<br \>Never loaned: %s" % \
(mldocs_sql % param, nldocs_sql % param)
mldocs = run_sql(mldocs_sql, param)
items = dict(run_sql(items_sql))
order_m = []
for mldoc in mldocs:
order_m.append([mldoc[0], mldoc[1], items[mldoc[0]], \
float(mldoc[1]) / float(items[mldoc[0]])])
order_m = sorted(order_m, key=itemgetter(3))
order_m.reverse()
# Check limit values
if limit > 0:
order_m = order_m[:limit]
res = [("", "Title", "Author", "Edition", "Number of loans",
"Number of copies", "Date of creation of the record")]
for mldoc in order_m:
res.append(("Most loaned documents",
_check_empty_value(run_sql(title_sql, ('245__%%', mldoc[0], ))),
_check_empty_value(run_sql(authors_sql, (mldoc[0], ))),
_check_empty_value(run_sql(edition_sql, (mldoc[0], ))),
mldoc[1], mldoc[2],
_check_empty_value(run_sql(creation_date_sql, (mldoc[0], )))))
nldocs = run_sql(nldocs_sql, param)
for nldoc in nldocs:
res.append(("Not loaned documents",
_check_empty_value(run_sql(title_sql, ('245__%%', nldoc[0], ))),
_check_empty_value(run_sql(authors_sql, (nldoc[0], ))),
_check_empty_value(run_sql(edition_sql, (nldoc[0], ))),
0, items[nldoc[0]],
_check_empty_value(run_sql(creation_date_sql, (nldoc[0], )))))
# nldocs = run_sql(nldocs_sql, param_n)
return (res)
def get_keyevent_renewals_lists(args, return_sql=False, limit=50):
"""
Lists:
- List of most renewed items, sorted by decreasing order (50 items)
Filter by
- in a specified time span
- by UDC (see MARC field 080__a - list to be submitted)
- by collection
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['collection']: collection of the record
@type args['collection']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOAN l, crcITEM i "
sql_where = "WHERE loaned_on > %s AND loaned_on < %s AND i.barcode = l.barcode "
param = [lower, upper]
if 'udc' in args and args['udc'] != '':
sql_where += "AND l." + _check_udc_value_where()
param.append(_get_udc_truncated(args['udc']))
filter_coll = False
if 'collection' in args and args['collection'] != '':
filter_coll = True
recid_list = get_collection_reclist(args['collection'])
param = tuple(param)
if limit > 0:
limit = "LIMIT %d" % limit
else:
limit = ""
sql = "SELECT i.id_bibrec, SUM(number_of_renewals) %s %s \
GROUP BY i.id_bibrec ORDER BY SUM(number_of_renewals) DESC %s" \
% (sql_from, sql_where, limit)
if return_sql:
return sql % param
# Results:
res = [("Title", "Author", "Edition", "Number of renewals")]
for rec, renewals in run_sql(sql, param):
if filter_coll and rec not in recid_list:
continue
author = get_fieldvalues(rec, "100__a")
if len(author) > 0:
author = author[0]
else:
author = ""
edition = get_fieldvalues(rec, "250__a")
if len(edition) > 0:
edition = edition[0]
else:
edition = ""
res.append((book_title_from_MARC(rec), author, edition, int(renewals)))
return (res)
def get_keyevent_returns_table(args, return_sql=False):
"""
Data:
- Number of overdue returns in a timespan
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
# Overdue returns:
sql = "SELECT COUNT(*) FROM crcLOAN l WHERE loaned_on > %s AND loaned_on < %s AND \
due_date < NOW() AND (returned_on IS NULL OR returned_on > due_date)"
if return_sql:
return sql % (lower, upper)
return ((run_sql(sql, (lower, upper))[0][0], ), )
def get_keyevent_trend_returns_percentage(args, return_sql=False):
"""
Returns the number of overdue returns and the total number of returns
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# SQL to determine overdue returns:
overdue = _get_sql_query("due_date", args["granularity"], "crcLOAN",
conditions="due_date < NOW() AND due_date IS NOT NULL \
AND (returned_on IS NULL OR returned_on > due_date)",
dates_range_param="loaned_on")
# SQL to determine all returns:
total = _get_sql_query("due_date", args["granularity"], "crcLOAN",
conditions="due_date < NOW() AND due_date IS NOT NULL",
dates_range_param="loaned_on")
# Compute the trend for both types
o_trend = _get_keyevent_trend(args, overdue,
return_sql=return_sql, sql_text="Overdue: %s")
t_trend = _get_keyevent_trend(args, total,
return_sql=return_sql, sql_text="Total: %s")
# Assemble, according to return type
if return_sql:
return "%s <br /> %s" % (o_trend, t_trend)
return [(o_trend[i][0], (o_trend[i][1], t_trend[i][1]))
for i in range(len(o_trend))]
def get_keyevent_ill_requests_statistics(args, return_sql=False):
"""
Data:
- Number of ILL requests
- Number of satisfied ILL requests 2 weeks after the date of request
creation on a timespan
- Average time between the date and hour of the ILL request and the
date and hour of delivery of the item to the user, on a timespan
- Average time between the date and hour the ILL request was sent to
the supplier and the date and hour of delivery of the item, on a timespan
Filter by
- in a specified time span
- by type of document (book or article)
- by status of the request (= new, sent, etc.)
- by supplier
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['doctype']: type of document (book or article)
@type args['doctype']: str
@param args['status']: status of the request (= new, sent, etc.)
@type args['status']: str
@param args['supplier']: supplier
@type args['supplier']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcILLREQUEST ill "
sql_where = "WHERE period_of_interest_from > %s AND period_of_interest_from < %s "
param = [lower, upper]
if 'doctype' in args and args['doctype'] != '':
sql_where += "AND ill.request_type=%s"
param.append(args['doctype'])
if 'status' in args and args['status'] != '':
sql_where += "AND ill.status = %s "
param.append(args['status'])
else:
sql_where += "AND ill.status != %s "
param.append(CFG_BIBCIRCULATION_ILL_STATUS_CANCELLED)
if 'supplier' in args and args['supplier'] != '':
sql_from += ", crcLIBRARY lib "
sql_where += "AND lib.id=ill.id_crcLIBRARY AND lib.name=%s "
param.append(args['supplier'])
param = tuple(param)
requests_sql = "SELECT COUNT(*) %s %s" % (sql_from, sql_where)
satrequests_sql = "SELECT COUNT(*) %s %s \
AND arrival_date IS NOT NULL AND \
DATEDIFF(arrival_date, period_of_interest_from) < 14 " % (sql_from, sql_where)
avgdel_sql = "SELECT AVG(TIMESTAMPDIFF(DAY, period_of_interest_from, arrival_date)) %s %s \
AND arrival_date IS NOT NULL" % (sql_from, sql_where)
avgsup_sql = "SELECT AVG(TIMESTAMPDIFF(DAY, request_date, arrival_date)) %s %s \
AND arrival_date IS NOT NULL \
AND request_date IS NOT NULL" % (sql_from, sql_where)
if return_sql:
return "<ol><li>%s</li><li>%s</li><li>%s</li><li>%s</li></ol>" % \
(requests_sql % param, satrequests_sql % param,
avgdel_sql % param, avgsup_sql % param)
# Number of requests:
requests = run_sql(requests_sql, param)[0][0]
# Number of satisfied ILL requests 2 weeks after the date of request creation:
satrequests = run_sql(satrequests_sql, param)[0][0]
# Average time between the date and the hour of the ill request date and
# the date and the hour of the delivery item to the user
avgdel = run_sql(avgdel_sql, param)[0][0]
if avgdel:
avgdel = float(avgdel)
else:
avgdel = 0
# Average time between the date and the hour the ILL request was sent to
# the supplier and the date and hour of the delivery item
avgsup = run_sql(avgsup_sql, param)[0][0]
if avgsup:
avgsup = float(avgsup)
else:
avgsup = 0
return ((requests, ), (satrequests, ), (avgdel, ), (avgsup, ))
def get_keyevent_ill_requests_lists(args, return_sql=False, limit=50):
"""
Lists:
- List of ILL requests
Filter by
- in a specified time span
- by type of request (article or book)
- by supplier
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['doctype']: type of request (article or book)
@type args['doctype']: str
@param args['supplier']: supplier
@type args['supplier']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcILLREQUEST ill "
sql_where = "WHERE status != '%s' AND request_date > %%s AND request_date < %%s " \
% CFG_BIBCIRCULATION_ITEM_STATUS_CANCELLED
param = [lower, upper]
if 'doctype' in args and args['doctype'] != '':
sql_where += "AND ill.request_type=%s "
param.append(args['doctype'])
if 'supplier' in args and args['supplier'] != '':
sql_from += ", crcLIBRARY lib "
sql_where += "AND lib.id=ill.id_crcLIBRARY AND lib.name=%s "
param.append(args['supplier'])
param = tuple(param)
if limit > 0:
limit = "LIMIT %d" % limit
else:
limit = ""
sql = "SELECT ill.id, item_info %s %s %s" % (sql_from, sql_where, limit)
if return_sql:
return sql % param
# Results:
res = [("Id", "Title", "Author", "Edition")]
for req_id, item_info in run_sql(sql, param):
item_info = eval(item_info)
try:
res.append((req_id, item_info['title'], item_info['authors'], item_info['edition']))
except KeyError:
pass
return (res)
def get_keyevent_trend_satisfied_ill_requests_percentage(args, return_sql=False):
"""
Returns the number of satisfied ILL requests 2 weeks after the date of request
creation and the total number of ILL requests
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['doctype']: type of document (book or article)
@type args['doctype']: str
@param args['status']: status of the request (= new, sent, etc.)
@type args['status']: str
@param args['supplier']: supplier
@type args['supplier']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
sql_from = "crcILLREQUEST ill "
sql_where = ""
param = []
if 'doctype' in args and args['doctype'] != '':
sql_where += "AND ill.request_type=%s"
param.append(args['doctype'])
if 'status' in args and args['status'] != '':
sql_where += "AND ill.status = %s "
param.append(args['status'])
else:
sql_where += "AND ill.status != %s "
param.append(CFG_BIBCIRCULATION_ILL_STATUS_CANCELLED)
if 'supplier' in args and args['supplier'] != '':
sql_from += ", crcLIBRARY lib "
sql_where += "AND lib.id=ill.id_crcLIBRARY AND lib.name=%s "
param.append(args['supplier'])
# SQL to determine satisfied ILL requests:
satisfied = _get_sql_query("request_date", args["granularity"], sql_from,
conditions="ADDDATE(request_date, 14) < NOW() AND \
(arrival_date IS NULL OR arrival_date < ADDDATE(request_date, 14)) " + sql_where)
# SQL to determine all ILL requests:
total = _get_sql_query("request_date", args["granularity"], sql_from,
conditions="ADDDATE(request_date, 14) < NOW() "+ sql_where)
# Compute the trend for both types
s_trend = _get_keyevent_trend(args, satisfied, extra_param=param,
return_sql=return_sql, sql_text="Satisfied: %s")
t_trend = _get_keyevent_trend(args, total, extra_param=param,
return_sql=return_sql, sql_text="Total: %s")
# Assemble, according to return type
if return_sql:
return "%s <br /> %s" % (s_trend, t_trend)
return [(s_trend[i][0], (s_trend[i][1], t_trend[i][1]))
for i in range(len(s_trend))]
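# Illustrative sketch (hypothetical helper, not part of the original module):
# the trend above pairs each time bucket with a (satisfied, total) tuple, so
# a percentage series can be derived from it like this.
def _example_satisfied_percentage(trend):
    """Turn [(time, (satisfied, total)), ...] into [(time, percent), ...]."""
    return [(when, tot and 100.0 * sat / tot or 0.0)
            for when, (sat, tot) in trend]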
def get_keyevent_items_statistics(args, return_sql=False):
"""
Data:
- The total number of items
- Total number of new items added in last year
Filter by
- in a specified time span
- by collection
- by UDC (see MARC field 080__a - list to be submitted)
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcITEM i "
sql_where = "WHERE "
param = []
if 'udc' in args and args['udc'] != '':
sql_where += "i." + _check_udc_value_where()
param.append(_get_udc_truncated(args['udc']))
# Number of items:
if sql_where == "WHERE ":
sql_where = ""
items_sql = "SELECT COUNT(i.id_bibrec) %s %s" % (sql_from, sql_where)
# Number of new items:
if sql_where == "":
sql_where = "WHERE creation_date > %s AND creation_date < %s "
else:
sql_where += " AND creation_date > %s AND creation_date < %s "
new_items_sql = "SELECT COUNT(i.id_bibrec) %s %s" % (sql_from, sql_where)
if return_sql:
return "Total: %s <br />New: %s" % (items_sql % tuple(param), new_items_sql % tuple(param + [lower, upper]))
return ((run_sql(items_sql, tuple(param))[0][0], ), (run_sql(new_items_sql, tuple(param + [lower, upper]))[0][0], ))
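# Illustrative sketch (not part of the original module): building the SQL for
# the item statistics above; the date range and UDC pattern are hypothetical
# values.
def _example_items_statistics_sql():
    args = {'t_start': '2012-01-01', 't_end': '2012-12-31',
            't_format': '%Y-%m-%d', 'udc': '53*'}
    return get_keyevent_items_statistics(args, return_sql=True)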
def get_keyevent_items_lists(args, return_sql=False, limit=50):
"""
Lists:
- The list of items
Filter by
- by library (=physical location of the item)
- by status (=on loan, available, requested, missing...)
@param args['library']: physical location of the item
    @type args['library']: str
@param args['status']: on loan, available, requested, missing...
@type args['status']: str
"""
sql_from = "FROM crcITEM i "
sql_where = "WHERE "
param = []
if 'library' in args and args['library'] != '':
sql_from += ", crcLIBRARY li "
sql_where += "li.id=i.id_crcLIBRARY AND li.name=%s "
param.append(args['library'])
if 'status' in args and args['status'] != '':
if sql_where != "WHERE ":
sql_where += "AND "
sql_where += "i.status = %s "
param.append(args['status'])
param = tuple(param)
# Results:
res = [("Title", "Author", "Edition", "Barcode", "Publication date")]
if sql_where == "WHERE ":
sql_where = ""
if limit > 0:
limit = "LIMIT %d" % limit
else:
limit = ""
sql = "SELECT i.barcode, i.id_bibrec %s %s %s" % (sql_from, sql_where, limit)
if len(param) == 0:
sqlres = run_sql(sql)
else:
sqlres = run_sql(sql, tuple(param))
sql = sql % param
if return_sql:
return sql
for barcode, rec in sqlres:
author = get_fieldvalues(rec, "100__a")
if len(author) > 0:
author = author[0]
else:
author = ""
edition = get_fieldvalues(rec, "250__a")
if len(edition) > 0:
edition = edition[0]
else:
edition = ""
res.append((book_title_from_MARC(rec),
author, edition, barcode,
book_information_from_MARC(int(rec))[1]))
return (res)
def get_keyevent_loan_request_statistics(args, return_sql=False):
"""
Data:
- Number of hold requests, one week after the date of request creation
- Number of successful hold requests transactions
- Average time between the hold request date and the date of delivery document in a year
Filter by
- in a specified time span
- by item status (available, missing)
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['item_status']: available, missing...
@type args['item_status']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOANREQUEST lr "
sql_where = "WHERE request_date > %s AND request_date < %s "
param = [lower, upper]
if 'item_status' in args and args['item_status'] != '':
sql_from += ", crcITEM i "
sql_where += "AND lr.barcode = i.barcode AND i.status = %s "
param.append(args['item_status'])
param = tuple(param)
custom_table = get_customevent_table("loanrequest")
# Number of hold requests, one week after the date of request creation:
holds = "SELECT COUNT(*) %s, %s ws %s AND ws.request_id=lr.id AND \
DATEDIFF(ws.creation_time, lr.request_date) >= 7" % (sql_from, custom_table, sql_where)
# Number of successful hold requests transactions
succesful_holds = "SELECT COUNT(*) %s %s AND lr.status='%s'" % (sql_from, sql_where,
CFG_BIBCIRCULATION_REQUEST_STATUS_DONE)
# Average time between the hold request date and the date of delivery document in a year
avg_sql = "SELECT AVG(DATEDIFF(ws.creation_time, lr.request_date)) \
%s, %s ws %s AND ws.request_id=lr.id" % (sql_from, custom_table, sql_where)
if return_sql:
return "<ol><li>%s</li><li>%s</li><li>%s</li></ol>" % \
(holds % param, succesful_holds % param, avg_sql % param)
avg = run_sql(avg_sql, param)[0][0]
    if avg is not None:
        avg = int(avg)
    else:
        avg = 0
return ((run_sql(holds, param)[0][0], ),
(run_sql(succesful_holds, param)[0][0], ), (avg, ))
def get_keyevent_loan_request_lists(args, return_sql=False, limit=50):
"""
Lists:
- List of the most requested items
Filter by
- in a specified time span
- by UDC (see MARC field 080__a - list to be submitted)
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOANREQUEST lr "
sql_where = "WHERE request_date > %s AND request_date < %s "
param = [lower, upper]
if 'udc' in args and args['udc'] != '':
sql_where += "AND lr." + _check_udc_value_where()
param.append(_get_udc_truncated(args['udc']))
if limit > 0:
limit = "LIMIT %d" % limit
else:
limit = ""
sql = "SELECT lr.barcode %s %s GROUP BY barcode \
ORDER BY COUNT(*) DESC %s" % (sql_from, sql_where, limit)
if return_sql:
return sql
res = [("Title", "Author", "Edition", "Barcode")]
# Most requested items:
for barcode in run_sql(sql, param):
rec = get_id_bibrec(barcode[0])
author = get_fieldvalues(rec, "100__a")
if len(author) > 0:
author = author[0]
else:
author = ""
edition = get_fieldvalues(rec, "250__a")
if len(edition) > 0:
edition = edition[0]
else:
edition = ""
res.append((book_title_from_MARC(rec), author, edition, barcode[0]))
return (res)
def get_keyevent_user_statistics(args, return_sql=False):
"""
Data:
- Total number of active users (to be defined = at least one transaction in the past year)
Filter by
- in a specified time span
- by registration date
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from_ill = "FROM crcILLREQUEST ill "
sql_from_loan = "FROM crcLOAN l "
sql_where_ill = "WHERE request_date > %s AND request_date < %s "
sql_where_loan = "WHERE loaned_on > %s AND loaned_on < %s "
param = (lower, upper, lower, upper)
# Total number of active users:
users = "SELECT COUNT(DISTINCT user) FROM ((SELECT id_crcBORROWER user %s %s) \
UNION (SELECT id_crcBORROWER user %s %s)) res" % \
(sql_from_ill, sql_where_ill, sql_from_loan, sql_where_loan)
if return_sql:
return users % param
return ((run_sql(users, param)[0][0], ), )
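# Illustrative sketch (hypothetical dates, not part of the original module):
# the active-user count above comes back as a nested single-element tuple,
# e.g. ((42, ), ); with return_sql=True only the query string is returned.
def _example_user_statistics_sql():
    args = {'t_start': '2012-01-01', 't_end': '2012-12-31',
            't_format': '%Y-%m-%d'}
    return get_keyevent_user_statistics(args, return_sql=True)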
def get_keyevent_user_lists(args, return_sql=False, limit=50):
"""
Lists:
- List of most intensive users (ILL requests + Loan)
Filter by
- in a specified time span
- by registration date
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
param = (lower, upper, lower, upper)
if limit > 0:
limit = "LIMIT %d" % limit
else:
limit = ""
sql = "SELECT user, SUM(trans) FROM \
((SELECT id_crcBORROWER user, COUNT(*) trans FROM crcILLREQUEST ill \
WHERE request_date > %%s AND request_date < %%s GROUP BY id_crcBORROWER) UNION \
(SELECT id_crcBORROWER user, COUNT(*) trans FROM crcLOAN l WHERE loaned_on > %%s AND \
loaned_on < %%s GROUP BY id_crcBORROWER)) res GROUP BY user ORDER BY SUM(trans) DESC \
%s" % (limit)
if return_sql:
return sql % param
res = [("Name", "Address", "Mailbox", "E-mail", "Number of transactions")]
# List of most intensive users (ILL requests + Loan):
for borrower_id, trans in run_sql(sql, param):
name, address, mailbox, email = get_borrower_data(borrower_id)
res.append((name, address, mailbox, email, int(trans)))
return (res)
# KEY EVENT SNAPSHOT SECTION
def get_keyevent_snapshot_uptime_cmd():
"""
A specific implementation of get_current_event().
@return: The std-out from the UNIX command 'uptime'.
@type: str
"""
    return _run_cmd('uptime').strip().replace('  ', ' ')
def get_keyevent_snapshot_apache_processes():
"""
A specific implementation of get_current_event().
    @return: The number of Apache processes (root + children), as counted via 'ps'.
@type: str
"""
# The number of Apache processes (root+children)
return _run_cmd('ps -e | grep apache2 | grep -v grep | wc -l')
def get_keyevent_snapshot_bibsched_status():
"""
A specific implementation of get_current_event().
@return: Information about the number of tasks in the different status modes.
@type: [(str, int)]
"""
sql = "SELECT status, COUNT(status) FROM schTASK GROUP BY status"
return [(x[0], int(x[1])) for x in run_sql(sql)]
def get_keyevent_snapshot_sessions():
"""
A specific implementation of get_current_event().
@return: The current number of website visitors (guests, logged in)
@type: (int, int)
"""
# SQL to retrieve sessions in the Guests
sql = "SELECT COUNT(session_expiry) " + \
"FROM session INNER JOIN user ON uid=id " + \
"WHERE email = '' AND " + \
"session_expiry-%d < unix_timestamp() AND " \
% WEBSTAT_SESSION_LENGTH + \
"unix_timestamp() < session_expiry"
guests = run_sql(sql)[0][0]
# SQL to retrieve sessions in the Logged in users
sql = "SELECT COUNT(session_expiry) " + \
"FROM session INNER JOIN user ON uid=id " + \
"WHERE email <> '' AND " + \
"session_expiry-%d < unix_timestamp() AND " \
% WEBSTAT_SESSION_LENGTH + \
"unix_timestamp() < session_expiry"
logged_ins = run_sql(sql)[0][0]
# Assemble, according to return type
return (guests, logged_ins)
def get_keyevent_bibcirculation_report(freq='yearly'):
"""
Monthly and yearly report with the total number of circulation
transactions (loans, renewals, returns, ILL requests, hold request).
@param freq: yearly or monthly
@type freq: str
@return: loans, renewals, returns, ILL requests, hold request
@type: (int, int, int, int, int)
"""
if freq == 'monthly':
datefrom = datetime.date.today().strftime("%Y-%m-01 00:00:00")
else: #yearly
datefrom = datetime.date.today().strftime("%Y-01-01 00:00:00")
loans, renewals = run_sql("SELECT COUNT(*), \
SUM(number_of_renewals) \
FROM crcLOAN WHERE loaned_on > %s", (datefrom, ))[0]
returns = run_sql("SELECT COUNT(*) FROM crcLOAN \
WHERE returned_on!='0000-00-00 00:00:00' and loaned_on > %s", (datefrom, ))[0][0]
illrequests = run_sql("SELECT COUNT(*) FROM crcILLREQUEST WHERE request_date > %s",
(datefrom, ))[0][0]
holdrequest = run_sql("SELECT COUNT(*) FROM crcLOANREQUEST WHERE request_date > %s",
(datefrom, ))[0][0]
return (loans, renewals, returns, illrequests, holdrequest)
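# Illustrative sketch (not part of the original module): unpacking the yearly
# circulation report returned above into its five counters.
def _example_bibcirculation_report_summary():
    loans, renewals, returns, ill, holds = \
        get_keyevent_bibcirculation_report('yearly')
    return "loans=%s renewals=%s returns=%s ill=%s holds=%s" % \
        (loans, renewals, returns, ill, holds)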
def get_last_updates():
"""
    List the date/time when the last updates were done (easy-reading format).
@return: last indexing, last ranking, last sorting, last webcolling
@type: (datetime, datetime, datetime, datetime)
"""
try:
last_index = convert_datestruct_to_dategui(convert_datetext_to_datestruct \
(str(run_sql('SELECT last_updated FROM idxINDEX WHERE \
name="global"')[0][0])))
last_rank = convert_datestruct_to_dategui(convert_datetext_to_datestruct \
(str(run_sql('SELECT last_updated FROM rnkMETHOD ORDER BY \
last_updated DESC LIMIT 1')[0][0])))
last_sort = convert_datestruct_to_dategui(convert_datetext_to_datestruct \
(str(run_sql('SELECT last_updated FROM bsrMETHODDATA ORDER BY \
last_updated DESC LIMIT 1')[0][0])))
file_coll_last_update = open(CFG_CACHE_LAST_UPDATED_TIMESTAMP_FILE, 'r')
last_coll = convert_datestruct_to_dategui(convert_datetext_to_datestruct \
(str(file_coll_last_update.read())))
file_coll_last_update.close()
# database not filled
except IndexError:
return ("", "", "", "")
return (last_index, last_rank, last_sort, last_coll)
def get_list_link(process, category=None):
"""
Builds the link for the list of records not indexed, ranked, sorted or
collected.
@param process: kind of process the records are waiting for (index, rank,
sort, collect)
@type process: str
@param category: specific sub-category of the process.
Index: global, collection, abstract, author, keyword,
reference, reportnumber, title, fulltext, year,
journal, collaboration, affiliation, exactauthor,
caption, firstauthor, exactfirstauthor, authorcount)
Rank: wrd, demo_jif, citation, citerank_citation_t,
citerank_pagerank_c, citerank_pagerank_t
Sort: latest first, title, author, report number,
most cited
Collect: Empty / None
@type category: str
@return: link text
@type: string
"""
if process == "index":
list_registers = run_sql('SELECT id FROM bibrec WHERE \
modification_date > (SELECT last_updated FROM \
idxINDEX WHERE name=%s)', (category,))
elif process == "rank":
list_registers = run_sql('SELECT id FROM bibrec WHERE \
modification_date > (SELECT last_updated FROM \
rnkMETHOD WHERE name=%s)', (category,))
elif process == "sort":
list_registers = run_sql('SELECT id FROM bibrec WHERE \
modification_date > (SELECT last_updated FROM \
bsrMETHODDATA WHERE id_bsrMETHOD=(SELECT id \
FROM bsrMETHOD WHERE name=%s))', (category,))
elif process == "collect":
file_coll_last_update = open(CFG_CACHE_LAST_UPDATED_TIMESTAMP_FILE, 'r')
coll_last_update = file_coll_last_update.read()
file_coll_last_update.close()
list_registers = zip(get_modified_records_since(coll_last_update).tolist())
# build the link
if len(list_registers) == 0:
return "Up to date"
link = '<a href="' + CFG_SITE_URL + '/search?p='
for register in list_registers:
link += 'recid%3A' + str(register[0]) + '+or+'
# delete the last '+or+'
link = link[:len(link)-4]
link += '">' + str(len(list_registers)) + '</a>'
return link
def get_search_link(record_id):
"""
    Auxiliary function that builds the direct link for a given record.
@param record_id: record's id number
@type record_id: int
@return: link text
@type: string
"""
link = '<a href="' + CFG_SITE_URL + '/record/' + \
str(record_id) + '">Record [' + str(record_id) + ']</a>'
return link
def get_ingestion_matching_records(request=None, limit=25):
"""
Fetches all the records matching a given pattern, arranges them by last
    modification date and returns a list.
@param request: requested pattern to match
@type request: str
@return: list of records matching a pattern,
(0,) if no request,
(-1,) if the request was invalid
@type: list
"""
    if request is None or request == "":
return (0,)
try:
records = list(search_pattern(p=request))
except:
return (-1,)
if records == []:
return records
# order by most recent modification date
query = 'SELECT id FROM bibrec WHERE '
for r in records:
query += 'id="' + str(r) + '" OR '
query = query[:len(query)-4]
query += ' ORDER BY modification_date DESC LIMIT %s'
list_records = run_sql(query, (limit,))
final_list = []
for lr in list_records:
final_list.append(lr[0])
return final_list
def get_record_ingestion_status(record_id):
"""
Returns the amount of ingestion methods not updated yet to a given record.
If 0, the record is up to date.
@param record_id: record id number
@type record_id: int
@return: number of methods not updated for the record
@type: int
"""
counter = 0
counter += run_sql('SELECT COUNT(*) FROM bibrec WHERE \
id=%s AND modification_date > (SELECT last_updated FROM \
idxINDEX WHERE name="global")', (record_id, ))[0][0]
counter += run_sql('SELECT COUNT(*) FROM bibrec WHERE \
id=%s AND modification_date > (SELECT last_updated FROM \
rnkMETHOD ORDER BY last_updated DESC LIMIT 1)', \
(record_id, ))[0][0]
    counter += run_sql('SELECT COUNT(*) FROM bibrec WHERE \
id=%s AND modification_date > (SELECT last_updated FROM \
bsrMETHODDATA ORDER BY last_updated DESC LIMIT 1)', \
(record_id, ))[0][0]
file_coll_last_update = open(CFG_CACHE_LAST_UPDATED_TIMESTAMP_FILE, 'r')
last_coll = file_coll_last_update.read()
file_coll_last_update.close()
counter += run_sql('SELECT COUNT(*) FROM bibrec WHERE \
id=%s AND \
modification_date >\
%s', (record_id, last_coll,))[0][0]
return counter
def get_specific_ingestion_status(record_id, process, method=None):
"""
Returns whether a record is or not up to date for a given
process and method.
@param record_id: identification number of the record
@type record_id: int
@param process: kind of process the records may be waiting for (index,
rank, sort, collect)
@type process: str
@param method: specific sub-method of the process.
Index: global, collection, abstract, author, keyword,
reference, reportnumber, title, fulltext, year,
journal, collaboration, affiliation, exactauthor,
caption, firstauthor, exactfirstauthor, authorcount
Rank: wrd, demo_jif, citation, citerank_citation_t,
citerank_pagerank_c, citerank_pagerank_t
Sort: latest first, title, author, report number,
most cited
Collect: Empty / None
@type category: str
@return: text: None if the record is up to date
Last time the method was updated if it is waiting
@type: date/time string
"""
exist = run_sql('SELECT COUNT(*) FROM bibrec WHERE id=%s', (record_id, ))
if exist[0][0] == 0:
return "REG not in DB"
if process == "index":
list_registers = run_sql('SELECT COUNT(*) FROM bibrec WHERE \
id=%s AND modification_date > (SELECT \
last_updated FROM idxINDEX WHERE name=%s)',
(record_id, method,))
last_time = run_sql ('SELECT last_updated FROM idxINDEX WHERE \
name=%s', (method,))[0][0]
elif process == "rank":
list_registers = run_sql('SELECT COUNT(*) FROM bibrec WHERE \
id=%s AND modification_date > (SELECT \
last_updated FROM rnkMETHOD WHERE name=%s)',
(record_id, method,))
last_time = run_sql ('SELECT last_updated FROM rnkMETHOD WHERE \
name=%s', (method,))[0][0]
elif process == "sort":
list_registers = run_sql('SELECT COUNT(*) FROM bibrec WHERE \
id=%s AND modification_date > (SELECT \
last_updated FROM bsrMETHODDATA WHERE \
id_bsrMETHOD=(SELECT id FROM bsrMETHOD \
WHERE name=%s))', (record_id, method,))
last_time = run_sql ('SELECT last_updated FROM bsrMETHODDATA WHERE \
id_bsrMETHOD=(SELECT id FROM bsrMETHOD \
WHERE name=%s)', (method,))[0][0]
elif process == "collect":
file_coll_last_update = open(CFG_CACHE_LAST_UPDATED_TIMESTAMP_FILE, 'r')
last_time = file_coll_last_update.read()
file_coll_last_update.close()
list_registers = run_sql('SELECT COUNT(*) FROM bibrec WHERE id=%s \
AND modification_date > %s',
(record_id, last_time,))
# no results means the register is up to date
if list_registers[0][0] == 0:
return None
else:
return convert_datestruct_to_dategui(convert_datetext_to_datestruct \
(str(last_time)))
def get_title_ingestion(record_id, last_modification):
"""
    Auxiliary function that builds a direct link for a given record, including
    its last modification date.
@param record_id: id number of the record
@type record_id: string
@param last_modification: date/time of the last modification
@type last_modification: string
@return: link text
@type: string
"""
return '<h3><a href="%s/record/%s">Record [%s] last modification: %s</a></h3>' \
% (CFG_SITE_URL, record_id, record_id, last_modification)
def get_record_last_modification (record_id):
"""
Returns the date/time of the last modification made to a given record.
@param record_id: id number of the record
@type record_id: int
@return: date/time of the last modification
@type: string
"""
return convert_datestruct_to_dategui(convert_datetext_to_datestruct \
(str(run_sql('SELECT modification_date FROM bibrec \
WHERE id=%s', (record_id,))[0][0])))
def get_general_status():
"""
    Returns an approximate number of ingestion processes not yet applied to new
    or updated records, using the "global" category.
@return: number of processes not updated
@type: int
"""
return run_sql('SELECT COUNT(*) FROM bibrec WHERE \
modification_date > (SELECT last_updated FROM \
idxINDEX WHERE name="global")')[0][0]
# ERROR LOG STATS
def update_error_log_analyzer():
"""Creates splitted files for today's errors"""
_run_cmd('bash %s/webstat -e -is' % CFG_BINDIR)
def get_invenio_error_log_ranking():
""" Returns the ranking of the errors in the invenio log"""
return _run_cmd('bash %s/webstat -e -ir' % CFG_BINDIR)
def get_invenio_last_n_errors(nerr):
"""Returns the last nerr errors in the invenio log (without details)"""
return _run_cmd('bash %s/webstat -e -il %d' % (CFG_BINDIR, nerr))
def get_invenio_error_details(error):
"""Returns the complete text of the invenio error."""
out = _run_cmd('bash %s/webstat -e -id %s' % (CFG_BINDIR, error))
return out
def get_apache_error_log_ranking():
""" Returns the ranking of the errors in the apache log"""
return _run_cmd('bash %s/webstat -e -ar' % CFG_BINDIR)
# CUSTOM EVENT SECTION
def get_customevent_trend(args):
"""
Returns trend data for a custom event over a given
timestamp range.
@param args['event_id']: The event id
@type args['event_id']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
    @param args['cols']: Columns and the content to filter them by; if missing
                         or empty, all columns are included
@type args['cols']: [ [ str, str ], ]
"""
# Get a MySQL friendly date
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
tbl_name = get_customevent_table(args['event_id'])
col_names = get_customevent_args(args['event_id'])
where = []
sql_param = [lower, upper]
for col_bool, col_title, col_content in args['cols']:
if not col_title in col_names:
continue
if col_content:
if col_bool == "" or not where:
where.append(wash_table_column_name(col_title))
elif col_bool == "and":
where.append("AND %s"
% wash_table_column_name(col_title))
elif col_bool == "or":
where.append("OR %s"
% wash_table_column_name(col_title))
elif col_bool == "and_not":
where.append("AND NOT %s"
% wash_table_column_name(col_title))
else:
continue
where.append(" LIKE %s")
sql_param.append("%" + col_content + "%")
sql = _get_sql_query("creation_time", args['granularity'], tbl_name, " ".join(where))
return _get_trend_from_actions(run_sql(sql, tuple(sql_param)), 0,
args['t_start'], args['t_end'],
args['granularity'], args['t_format'])
def get_customevent_dump(args):
"""
    Similar to a get_event_trend implementation, but no refining (i.e. no
    frequency handling) is carried out whatsoever. This is just a raw dump.
@param args['event_id']: The event id
@type args['event_id']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
    @param args['cols']: Columns and the content to filter them by; if missing
                         or empty, all columns are included
@type args['cols']: [ [ str, str ], ]
"""
# Get a MySQL friendly date
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
# Get customevents
# events_list = [(creation_time, event, [arg1, arg2, ...]), ...]
event_list = []
event_cols = {}
for event_id, i in [(args['ids'][i], str(i))
for i in range(len(args['ids']))]:
# Get all the event arguments and creation times
tbl_name = get_customevent_table(event_id)
col_names = get_customevent_args(event_id)
sql_query = ["SELECT * FROM %s WHERE creation_time > '%%s'" % wash_table_column_name(tbl_name), (lower,)] # kwalitee: disable=sql
sql_query.append("AND creation_time < '%s'" % upper)
sql_param = []
for col_bool, col_title, col_content in args['cols' + i]:
if not col_title in col_names:
continue
if col_content:
if col_bool == "and" or col_bool == "":
sql_query.append("AND %s" % \
wash_table_column_name(col_title))
elif col_bool == "or":
sql_query.append("OR %s" % \
wash_table_column_name(col_title))
elif col_bool == "and_not":
sql_query.append("AND NOT %s" % \
wash_table_column_name(col_title))
else:
continue
sql_query.append(" LIKE %s")
sql_param.append("%" + col_content + "%")
sql_query.append("ORDER BY creation_time DESC")
sql = ' '.join(sql_query)
res = run_sql(sql, tuple(sql_param))
for row in res:
event_list.append((row[1], event_id, row[2:]))
# Get the event col names
try:
event_cols[event_id] = cPickle.loads(run_sql(
"SELECT cols FROM staEVENT WHERE id = %s",
(event_id, ))[0][0])
except TypeError:
event_cols[event_id] = ["Unnamed"]
event_list.sort()
output = []
for row in event_list:
temp = [row[1], row[0].strftime('%Y-%m-%d %H:%M:%S')]
arguments = ["%s: %s" % (event_cols[row[1]][i],
row[2][i]) for i in range(len(row[2]))]
temp.extend(arguments)
output.append(tuple(temp))
return output
def get_customevent_table(event_id):
"""
    Helper function that for a certain event id retrieves the corresponding
event table name.
"""
res = run_sql(
"SELECT CONCAT('staEVENT', number) FROM staEVENT WHERE id = %s", (event_id, ))
try:
return res[0][0]
except IndexError:
# No such event table
return None
def get_customevent_args(event_id):
"""
    Helper function that for a certain event id retrieves the corresponding
event argument (column) names.
"""
res = run_sql("SELECT cols FROM staEVENT WHERE id = %s", (event_id, ))
try:
if res[0][0]:
return cPickle.loads(res[0][0])
else:
return []
except IndexError:
# No such event table
return None
# CUSTOM SUMMARY SECTION
def get_custom_summary_data(query, tag):
"""Returns the annual report data for the specified year
@param query: Search query to make customized report
@type query: str
@param tag: MARC tag for the output
@type tag: str
"""
# Check arguments
if tag == '':
tag = CFG_JOURNAL_TAG.replace("%", "p")
# First get records of the year
recids = perform_request_search(p=query, of="id", wl=0)
# Then return list by tag
pub = get_most_popular_field_values(recids, tag)
if len(pub) == 0:
return []
if CFG_CERN_SITE:
total = sum([x[1] for x in pub])
else:
others = 0
total = 0
first_other = -1
for elem in pub:
total += elem[1]
if elem[1] < 2:
if first_other == -1:
first_other = pub.index(elem)
others += elem[1]
del pub[first_other:]
if others != 0:
pub.append(('Others', others))
pub.append(('TOTAL', total))
return pub
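# Illustrative sketch (hypothetical data, not part of the original module):
# the summary rows above are (label, count) pairs, with small contributors
# folded into 'Others' and a trailing 'TOTAL' row.
def _example_custom_summary_rows():
    return [('Phys. Rev. D', 120), ('JHEP', 80), ('Others', 7),
            ('TOTAL', 207)]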
def create_custom_summary_graph(data, path, title):
"""
Creates a pie chart with the information from the custom summary and
saves it in the file specified by the path argument
"""
# If no input, we don't bother about anything
if len(data) == 0:
return False
os.environ['HOME'] = CFG_TMPDIR
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
except:
from invenio.errorlib import register_exception
register_exception()
return False
# make a square figure and axes
matplotlib.rcParams['font.size'] = 8
labels = [x[0] for x in data]
numb_elem = len(labels)
width = 6 + float(numb_elem) / 7
gfile = plt.figure(1, figsize=(width, 6))
plt.axes([0.1, 0.1, 4.2 / width, 0.7])
numb = [x[1] for x in data]
total = sum(numb)
fracs = [x * 100 / total for x in numb]
colors = []
random.seed()
for i in range(numb_elem):
col = 0.5 + float(i) / (float(numb_elem) * 2.0)
rand = random.random() / 2.0
if i % 3 == 0:
red = col
green = col + rand
blue = col - rand
if green > 1.0:
green = 1
elif i % 3 == 1:
red = col - rand
green = col
blue = col + rand
if blue > 1.0:
blue = 1
elif i % 3 == 2:
red = col + rand
green = col - rand
blue = col
if red > 1.0:
red = 1
colors.append((red, green, blue))
patches = plt.pie(fracs, colors=tuple(colors), labels=labels,
autopct='%1i%%', pctdistance=0.8, shadow=True)[0]
ttext = plt.title(title)
plt.setp(ttext, size='xx-large', color='b', family='monospace', weight='extra bold')
legend_keywords = {"prop": {"size": "small"}}
plt.figlegend(patches, labels, 'lower right', **legend_keywords)
plt.savefig(path)
plt.close(gfile)
return True
# GRAPHER
def create_graph_trend(trend, path, settings):
"""
Creates a graph representation out of data produced from get_event_trend.
@param trend: The trend data
@type trend: [(str, str|int|(str|int,...))]
@param path: Where to store the graph
@type path: str
@param settings: Dictionary of graph parameters
@type settings: dict
"""
# If no input, we don't bother about anything
if not trend or len(trend) == 0:
return
# If no filename is given, we'll assume STD-out format and ASCII.
if path == '':
settings["format"] = 'asciiart'
if settings["format"] == 'asciiart':
create_graph_trend_ascii_art(trend, path, settings)
elif settings["format"] == 'gnuplot':
create_graph_trend_gnu_plot(trend, path, settings)
elif settings["format"] == "flot":
create_graph_trend_flot(trend, path, settings)
def create_graph_trend_ascii_art(trend, path, settings):
"""Creates the graph trend using ASCII art"""
out = ""
if settings["multiple"] is not None:
# Tokens that will represent the different data sets (maximum 16 sets)
# Set index (=100) to the biggest of the histogram sums
index = max([sum(x[1]) for x in trend])
# Print legend box
out += "Legend: %s\n\n" % ", ".join(["%s (%s)" % x
for x in zip(settings["multiple"], WEBSTAT_GRAPH_TOKENS)])
else:
index = max([x[1] for x in trend])
width = 82
# Figure out the max length of the xtics, in order to left align
xtic_max_len = max([len(_to_datetime(x[0]).strftime(
settings["xtic_format"])) for x in trend])
for row in trend:
# Print the xtic
xtic = _to_datetime(row[0]).strftime(settings["xtic_format"])
out_row = xtic + ': ' + ' ' * (xtic_max_len - len(xtic)) + '|'
try:
col_width = (1.0 * width / index)
except ZeroDivisionError:
col_width = 0
if settings["multiple"] is not None:
# The second value of the row-tuple, represents the n values from
# the n data sets. Each set, will be represented by a different
# ASCII character, chosen from the randomized string
# 'WEBSTAT_GRAPH_TOKENS'.
# NOTE: Only up to 16 (len(WEBSTAT_GRAPH_TOKENS)) data
# sets are supported.
total = sum(row[1])
for i in range(len(row[1])):
col = row[1][i]
try:
out_row += WEBSTAT_GRAPH_TOKENS[i] * int(1.0 * col * col_width)
except ZeroDivisionError:
break
if len([i for i in row[1] if type(i) is int and i > 0]) - 1 > 0:
out_row += out_row[-1]
else:
total = row[1]
try:
out_row += '-' * int(1.0 * total * col_width)
except ZeroDivisionError:
break
# Print sentinel, and the total
out += out_row + '>' + ' ' * (xtic_max_len + 4 +
width - len(out_row)) + str(total) + '\n'
# Write to destination file
if path == '':
print(out)
else:
open(path, 'w').write(out)
def create_graph_trend_gnu_plot(trend, path, settings):
"""Creates the graph trend using the GNU plot library"""
try:
import Gnuplot
except ImportError:
return
gnup = Gnuplot.Gnuplot()
gnup('set style data steps')
if 'size' in settings:
gnup('set terminal png tiny size %s' % settings['size'])
else:
gnup('set terminal png tiny')
gnup('set output "%s"' % path)
if settings["title"] != '':
gnup.title(settings["title"].replace("\"", ""))
if settings["xlabel"] != '':
gnup.xlabel(settings["xlabel"])
if settings["ylabel"] != '':
gnup.ylabel(settings["ylabel"])
if settings["xtic_format"] != '':
xtics = 'set xtics ('
xtics += ', '.join(['"%s" %d' %
(_to_datetime(trend[i][0], '%Y-%m-%d \
%H:%M:%S').strftime(settings["xtic_format"]), i)
for i in range(len(trend))]) + ')'
gnup(xtics)
gnup('set format y "%.0f"')
# If we have multiple data sets, we need to do
# some magic to make Gnuplot eat it,
# This is basically a matrix transposition,
# and the addition of index numbers.
if settings["multiple"] is not None:
cols = len(trend[0][1])
rows = len(trend)
plot_items = []
y_max = 0
y_min = 0
for col in range(cols):
data = []
for row in range(rows):
data.append([row, trend[row][1][col]])
data.append([rows, trend[-1][1][col]])
plot_items.append(Gnuplot.PlotItems
.Data(data, title=settings["multiple"][col]))
tmp_max = max([x[col] for x in data])
tmp_min = min([x[col] for x in data])
if tmp_max > y_max:
y_max = tmp_max
if tmp_min < y_min:
y_min = tmp_min
if y_max - y_min < 5 and y_min != 0:
gnup('set ytic %d, 1, %d' % (y_min - 1, y_max + 2))
elif y_max < 5:
gnup('set ytic 1')
gnup.plot(*plot_items)
else:
data = [x[1] for x in trend]
data.append(trend[-1][1])
y_max = max(data)
y_min = min(data)
if y_max - y_min < 5 and y_min != 0:
gnup('set ytic %d, 1, %d' % (y_min - 1, y_max + 2))
elif y_max < 5:
gnup('set ytic 1')
gnup.plot(data)
def create_graph_trend_flot(trend, path, settings):
"""Creates the graph trend using the flot library"""
size = settings.get("size", "500,400").split(",")
title = cgi.escape(settings["title"].replace(" ", "")[:10])
out = """<!--[if IE]><script language="javascript" type="text/javascript"
src="%(site)s/vendors/flot/excanvas.min.js"></script><![endif]-->
<script language="javascript" type="text/javascript" src="%(site)s/vendors/flot/jquery.flot.js"></script>
<script language="javascript" type="text/javascript" src="%(site)s/vendors/flot/jquery.flot.selection.js"></script>
<script id="source" language="javascript" type="text/javascript">
document.write('<div style="float:left"><div id="placeholder%(title)s" style="width:%(width)spx;height:%(height)spx"></div></div>'+
'<div id="miniature%(title)s" style="float:left;margin-left:20px;margin-top:50px">' +
'<div id="overview%(title)s" style="width:%(hwidth)dpx;height:%(hheigth)dpx"></div>' +
'<p id="overviewLegend%(title)s" style="margin-left:10px"></p>' +
'</div>');
$(function () {
function parseDate%(title)s(sdate){
var div1 = sdate.split(' ');
var day = div1[0].split('-');
var hour = div1[1].split(':');
return new Date(day[0], day[1]-1, day[2], hour[0], hour[1], hour[2]).getTime() - (new Date().getTimezoneOffset() * 60 * 1000) ;
}
function getData%(title)s() {""" % \
{'site': CFG_SITE_URL, 'width': size[0], 'height': size[1], 'hwidth': int(size[0]) / 2,
'hheigth': int(size[1]) / 2, 'title': title}
if(len(trend) > 1):
granularity_td = (_to_datetime(trend[1][0], '%Y-%m-%d %H:%M:%S') -
_to_datetime(trend[0][0], '%Y-%m-%d %H:%M:%S'))
else:
granularity_td = datetime.timedelta()
# Create variables with the format dn = [[x1,y1], [x2,y2]]
minx = trend[0][0]
maxx = trend[0][0]
if settings["multiple"] is not None:
cols = len(trend[0][1])
rows = len(trend)
first = 0
for col in range(cols):
out += """var d%d = [""" % (col)
for row in range(rows):
if(first == 0):
first = 1
else:
out += ", "
if trend[row][0] < minx:
minx = trend[row][0]
if trend[row][0] > maxx:
maxx = trend[row][0]
out += '[parseDate%s("%s"),%d]' % \
(title, _to_datetime(trend[row][0], '%Y-%m-%d \
%H:%M:%S'), trend[row][1][col])
out += ", [parseDate%s('%s'), %d]];\n" % (title,
_to_datetime(maxx, '%Y-%m-%d %H:%M:%S')+ granularity_td,
trend[-1][1][col])
out += "return [\n"
first = 0
for col in range(cols):
if first == 0:
first = 1
else:
out += ", "
out += '{data : d%d, label : "%s"}' % \
(col, settings["multiple"][col])
out += "];\n}\n"
else:
out += """var d1 = ["""
rows = len(trend)
first = 0
for row in range(rows):
if trend[row][0] < minx:
minx = trend[row][0]
if trend[row][0] > maxx:
maxx = trend[row][0]
if first == 0:
first = 1
else:
out += ', '
out += '[parseDate%s("%s"),%d]' % \
(title, _to_datetime(trend[row][0], '%Y-%m-%d %H:%M:%S'),
trend[row][1])
out += """, [parseDate%s("%s"), %d]];
return [d1];
}
""" % (title, _to_datetime(maxx, '%Y-%m-%d %H:%M:%S') +
granularity_td, trend[-1][1])
# Set options
tics = """yaxis: {
tickDecimals : 0
},"""
if settings["xtic_format"] != '':
current = _to_datetime(maxx, '%Y-%m-%d %H:%M:%S')
next = current + granularity_td
if (granularity_td.seconds + granularity_td.days * 24 * 3600) > 2592000:
next = current.replace(day=31)
tics += 'xaxis: { mode:"time",min:parseDate%s("%s"),max:parseDate%s("%s")},'\
% (title, _to_datetime(minx, '%Y-%m-%d %H:%M:%S'), title, next)
out += """var options%s ={
series: {
lines: { steps: true, fill: true},
points: { show: false }
},
legend: {show: false},
%s
grid: { hoverable: true, clickable: true },
selection: { mode: "xy" }
};
""" % (title, tics, )
# Write the plot method in javascript
out += """var startData%(title)s = getData%(title)s();
var plot%(title)s = $.plot($("#placeholder%(title)s"), startData%(title)s, options%(title)s);
// setup overview
var overview%(title)s = $.plot($("#overview%(title)s"), startData%(title)s, {
legend: { show: true, container: $("#overviewLegend%(title)s") },
series: {
lines: { steps: true, fill: true, lineWidth: 1},
shadowSize: 0
},
%(tics)s
grid: { color: "#999" },
selection: { mode: "xy" }
});
""" % {"title": title, "tics": tics}
# Tooltip and zoom
out += """
function showTooltip%(title)s(x, y, contents) {
$('<div id="tooltip%(title)s">' + contents + '</div>').css( {
position: 'absolute',
display: 'none',
top: y - 5,
left: x + 10,
border: '1px solid #fdd',
padding: '2px',
'background-color': '#fee',
opacity: 0.80
}).appendTo("body").fadeIn(200);
}
var previousPoint%(title)s = null;
$("#placeholder%(title)s").bind("plothover", function (event, pos, item) {
if (item) {
if (previousPoint%(title)s != item.datapoint) {
previousPoint%(title)s = item.datapoint;
$("#tooltip%(title)s").remove();
var y = item.datapoint[1];
showTooltip%(title)s(item.pageX, item.pageY, y);
}
}
else {
$("#tooltip%(title)s").remove();
previousPoint%(title)s = null;
}
});
$("#placeholder%(title)s").bind("plotclick", function (event, pos, item) {
if (item) {
plot%(title)s.highlight(item.series, item.datapoint);
}
});
// now connect the two
$("#placeholder%(title)s").bind("plotselected", function (event, ranges) {
// clamp the zooming to prevent eternal zoom
if (ranges.xaxis.to - ranges.xaxis.from < 0.00001){
ranges.xaxis.to = ranges.xaxis.from + 0.00001;}
if (ranges.yaxis.to - ranges.yaxis.from < 0.00001){
ranges.yaxis.to = ranges.yaxis.from + 0.00001;}
// do the zooming
plot%(title)s = $.plot($("#placeholder%(title)s"), getData%(title)s(ranges.xaxis.from, ranges.xaxis.to),
$.extend(true, {}, options%(title)s, {
xaxis: { min: ranges.xaxis.from, max: ranges.xaxis.to },
yaxis: { min: ranges.yaxis.from, max: ranges.yaxis.to }
}));
// don't fire event on the overview to prevent eternal loop
overview%(title)s.setSelection(ranges, true);
});
$("#overview%(title)s").bind("plotselected", function (event, ranges) {
plot%(title)s.setSelection(ranges);
});
});
</script>
<noscript>Your browser does not support JavaScript!
Please, select another output format</noscript>""" % {'title' : title}
open(path, 'w').write(out)
def get_numeric_stats(data, multiple):
""" Returns average, max and min values for data """
data = [x[1] for x in data]
if data == []:
return (0, 0, 0)
if multiple:
lists = []
for i in range(len(data[0])):
lists.append([x[i] for x in data])
return ([float(sum(x)) / len(x) for x in lists], [max(x) for x in lists],
[min(x) for x in lists])
else:
return (float(sum(data)) / len(data), max(data), min(data))
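# Illustrative sketch (hypothetical data): get_numeric_stats on a simple,
# single-valued trend returns (average, maximum, minimum).
def _example_numeric_stats():
    sample = [('2012-01-01', 2), ('2012-01-02', 4), ('2012-01-03', 6)]
    return get_numeric_stats(sample, False)  # -> (4.0, 6, 2)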
def create_graph_table(data, path, settings):
"""
Creates a html table representation out of data.
@param data: The data
@type data: (str,...)
@param path: Where to store the graph
@type path: str
@param settings: Dictionary of table parameters
@type settings: dict
"""
out = """<table border="1">
"""
if settings['rows'] == []:
for row in data:
out += """<tr>
"""
for value in row:
out += """<td>%s</td>
""" % value
out += "</tr>"
else:
for dta, value in zip(settings['rows'], data):
out += """<tr>
<td>%s</td>
<td>
""" % dta
for vrow in value:
out += """%s<br />
""" % vrow
out = out[:-6] + "</td></tr>"
out += "</table>"
open(path, 'w').write(out)
def create_graph_dump(dump, path):
"""
Creates a graph representation out of data produced from get_event_trend.
@param dump: The dump data
@type dump: [(str|int,...)]
@param path: Where to store the graph
@type path: str
"""
out = ""
if len(dump) == 0:
out += "No actions for this custom event " + \
"are registered in the given time range."
else:
# Make every row in dump equally long, insert None if appropriate.
max_len = max([len(x) for x in dump])
events = [tuple(list(x) + [None] * (max_len - len(x))) for x in dump]
cols = ["Event", "Date and time"] + ["Argument %d" % i
for i in range(max_len - 2)]
column_widths = [max([len(str(x[i])) \
for x in events + [cols]]) + 3 for i in range(len(events[0]))]
for i in range(len(cols)):
out += cols[i] + ' ' * (column_widths[i] - len(cols[i]))
out += "\n"
for i in range(len(cols)):
out += '=' * (len(cols[i])) + ' ' * (column_widths[i] - len(cols[i]))
out += "\n\n"
for action in dump:
for i in range(len(action)):
if action[i] is None:
temp = ''
else:
temp = action[i]
out += str(temp) + ' ' * (column_widths[i] - len(str(temp)))
out += "\n"
# Write to destination file
if path == '':
print(out)
else:
open(path, 'w').write(out)
# EXPORT DATA TO SLS
def get_search_frequency(day=datetime.datetime.now().date()):
"""Returns the number of searches performed in the chosen day"""
searches = get_keyevent_trend_search_type_distribution(get_args(day))
return sum(searches[0][1])
def get_total_records(day=datetime.datetime.now().date()):
"""Returns the total number of records which existed in the chosen day"""
tomorrow = (datetime.datetime.now() +
datetime.timedelta(days=1)).strftime("%Y-%m-%d")
args = {'collection': CFG_SITE_NAME, 't_start': day.strftime("%Y-%m-%d"),
't_end': tomorrow, 'granularity': "day", 't_format': "%Y-%m-%d"}
try:
return get_keyevent_trend_collection_population(args)[0][1]
except IndexError:
return 0
def get_new_records(day=datetime.datetime.now().date()):
"""Returns the number of new records submitted in the chosen day"""
args = {'collection': CFG_SITE_NAME,
't_start': (day - datetime.timedelta(days=1)).strftime("%Y-%m-%d"),
't_end': day.strftime("%Y-%m-%d"), 'granularity': "day",
't_format': "%Y-%m-%d"}
try:
return (get_total_records(day) -
get_keyevent_trend_collection_population(args)[0][1])
except IndexError:
return 0
def get_download_frequency(day=datetime.datetime.now().date()):
"""Returns the number of downloads during the chosen day"""
return get_keyevent_trend_download_frequency(get_args(day))[0][1]
def get_comments_frequency(day=datetime.datetime.now().date()):
"""Returns the number of comments during the chosen day"""
return get_keyevent_trend_comments_frequency(get_args(day))[0][1]
def get_loans_frequency(day=datetime.datetime.now().date()):
"""Returns the number of comments during the chosen day"""
return get_keyevent_trend_number_of_loans(get_args(day))[0][1]
def get_web_submissions(day=datetime.datetime.now().date()):
"""Returns the number of web submissions during the chosen day"""
args = get_args(day)
args['doctype'] = 'all'
return get_keyevent_trend_web_submissions(args)[0][1]
def get_alerts(day=datetime.datetime.now().date()):
"""Returns the number of alerts during the chosen day"""
args = get_args(day)
args['cols'] = [('', '', '')]
args['event_id'] = 'alerts'
return get_customevent_trend(args)[0][1]
def get_journal_views(day=datetime.datetime.now().date()):
"""Returns the number of journal displays during the chosen day"""
args = get_args(day)
args['cols'] = [('', '', '')]
args['event_id'] = 'journals'
return get_customevent_trend(args)[0][1]
def get_basket_views(day=datetime.datetime.now().date()):
"""Returns the number of basket displays during the chosen day"""
args = get_args(day)
args['cols'] = [('', '', '')]
args['event_id'] = 'baskets'
return get_customevent_trend(args)[0][1]
def get_args(day):
"""Returns the most common arguments for the exporting to SLS methods"""
return {'t_start': day.strftime("%Y-%m-%d"),
't_end': (day + datetime.timedelta(days=1)).strftime("%Y-%m-%d"),
'granularity': "day", 't_format': "%Y-%m-%d"}
# EXPORTER
def export_to_python(data, req):
"""
Exports the data to Python code.
@param data: The Python data that should be exported
@type data: []
@param req: The Apache request object
@type req:
"""
_export("text/x-python", str(data), req)
def export_to_csv(data, req):
"""
Exports the data to CSV.
@param data: The Python data that should be exported
@type data: []
@param req: The Apache request object
@type req:
"""
csv_list = [""""%s",%s""" % (x[0], ",".join([str(y) for y in \
((type(x[1]) is tuple) and x[1] or (x[1], ))])) for x in data]
_export('text/csv', '\n'.join(csv_list), req)
def export_to_file(data, req):
"""
Exports the data to a file.
@param data: The Python data that should be exported
@type data: []
@param req: The Apache request object
@type req:
"""
try:
import xlwt
book = xlwt.Workbook(encoding="utf-8")
sheet1 = book.add_sheet('Sheet 1')
for row in range(0, len(data)):
for col in range(0, len(data[row])):
sheet1.write(row, col, "%s" % data[row][col])
filename = CFG_TMPDIR + "/webstat_export_" + \
str(time.time()).replace('.', '') + '.xls'
book.save(filename)
redirect_to_url(req, '%s/stats/export?filename=%s&mime=%s' \
% (CFG_SITE_URL, os.path.basename(filename), 'application/vnd.ms-excel'))
except ImportError:
csv_list = []
for row in data:
row = ['"%s"' % str(col) for col in row]
csv_list.append(",".join(row))
_export('text/csv', '\n'.join(csv_list), req)
# INTERNAL
def _export(mime, content, req):
"""
Helper function to pass on the export call. Create a
temporary file in which the content is stored, then let
redirect to the export web interface.
"""
filename = CFG_TMPDIR + "/webstat_export_" + \
str(time.time()).replace('.', '')
open(filename, 'w').write(content)
redirect_to_url(req, '%s/stats/export?filename=%s&mime=%s' \
% (CFG_SITE_URL, os.path.basename(filename), mime))
def _get_trend_from_actions(action_dates, initial_value,
t_start, t_end, granularity, dt_format, acumulative=False):
"""
Given a list of dates reflecting some sort of action/event, and some additional parameters,
an internal data format is returned. 'initial_value' set to zero, means that the frequency
will not be accumulative, but rather non-causal.
@param action_dates: A list of dates, indicating some sort of action/event.
@type action_dates: [datetime.datetime]
@param initial_value: The numerical offset the first action's value should make use of.
@type initial_value: int
@param t_start: Start time for the time domain in dt_format
@type t_start: str
@param t_end: End time for the time domain in dt_format
@type t_end: str
@param granularity: The granularity of the time domain, span between values.
Possible values are [year,month,day,hour,minute,second].
@type granularity: str
@param dt_format: Format of the 't_start' and 't_stop' parameters
@type dt_format: str
@return: A list of tuples zipping a time-domain and a value-domain
@type: [(str, int)]
"""
# Append the maximum date as a sentinel indicating we're done
action_dates = list(action_dates)
# Construct the datetime tuple for the stop time
stop_at = _to_datetime(t_end, dt_format) - datetime.timedelta(seconds=1)
vector = [(None, initial_value)]
try:
upcoming_action = action_dates.pop()
#Do not count null values (when year, month or day is 0)
if granularity in ("year", "month", "day") and upcoming_action[0] == 0:
upcoming_action = action_dates.pop()
except IndexError:
upcoming_action = (datetime.datetime.max, 0)
# Create an iterator running from the first day of activity
for current in _get_datetime_iter(t_start, granularity, dt_format):
# Counter of action_dates in the current span, set the initial value to
        # zero to avoid accumulation.
if acumulative:
actions_here = vector[-1][1]
else:
actions_here = 0
# Check to see if there's an action date in the current span
if upcoming_action[0] == {"year": current.year,
"month": current.month,
"day": current.day,
"hour": current.hour,
"minute": current.minute,
"second": current.second
}[granularity]:
actions_here += upcoming_action[1]
try:
upcoming_action = action_dates.pop()
except IndexError:
upcoming_action = (datetime.datetime.max, 0)
vector.append((current.strftime('%Y-%m-%d %H:%M:%S'), actions_here))
# Make sure to stop the iteration at the end time
if {"year": current.year >= stop_at.year,
"month": current.month >= stop_at.month and current.year == stop_at.year,
"day": current.day >= stop_at.day and current.month == stop_at.month,
"hour": current.hour >= stop_at.hour and current.day == stop_at.day,
"minute": current.minute >= stop_at.minute and current.hour == stop_at.hour,
"second": current.second >= stop_at.second and current.minute == stop_at.minute
}[granularity]:
break
# Remove the first bogus tuple, and return
return vector[1:]
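# Illustrative sketch (hypothetical rows, not part of the original module):
# _get_trend_from_actions expects (time-part, count) rows as produced by
# _get_sql_query, in descending time order, and fills in empty time buckets.
def _example_trend_from_actions():
    rows = [(2, 5), (1, 3)]  # e.g. (DAY(creation_time), COUNT(*))
    return _get_trend_from_actions(rows, 0, '2012-01-01', '2012-01-03',
                                   'day', '%Y-%m-%d')
    # should yield [('2012-01-01 00:00:00', 3), ('2012-01-02 00:00:00', 5)]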
def _get_keyevent_trend(args, sql, initial_quantity=0, extra_param=[],
return_sql=False, sql_text='%s', acumulative=False):
"""
Returns the trend for the sql passed in the given timestamp range.
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# collect action dates
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
param = tuple([lower, upper] + extra_param)
if return_sql:
sql = sql % param
return sql_text % sql
return _get_trend_from_actions(run_sql(sql, param), initial_quantity, args['t_start'],
args['t_end'], args['granularity'], args['t_format'], acumulative)
def _get_datetime_iter(t_start, granularity='day',
dt_format='%Y-%m-%d %H:%M:%S'):
"""
Returns an iterator over datetime elements starting at an arbitrary time,
with granularity of a [year,month,day,hour,minute,second].
@param t_start: An arbitrary starting time in format %Y-%m-%d %H:%M:%S
@type t_start: str
@param granularity: The span between iterable elements, default is 'days'.
Possible values are [year,month,day,hour,minute,second].
@type granularity: str
@param dt_format: Format of the 't_start' parameter
@type dt_format: str
@return: An iterator of points in time
@type: iterator over datetime elements
"""
tim = _to_datetime(t_start, dt_format)
# Make a time increment depending on the granularity and the current time
# (the length of years and months vary over time)
span = ""
while True:
yield tim
if granularity == "year":
span = (calendar.isleap(tim.year) and ["days=366"] or ["days=365"])[0]
elif granularity == "month":
span = "days=" + str(calendar.monthrange(tim.year, tim.month)[1])
elif granularity == "day":
span = "days=1"
elif granularity == "hour":
span = "hours=1"
elif granularity == "minute":
span = "minutes=1"
elif granularity == "second":
span = "seconds=1"
else:
# Default just in case
span = "days=1"
tim += eval("datetime.timedelta(" + span + ")")
def _to_datetime(dttime, dt_format='%Y-%m-%d %H:%M:%S'):
"""
Transforms a string into a datetime
"""
return datetime.datetime(*time.strptime(dttime, dt_format)[:6])
def _run_cmd(command):
"""
Runs a certain command and returns the string output. If the command is
not found a string saying so will be returned. Use with caution!
@param command: The UNIX command to execute.
@type command: str
@return: The std-out from the command.
@type: str
"""
return commands.getoutput(command)
def _get_doctypes():
"""Returns all the possible doctypes of a new submission"""
doctypes = [("all", "All")]
for doctype in get_docid_docname_alldoctypes():
doctypes.append(doctype)
return doctypes
def _get_item_statuses():
"""Returns all the possible status of an item"""
return [(CFG_BIBCIRCULATION_ITEM_STATUS_CANCELLED, "Cancelled"),
(CFG_BIBCIRCULATION_ITEM_STATUS_CLAIMED, "Claimed"),
(CFG_BIBCIRCULATION_ITEM_STATUS_IN_PROCESS, "In process"),
(CFG_BIBCIRCULATION_ITEM_STATUS_NOT_ARRIVED, "Not arrived"),
(CFG_BIBCIRCULATION_ITEM_STATUS_ON_LOAN, "On loan"),
(CFG_BIBCIRCULATION_ITEM_STATUS_ON_ORDER, "On order"),
(CFG_BIBCIRCULATION_ITEM_STATUS_ON_SHELF, "On shelf")] + \
[(status, status) for status in CFG_BIBCIRCULATION_ITEM_STATUS_OPTIONAL]
def _get_item_doctype():
"""Returns all the possible types of document for an item"""
dts = []
for dat in run_sql("""SELECT DISTINCT(request_type)
FROM crcILLREQUEST ORDER BY request_type ASC"""):
dts.append((dat[0], dat[0]))
return dts
def _get_request_statuses():
"""Returns all the possible statuses for an ILL request"""
dts = []
for dat in run_sql("SELECT DISTINCT(status) FROM crcILLREQUEST ORDER BY status ASC"):
dts.append((dat[0], dat[0]))
return dts
def _get_libraries():
"""Returns all the possible libraries"""
dts = []
for dat in run_sql("SELECT name FROM crcLIBRARY ORDER BY name ASC"):
if not CFG_CERN_SITE or not "CERN" in dat[0]: # do not add internal libraries for CERN site
dts.append((dat[0], dat[0]))
return dts
def _get_loan_periods():
"""Returns all the possible loan periods for an item"""
dts = []
for dat in run_sql("SELECT DISTINCT(loan_period) FROM crcITEM ORDER BY loan_period ASC"):
dts.append((dat[0], dat[0]))
return dts
def _get_tag_name(tag):
"""
For a specific MARC tag, it returns the human-readable name
"""
res = run_sql("SELECT name FROM tag WHERE value LIKE %s", ('%' + tag + '%',))
if res:
return res[0][0]
res = run_sql("SELECT name FROM tag WHERE value LIKE %s", ('%' + tag[:-1] + '%',))
if res:
return res[0][0]
return ''
def _get_collection_recids_for_sql_query(coll):
ids = get_collection_reclist(coll).tolist()
if len(ids) == 0:
return ""
return "id_bibrec IN %s" % str(ids).replace('[', '(').replace(']', ')')
def _check_udc_value_where():
return "id_bibrec IN (SELECT brb.id_bibrec \
FROM bibrec_bib08x brb, bib08x b WHERE brb.id_bibxxx = b.id AND tag='080__a' \
AND value LIKE %s) "
def _get_udc_truncated(udc):
if udc[-1] == '*':
return "%s%%" % udc[:-1]
if udc[0] == '*':
return "%%%s" % udc[1:]
return "%s" % udc
def _check_empty_value(value):
if len(value) == 0:
return ""
else:
return value[0][0]
def _get_granularity_sql_functions(granularity):
try:
return {
"year": ("YEAR",),
"month": ("YEAR", "MONTH",),
"day": ("MONTH", "DAY",),
"hour": ("DAY", "HOUR",),
"minute": ("HOUR", "MINUTE",),
"second": ("MINUTE", "SECOND")
}[granularity]
except KeyError:
return ("MONTH", "DAY",)
def _get_sql_query(creation_time_name, granularity, tables_from, conditions="",
extra_select="", dates_range_param="", group_by=True, count=True):
if len(dates_range_param) == 0:
dates_range_param = creation_time_name
conditions = "%s > %%s AND %s < %%s %s" % (dates_range_param, dates_range_param,
len(conditions) > 0 and "AND %s" % conditions or "")
values = {'creation_time_name': creation_time_name,
'granularity_sql_function': _get_granularity_sql_functions(granularity)[-1],
'count': count and ", COUNT(*)" or "",
'tables_from': tables_from,
'conditions': conditions,
'extra_select': extra_select,
'group_by': ""}
if group_by:
values['group_by'] = "GROUP BY "
for fun in _get_granularity_sql_functions(granularity):
values['group_by'] += "%s(%s), " % (fun, creation_time_name)
values['group_by'] = values['group_by'][:-2]
return "SELECT %(granularity_sql_function)s(%(creation_time_name)s) %(count)s %(extra_select)s \
FROM %(tables_from)s WHERE %(conditions)s \
%(group_by)s \
ORDER BY %(creation_time_name)s DESC" % values
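# Illustrative sketch (hypothetical table and column names): the query builder
# above produces time-bucketed counting SQL, roughly of the form
# "SELECT DAY(creation_time), COUNT(*) FROM ... WHERE creation_time > %s AND
#  creation_time < %s ... GROUP BY MONTH(creation_time), DAY(creation_time)
#  ORDER BY creation_time DESC".
def _example_sql_query():
    return _get_sql_query("creation_time", "day", "query q",
                          conditions="q.urlargs LIKE %s")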
| 0.002767 |
##############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import datetime
import math
import pymongo
import time
from girder.constants import AccessType, SortDir
from girder.models.model_base import Model
from girder import logger
class Annotationelement(Model):
bboxKeys = {
'left': ('bbox.highx', '$gte'),
'right': ('bbox.lowx', '$lt'),
'top': ('bbox.highy', '$gte'),
'bottom': ('bbox.lowy', '$lt'),
'low': ('bbox.highz', '$gte'),
'high': ('bbox.lowz', '$lt'),
'minimumSize': ('bbox.size', '$gte'),
'size': ('bbox.size', None),
'details': ('bbox.details', None),
}
def initialize(self):
self.name = 'annotationelement'
self.ensureIndices([
'annotationId',
'_version',
([
('annotationId', SortDir.ASCENDING),
('bbox.lowx', SortDir.DESCENDING),
('bbox.highx', SortDir.ASCENDING),
('bbox.size', SortDir.DESCENDING),
], {}),
([
('annotationId', SortDir.ASCENDING),
('bbox.size', SortDir.DESCENDING),
], {}),
([
('annotationId', SortDir.ASCENDING),
('_version', SortDir.DESCENDING),
('element.group', SortDir.ASCENDING),
], {}),
([
('created', SortDir.ASCENDING),
('_version', SortDir.ASCENDING),
], {}),
])
self.exposeFields(AccessType.READ, (
'_id', '_version', 'annotationId', 'created', 'element'))
self.versionId = None
def getNextVersionValue(self):
"""
Maintain a version number. This is a single sequence that can be used
to ensure we have the correct set of elements for an annotation.
:returns: an integer version number that is strictly increasing.
"""
version = None
if self.versionId is not None:
version = self.collection.find_one_and_update(
{'_id': self.versionId},
{'$inc': {'_version': 1}})
if version is None:
versionObject = self.collection.find_one(
{'annotationId': 'version_sequence'})
if versionObject is None:
startingId = self.collection.find_one({}, sort=[('_version', SortDir.DESCENDING)])
if startingId:
startingId = startingId['_version'] + 1
else:
startingId = 0
self.versionId = self.collection.insert_one(
{'annotationId': 'version_sequence', '_version': startingId}
).inserted_id
else:
self.versionId = versionObject['_id']
version = self.collection.find_one_and_update(
{'_id': self.versionId},
{'$inc': {'_version': 1}})
return version['_version']
def getElements(self, annotation, region=None):
"""
Given an annotation, fetch the elements from the database and add them
to it.
When a region is used to request specific elements, the following
keys can be specified:
left, right, top, bottom, low, high: the spatial area where
elements are located, all in pixels. If an element's bounding box is
at least partially within the requested area, that element is included.
minimumSize: the minimum size of an element to return.
sort, sortdir: standard sort options. The sort key can include
size and details.
limit: limit the total number of elements by this value. Defaults
to no limit.
offset: the offset within the query to start returning values. If
maxDetails is used, to get subsequent sets of elements, the offset
needs to be increased by the actual number of elements returned from a
previous query, which will vary based on the details of the elements.
maxDetails: if specified, limit the total number of elements by the
sum of their details values. This is applied in addition to limit.
The sum of the details values of the elements may exceed maxDetails
slightly (the sum of all but the last element will be less than
maxDetails, but the last element may exceed the value).
centroids: if specified and true, only return the id, center of the
bounding box, and bounding box size for each element.
:param annotation: the annotation to get elements for. Modified.
:param region: if present, a dictionary restricting which annotations
are returned.
"""
annotation['_elementQuery'] = {}
annotation['annotation']['elements'] = [
element for element in self.yieldElements(
annotation, region, annotation['_elementQuery'])]
def yieldElements(self, annotation, region=None, info=None):
"""
Given an annotation, fetch the elements from the database.
When a region is used to request specific elements, the following
keys can be specified:
left, right, top, bottom, low, high: the spatial area where
elements are located, all in pixels. If an element's bounding box is
at least partially within the requested area, that element is included.
minimumSize: the minimum size of an element to return.
sort, sortdir: standard sort options. The sort key can include
size and details.
limit: limit the total number of elements by this value. Defaults
to no limit.
offset: the offset within the query to start returning values. If
maxDetails is used, to get subsequent sets of elements, the offset
needs to be increased by the actual number of elements returned from a
previous query, which will vary based on the details of the elements.
maxDetails: if specified, limit the total number of elements by the
sum of their details values. This is applied in addition to limit.
The sum of the details values of the elements may exceed maxDetails
slightly (the sum of all but the last element will be less than
maxDetails, but the last element may exceed the value).
centroids: if specified and true, only return the id, center of the
bounding box, and bounding box size for each element.
:param annotation: the annotation to get elements for. Modified.
:param region: if present, a dictionary restricting which annotations
are returned.
:param info: an optional dictionary that will be modified with
additional query information, including count (total number of
available elements), returned (number of elements in response),
maxDetails (as specified by the region dictionary), details (sum of
details returned), limit (as specified by region), centroids (a
boolean based on the region specification).
:returns: a list of elements. If centroids were requested, each entry
is a list with str(id), x, y, size, and a property index. Otherwise, each entry is the
element record.
"""
info = info if info is not None else {}
region = region or {}
query = {
'annotationId': annotation.get('_annotationId', annotation['_id']),
'_version': annotation['_version']
}
for key in region:
if key in self.bboxKeys and self.bboxKeys[key][1]:
if self.bboxKeys[key][1] == '$gte' and float(region[key]) <= 0:
continue
query[self.bboxKeys[key][0]] = {
self.bboxKeys[key][1]: float(region[key])}
if region.get('sort') in self.bboxKeys:
sortkey = self.bboxKeys[region['sort']][0]
else:
sortkey = region.get('sort') or '_id'
sortdir = int(region['sortdir']) if region.get('sortdir') else SortDir.ASCENDING
limit = int(region['limit']) if region.get('limit') else 0
maxDetails = int(region.get('maxDetails') or 0)
queryLimit = maxDetails if maxDetails and (not limit or maxDetails < limit) else limit
offset = int(region['offset']) if region.get('offset') else 0
logger.debug('element query %r for %r', query, region)
fields = {'_id': True, 'element': True, 'bbox.details': True}
centroids = str(region.get('centroids')).lower() == 'true'
if centroids:
# fields = {'_id': True, 'element': True, 'bbox': True}
fields = {
'_id': True,
'element.id': True,
'bbox': True}
proplist = []
propskeys = ['type', 'fillColor', 'lineColor', 'lineWidth', 'closed']
for key in propskeys:
fields['element.%s' % key] = True
props = {}
info['centroids'] = True
info['props'] = proplist
info['propskeys'] = propskeys
elementCursor = self.find(
query=query, sort=[(sortkey, sortdir)], limit=queryLimit,
offset=offset, fields=fields)
info.update({
'count': elementCursor.count(),
'offset': offset,
'filter': query,
'sort': [sortkey, sortdir],
})
details = count = 0
if maxDetails:
info['maxDetails'] = maxDetails
if limit:
info['limit'] = limit
for entry in elementCursor:
element = entry['element']
element.setdefault('id', entry['_id'])
if centroids:
bbox = entry.get('bbox')
if not bbox or 'lowx' not in bbox or 'size' not in bbox:
continue
prop = tuple(element.get(key) for key in propskeys)
if prop not in props:
props[prop] = len(props)
proplist.append(list(prop))
yield [
str(element['id']),
(bbox['lowx'] + bbox['highx']) / 2,
(bbox['lowy'] + bbox['highy']) / 2,
bbox['size'] if element.get('type') != 'point' else 0,  # points report zero size
props[prop]
]
details += 1
else:
yield element
details += entry.get('bbox', {}).get('details', 1)
count += 1
if maxDetails and details >= maxDetails:
break
info['returned'] = count
info['details'] = details
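# A hedged usage sketch (illustrative only, not from the original model):
# fetch centroids for the 50 largest elements inside a viewport. The
# region keys follow the docstring above; the annotation document is
# assumed to have been loaded elsewhere.
#
#   region = {
#       'left': 100, 'right': 4196, 'top': 100, 'bottom': 4196,
#       'sort': 'size', 'sortdir': -1, 'limit': 50, 'centroids': 'true',
#   }
#   info = {}
#   for entry in Annotationelement().yieldElements(annotation, region, info):
#       # each entry is [str(id), center_x, center_y, size, property_index]
#       pass
#   # info now holds 'count', 'returned', 'props', 'propskeys', etc.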
def removeWithQuery(self, query):
"""
Remove all documents matching a given query from the collection.
For safety reasons, you may not pass an empty query.
Note: this does NOT return a Mongo DeleteResult.
:param query: The search query for documents to delete,
see general MongoDB docs for "find()"
:type query: dict
"""
assert query
self.collection.bulk_write([pymongo.DeleteMany(query)], ordered=False)
def removeElements(self, annotation):
"""
Remove all elements related to the specified annotation.
:param annotation: the annotation to remove elements from.
"""
self.removeWithQuery({'annotationId': annotation['_id']})
def removeOldElements(self, annotation, oldversion=None):
"""
Remove all elements related to the specified annotation.
:param annotation: the annotation to remove elements from.
:param oldversion: if present, remove versions up to this number. If
none, remove versions earlier than the version in
the annotation record.
"""
query = {'annotationId': annotation['_id']}
if oldversion is None or oldversion >= annotation['_version']:
query['_version'] = {'$lt': annotation['_version']}
else:
query['_version'] = {'$lte': oldversion}
self.removeWithQuery(query)
def _boundingBox(self, element):
"""
Compute bounding box information for an annotation element.
This computes the enclosing bounding box of an element. For points, a
small non-zero-area region is used centered on the point.
Additionally, a metric is stored for the complexity of the element.
The size of the bounding box's x-y diagonal is also stored.
:param element: the element to compute the bounding box for.
:returns: the bounding box dictionary. This contains 'lowx', 'lowy',
'lowz', 'highx', 'highy', and 'highz', which are the minimum and
maximum values in each dimension, 'details' with the complexity of
the element, and 'size' with the x-y diagonal size of the bounding
box.
"""
bbox = {}
if 'points' in element:
bbox['lowx'] = min([p[0] for p in element['points']])
bbox['lowy'] = min([p[1] for p in element['points']])
bbox['lowz'] = min([p[2] for p in element['points']])
bbox['highx'] = max([p[0] for p in element['points']])
bbox['highy'] = max([p[1] for p in element['points']])
bbox['highz'] = max([p[2] for p in element['points']])
bbox['details'] = len(element['points'])
else:
center = element['center']
bbox['lowz'] = bbox['highz'] = center[2]
if 'width' in element:
w = element['width'] * 0.5
h = element['height'] * 0.5
if element.get('rotation'):
absin = abs(math.sin(element['rotation']))
abcos = abs(math.cos(element['rotation']))
w, h = max(abcos * w, absin * h), max(absin * w, abcos * h)
bbox['lowx'] = center[0] - w
bbox['lowy'] = center[1] - h
bbox['highx'] = center[0] + w
bbox['highy'] = center[1] + h
bbox['details'] = 4
elif 'radius' in element:
rad = element['radius']
bbox['lowx'] = center[0] - rad
bbox['lowy'] = center[1] - rad
bbox['highx'] = center[0] + rad
bbox['highy'] = center[1] + rad
bbox['details'] = 4
else:
# This is a fallback for points. Although they have no
# dimension, make the bounding box have some extent.
bbox['lowx'] = center[0] - 0.5
bbox['lowy'] = center[1] - 0.5
bbox['highx'] = center[0] + 0.5
bbox['highy'] = center[1] + 0.5
bbox['details'] = 1
bbox['size'] = (
(bbox['highy'] - bbox['lowy'])**2 +
(bbox['highx'] - bbox['lowx'])**2) ** 0.5
# we may want to store perimeter or area as that could help when we
# simplify to points
return bbox
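# A hedged illustration (not part of the original model): the bounding box
# of an axis-aligned 10 x 4 rectangle element centered at (50, 20, 0)
# would come out as
#
#   >>> Annotationelement()._boundingBox(
#   ...     {'type': 'rectangle', 'center': [50, 20, 0],
#   ...      'width': 10, 'height': 4})
#   lowx=45, highx=55, lowy=18, highy=22, lowz=highz=0, details=4,
#   size=(10**2 + 4**2) ** 0.5 (about 10.77)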
def updateElements(self, annotation):
"""
Given an annotation, extract the elements from it and update the
database of them.
:param annotation: the annotation to save elements for. Modified.
"""
startTime = time.time()
elements = annotation['annotation'].get('elements', [])
if not len(elements):
return
now = datetime.datetime.utcnow()
chunkSize = 100000
for chunk in range(0, len(elements), chunkSize):
chunkStartTime = time.time()
entries = [{
'annotationId': annotation['_id'],
'_version': annotation['_version'],
'created': now,
'bbox': self._boundingBox(element),
'element': element
} for element in elements[chunk:chunk + chunkSize]]
prepTime = time.time() - chunkStartTime
res = self.collection.insert_many(entries)
for pos, entry in enumerate(entries):
if 'id' not in entry['element']:
entry['element']['id'] = str(res.inserted_ids[pos])
# If the whole insert is slow, log information about it.
if time.time() - startTime > 10:
logger.info('insert %d elements in %4.2fs (prep time %4.2fs), done %d/%d' % (
len(entries), time.time() - chunkStartTime, prepTime,
chunk + len(entries), len(elements)))
if time.time() - startTime > 10:
logger.info('inserted %d elements in %4.2fs' % (
len(elements), time.time() - startTime))
def getElementGroupSet(self, annotation):
query = {
'annotationId': annotation.get('_annotationId', annotation['_id']),
'_version': annotation['_version']
}
groups = sorted([
group for group in self.collection.distinct('element.group', filter=query)
if isinstance(group, str)
])
query['element.group'] = None
if self.collection.find_one(query):
groups.append(None)
return groups
| 0.000388 |
# -*- coding: utf-8 -*-
"""
babel.plural
~~~~~~~~~~~~
CLDR Plural support. See UTS #35.
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
import re
import sys
from babel._compat import Decimal
_plural_tags = ('zero', 'one', 'two', 'few', 'many', 'other')
_fallback_tag = 'other'
def extract_operands(source):
"""Extract operands from a decimal, a float or an int, according to
`CLDR rules`_.
.. _`CLDR rules`: http://www.unicode.org/reports/tr35/tr35-33/tr35-numbers.html#Operands
"""
n = abs(source)
i = int(n)
if isinstance(n, float):
if i == n:
n = i
else:
# 2.6's Decimal cannot convert from float directly
if sys.version_info < (2, 7):
n = str(n)
n = Decimal(n)
if isinstance(n, Decimal):
dec_tuple = n.as_tuple()
exp = dec_tuple.exponent
fraction_digits = dec_tuple.digits[exp:] if exp < 0 else ()
trailing = ''.join(str(d) for d in fraction_digits)
no_trailing = trailing.rstrip('0')
v = len(trailing)
w = len(no_trailing)
f = int(trailing or 0)
t = int(no_trailing or 0)
else:
v = w = f = t = 0
return n, i, v, w, f, t
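# A hedged doctest-style illustration (not in the original module) of the
# CLDR operands returned for a decimal with a trailing zero:
#
#   >>> extract_operands(Decimal('1.30'))
#   (Decimal('1.30'), 1, 2, 1, 30, 3)
#
# i.e. n=1.30, integer part i=1, v=2 visible fraction digits (with
# trailing zeros), w=1 without trailing zeros, f=30 and t=3.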
class PluralRule(object):
"""Represents a set of language pluralization rules. The constructor
accepts a list of (tag, expr) tuples or a dict of `CLDR rules`_. The
resulting object is callable and accepts one parameter with a positive or
negative number (both integer and float) for the number that indicates the
plural form for a string and returns the tag for the format:
>>> rule = PluralRule({'one': 'n is 1'})
>>> rule(1)
'one'
>>> rule(2)
'other'
Currently the CLDR defines these tags: zero, one, two, few, many and
other where other is an implicit default. Rules should be mutually
exclusive; for a given numeric value, only one rule should apply (i.e.
the condition should only be true for one of the plural rule elements).
.. _`CLDR rules`: http://www.unicode.org/reports/tr35/tr35-33/tr35-numbers.html#Language_Plural_Rules
"""
__slots__ = ('abstract', '_func')
def __init__(self, rules):
"""Initialize the rule instance.
:param rules: a list of ``(tag, expr)`` tuples with the rules
conforming to UTS #35 or a dict with the tags as keys
and expressions as values.
:raise RuleError: if the expression is malformed
"""
if isinstance(rules, dict):
rules = rules.items()
found = set()
self.abstract = []
for key, expr in sorted(list(rules)):
if key == 'other':
continue
if key not in _plural_tags:
raise ValueError('unknown tag %r' % key)
elif key in found:
raise ValueError('tag %r defined twice' % key)
found.add(key)
self.abstract.append((key, _Parser(expr).ast))
def __repr__(self):
rules = self.rules
return '<%s %r>' % (
type(self).__name__,
', '.join(['%s: %s' % (tag, rules[tag]) for tag in _plural_tags
if tag in rules])
)
@classmethod
def parse(cls, rules):
"""Create a `PluralRule` instance for the given rules. If the rules
are a `PluralRule` object, that object is returned.
:param rules: the rules as list or dict, or a `PluralRule` object
:raise RuleError: if the expression is malformed
"""
if isinstance(rules, cls):
return rules
return cls(rules)
@property
def rules(self):
"""The `PluralRule` as a dict of unicode plural rules.
>>> rule = PluralRule({'one': 'n is 1'})
>>> rule.rules
{'one': 'n is 1'}
"""
_compile = _UnicodeCompiler().compile
return dict([(tag, _compile(ast)) for tag, ast in self.abstract])
tags = property(lambda x: frozenset([i[0] for i in x.abstract]), doc="""
A set of explicitly defined tags in this rule. The implicit default
``'other'`` rule is not part of this set unless there is an explicit
rule for it.""")
def __getstate__(self):
return self.abstract
def __setstate__(self, abstract):
self.abstract = abstract
def __call__(self, n):
if not hasattr(self, '_func'):
self._func = to_python(self)
return self._func(n)
def to_javascript(rule):
"""Convert a list/dict of rules or a `PluralRule` object into a JavaScript
function. This function depends on no external library:
>>> to_javascript({'one': 'n is 1'})
"(function(n) { return (n == 1) ? 'one' : 'other'; })"
Implementation detail: The function generated will probably evaluate
expressions involved in range operations multiple times. This has the
advantage that external helper functions are not required and is not a
big performance hit for these simple calculations.
:param rule: the rules as list or dict, or a `PluralRule` object
:raise RuleError: if the expression is malformed
"""
to_js = _JavaScriptCompiler().compile
result = ['(function(n) { return ']
for tag, ast in PluralRule.parse(rule).abstract:
result.append('%s ? %r : ' % (to_js(ast), tag))
result.append('%r; })' % _fallback_tag)
return ''.join(result)
def to_python(rule):
"""Convert a list/dict of rules or a `PluralRule` object into a regular
Python function. This is useful in situations where you need a real
function and don't care about the actual rule object:
>>> func = to_python({'one': 'n is 1', 'few': 'n in 2..4'})
>>> func(1)
'one'
>>> func(3)
'few'
>>> func = to_python({'one': 'n in 1,11', 'few': 'n in 3..10,13..19'})
>>> func(11)
'one'
>>> func(15)
'few'
:param rule: the rules as list or dict, or a `PluralRule` object
:raise RuleError: if the expression is malformed
"""
namespace = {
'IN': in_range_list,
'WITHIN': within_range_list,
'MOD': cldr_modulo,
'extract_operands': extract_operands,
}
to_python_func = _PythonCompiler().compile
result = [
'def evaluate(n):',
' n, i, v, w, f, t = extract_operands(n)',
]
for tag, ast in PluralRule.parse(rule).abstract:
# the str() call is to coerce the tag to the native string. It's
# a limited, ASCII-restricted set of tags anyway, so that is fine.
result.append(' if (%s): return %r' % (to_python_func(ast), str(tag)))
result.append(' return %r' % _fallback_tag)
code = compile('\n'.join(result), '<rule>', 'exec')
eval(code, namespace)
return namespace['evaluate']
def to_gettext(rule):
"""The plural rule as gettext expression. The gettext expression is
technically limited to integers and returns indices rather than tags.
>>> to_gettext({'one': 'n is 1', 'two': 'n is 2'})
'nplurals=3; plural=((n == 1) ? 0 : (n == 2) ? 1 : 2)'
:param rule: the rules as list or dict, or a `PluralRule` object
:raise RuleError: if the expression is malformed
"""
rule = PluralRule.parse(rule)
used_tags = rule.tags | set([_fallback_tag])
_compile = _GettextCompiler().compile
_get_index = [tag for tag in _plural_tags if tag in used_tags].index
result = ['nplurals=%d; plural=(' % len(used_tags)]
for tag, ast in rule.abstract:
result.append('%s ? %d : ' % (_compile(ast), _get_index(tag)))
result.append('%d)' % _get_index(_fallback_tag))
return ''.join(result)
def in_range_list(num, range_list):
"""Integer range list test. This is the callback for the "in" operator
of the UTS #35 pluralization rule language:
>>> in_range_list(1, [(1, 3)])
True
>>> in_range_list(3, [(1, 3)])
True
>>> in_range_list(3, [(1, 3), (5, 8)])
True
>>> in_range_list(1.2, [(1, 4)])
False
>>> in_range_list(10, [(1, 4)])
False
>>> in_range_list(10, [(1, 4), (6, 8)])
False
"""
return num == int(num) and within_range_list(num, range_list)
def within_range_list(num, range_list):
"""Float range test. This is the callback for the "within" operator
of the UTS #35 pluralization rule language:
>>> within_range_list(1, [(1, 3)])
True
>>> within_range_list(1.0, [(1, 3)])
True
>>> within_range_list(1.2, [(1, 4)])
True
>>> within_range_list(8.8, [(1, 4), (7, 15)])
True
>>> within_range_list(10, [(1, 4)])
False
>>> within_range_list(10.5, [(1, 4), (20, 30)])
False
"""
return any(num >= min_ and num <= max_ for min_, max_ in range_list)
def cldr_modulo(a, b):
"""Javaish modulo. This modulo operator returns the value with the sign
of the dividend rather than the divisor like Python does:
>>> cldr_modulo(-3, 5)
-3
>>> cldr_modulo(-3, -5)
-3
>>> cldr_modulo(3, 5)
3
"""
reverse = 0
if a < 0:
a *= -1
reverse = 1
if b < 0:
b *= -1
rv = a % b
if reverse:
rv *= -1
return rv
class RuleError(Exception):
"""Raised if a rule is malformed."""
_VARS = 'nivwft'
_RULES = [
(None, re.compile(r'\s+(?u)')),
('word', re.compile(r'\b(and|or|is|(?:with)?in|not|mod|[{0}])\b'
.format(_VARS))),
('value', re.compile(r'\d+')),
('symbol', re.compile(r'%|,|!=|=')),
('ellipsis', re.compile(r'\.\.'))
]
def tokenize_rule(s):
s = s.split('@')[0]
result = []
pos = 0
end = len(s)
while pos < end:
for tok, rule in _RULES:
match = rule.match(s, pos)
if match is not None:
pos = match.end()
if tok:
result.append((tok, match.group()))
break
else:
raise RuleError('malformed CLDR pluralization rule. '
'Got unexpected %r' % s[pos])
return result[::-1]
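# A hedged illustration (not from the original module): the token list is
# returned reversed so the parser can pop() tokens off the end.
#
#   >>> tokenize_rule('n is 1')
#   [('value', '1'), ('word', 'is'), ('word', 'n')]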
def test_next_token(tokens, type_, value=None):
return tokens and tokens[-1][0] == type_ and \
(value is None or tokens[-1][1] == value)
def skip_token(tokens, type_, value=None):
if test_next_token(tokens, type_, value):
return tokens.pop()
def value_node(value):
return 'value', (value, )
def ident_node(name):
return name, ()
def range_list_node(range_list):
return 'range_list', range_list
def negate(rv):
return 'not', (rv,)
class _Parser(object):
"""Internal parser. This class can translate a single rule into an abstract
tree of tuples. It implements the following grammar::
condition = and_condition ('or' and_condition)*
('@integer' samples)?
('@decimal' samples)?
and_condition = relation ('and' relation)*
relation = is_relation | in_relation | within_relation
is_relation = expr 'is' ('not')? value
in_relation = expr (('not')? 'in' | '=' | '!=') range_list
within_relation = expr ('not')? 'within' range_list
expr = operand (('mod' | '%') value)?
operand = 'n' | 'i' | 'f' | 't' | 'v' | 'w'
range_list = (range | value) (',' range_list)*
value = digit+
digit = 0|1|2|3|4|5|6|7|8|9
range = value'..'value
samples = sampleRange (',' sampleRange)* (',' ('…'|'...'))?
sampleRange = decimalValue '~' decimalValue
decimalValue = value ('.' value)?
- Whitespace can occur between or around any of the above tokens.
- Rules should be mutually exclusive; for a given numeric value, only one
rule should apply (i.e. the condition should only be true for one of
the plural rule elements).
- The in and within relations can take comma-separated lists, such as:
'n in 3,5,7..15'.
- Samples are ignored.
The translator parses the expression on instantiation into an attribute
called `ast`.
"""
def __init__(self, string):
self.tokens = tokenize_rule(string)
self.ast = self.condition()
if self.tokens:
raise RuleError('Expected end of rule, got %r' %
self.tokens[-1][1])
def expect(self, type_, value=None, term=None):
token = skip_token(self.tokens, type_, value)
if token is not None:
return token
if term is None:
term = repr(value is None and type_ or value)
if not self.tokens:
raise RuleError('expected %s but end of rule reached' % term)
raise RuleError('expected %s but got %r' % (term, self.tokens[-1][1]))
def condition(self):
op = self.and_condition()
while skip_token(self.tokens, 'word', 'or'):
op = 'or', (op, self.and_condition())
return op
def and_condition(self):
op = self.relation()
while skip_token(self.tokens, 'word', 'and'):
op = 'and', (op, self.relation())
return op
def relation(self):
left = self.expr()
if skip_token(self.tokens, 'word', 'is'):
return skip_token(self.tokens, 'word', 'not') and 'isnot' or 'is', \
(left, self.value())
negated = skip_token(self.tokens, 'word', 'not')
method = 'in'
if skip_token(self.tokens, 'word', 'within'):
method = 'within'
else:
if not skip_token(self.tokens, 'word', 'in'):
if negated:
raise RuleError('Cannot negate operator based rules.')
return self.newfangled_relation(left)
rv = 'relation', (method, left, self.range_list())
return negate(rv) if negated else rv
def newfangled_relation(self, left):
if skip_token(self.tokens, 'symbol', '='):
negated = False
elif skip_token(self.tokens, 'symbol', '!='):
negated = True
else:
raise RuleError('Expected "=" or "!=" or legacy relation')
rv = 'relation', ('in', left, self.range_list())
return negate(rv) if negated else rv
def range_or_value(self):
left = self.value()
if skip_token(self.tokens, 'ellipsis'):
return left, self.value()
else:
return left, left
def range_list(self):
range_list = [self.range_or_value()]
while skip_token(self.tokens, 'symbol', ','):
range_list.append(self.range_or_value())
return range_list_node(range_list)
def expr(self):
word = skip_token(self.tokens, 'word')
if word is None or word[1] not in _VARS:
raise RuleError('Expected identifier variable')
name = word[1]
if skip_token(self.tokens, 'word', 'mod'):
return 'mod', ((name, ()), self.value())
elif skip_token(self.tokens, 'symbol', '%'):
return 'mod', ((name, ()), self.value())
return ident_node(name)
def value(self):
return value_node(int(self.expect('value')[1]))
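# A hedged illustration (not part of the original module) of the abstract
# trees the parser produces:
#
#   >>> _Parser('n is 1').ast
#   ('is', (('n', ()), ('value', (1,))))
#   >>> _Parser('n mod 10 in 2..4').ast
#   ('relation', ('in', ('mod', (('n', ()), ('value', (10,)))),
#                 ('range_list', [(('value', (2,)), ('value', (4,)))])))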
def _binary_compiler(tmpl):
"""Compiler factory for the `_Compiler`."""
return lambda self, l, r: tmpl % (self.compile(l), self.compile(r))
def _unary_compiler(tmpl):
"""Compiler factory for the `_Compiler`."""
return lambda self, x: tmpl % self.compile(x)
class _Compiler(object):
"""The compilers are able to transform the expressions into multiple
output formats.
"""
def compile(self, arg):
op, args = arg
return getattr(self, 'compile_' + op)(*args)
compile_n = lambda x: 'n'
compile_i = lambda x: 'i'
compile_v = lambda x: 'v'
compile_w = lambda x: 'w'
compile_f = lambda x: 'f'
compile_t = lambda x: 't'
compile_value = lambda x, v: str(v)
compile_and = _binary_compiler('(%s && %s)')
compile_or = _binary_compiler('(%s || %s)')
compile_not = _unary_compiler('(!%s)')
compile_mod = _binary_compiler('(%s %% %s)')
compile_is = _binary_compiler('(%s == %s)')
compile_isnot = _binary_compiler('(%s != %s)')
def compile_relation(self, method, expr, range_list):
raise NotImplementedError()
class _PythonCompiler(_Compiler):
"""Compiles an expression to Python."""
compile_and = _binary_compiler('(%s and %s)')
compile_or = _binary_compiler('(%s or %s)')
compile_not = _unary_compiler('(not %s)')
compile_mod = _binary_compiler('MOD(%s, %s)')
def compile_relation(self, method, expr, range_list):
compile_range_list = '[%s]' % ','.join(
['(%s, %s)' % tuple(map(self.compile, range_))
for range_ in range_list[1]])
return '%s(%s, %s)' % (method.upper(), self.compile(expr),
compile_range_list)
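# A hedged illustration (not in the original module) of the Python source
# fragment this compiler emits for a parsed rule:
#
#   >>> _PythonCompiler().compile(_Parser('n in 2..4').ast)
#   'IN(n, [(2, 4)])'
#
# to_python() then wraps such fragments in an `if` per tag inside the
# generated evaluate() function.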
class _GettextCompiler(_Compiler):
"""Compile into a gettext plural expression."""
def compile_relation(self, method, expr, range_list):
rv = []
expr = self.compile(expr)
for item in range_list[1]:
if item[0] == item[1]:
rv.append('(%s == %s)' % (
expr,
self.compile(item[0])
))
else:
min, max = map(self.compile, item)
rv.append('(%s >= %s && %s <= %s)' % (
expr,
min,
expr,
max
))
return '(%s)' % ' || '.join(rv)
class _JavaScriptCompiler(_GettextCompiler):
"""Compiles the expression to plain of JavaScript."""
# XXX: presently javascript does not support any of the
# fraction support and basically only deals with integers.
compile_i = lambda x: 'parseInt(n, 10)'
compile_v = lambda x: '0'
compile_w = lambda x: '0'
compile_f = lambda x: '0'
compile_t = lambda x: '0'
def compile_relation(self, method, expr, range_list):
code = _GettextCompiler.compile_relation(
self, method, expr, range_list)
if method == 'in':
expr = self.compile(expr)
code = '(parseInt(%s, 10) == %s && %s)' % (expr, expr, code)
return code
class _UnicodeCompiler(_Compiler):
"""Returns a unicode pluralization rule again."""
# XXX: this currently spits out the old syntax instead of the new
# one. We can change that, but it will break a whole bunch of stuff
# for users I suppose.
compile_is = _binary_compiler('%s is %s')
compile_isnot = _binary_compiler('%s is not %s')
compile_and = _binary_compiler('%s and %s')
compile_or = _binary_compiler('%s or %s')
compile_mod = _binary_compiler('%s mod %s')
def compile_not(self, relation):
return self.compile_relation(negated=True, *relation[1])
def compile_relation(self, method, expr, range_list, negated=False):
ranges = []
for item in range_list[1]:
if item[0] == item[1]:
ranges.append(self.compile(item[0]))
else:
ranges.append('%s..%s' % tuple(map(self.compile, item)))
return '%s%s %s %s' % (
self.compile(expr), negated and ' not' or '',
method, ','.join(ranges)
)
| 0.000875 |
# -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <[email protected]>
# please also see AUTHORS file
# :copyright: (c) 2013, Isis Lovecruft
# (c) 2007-2013, The Tor Project, Inc.
# (c) 2007-2013, all entities within the AUTHORS file
# :license: 3-Clause BSD, see LICENSE for licensing information
"""Unittests for the :mod:`bridgedb.email.templates` module."""
from __future__ import print_function
from __future__ import unicode_literals
from io import StringIO
from gettext import NullTranslations
from twisted.mail.smtp import Address
from twisted.trial import unittest
from bridgedb.email import templates
class EmailTemplatesTests(unittest.TestCase):
"""Unittests for :func:`b.e.templates`."""
def setUp(self):
self.t = NullTranslations(StringIO(unicode('test')))
self.client = Address('[email protected]')
self.answer = 'obfs3 1.1.1.1:1111\nobfs3 2.2.2.2:2222'
# This is the fingerprint of BridgeDB's offline, certification-only
# GnuPG key. It should be present in any responses to requests for our
# public keys.
self.offlineFingerprint = '7B78437015E63DF47BB1270ACBD97AA24E8E472E'
def shouldIncludeCommands(self, text):
self.assertSubstring('COMMANDs', text)
def shouldIncludeInstructions(self, text):
self.assertSubstring('Tor Browser', text)
def shouldIncludeBridges(self, text):
self.assertSubstring(self.answer, text)
self.assertSubstring('Here are your bridges:', text)
def shouldIncludeGreeting(self, text):
self.assertSubstring('Hey, blackhole!', text)
def shouldIncludeAutomationNotice(self, text):
self.assertSubstring('automated message', text)
def shouldIncludeKey(self, text):
self.assertSubstring('-----BEGIN PGP PUBLIC KEY BLOCK-----', text)
def shouldIncludeFooter(self, text):
self.assertSubstring('rainbows, unicorns, and sparkles', text)
def test_templates_addCommands(self):
text = templates.addCommands(self.t)
self.shouldIncludeCommands(text)
def test_templates_addGreeting(self):
text = templates.addGreeting(self.t, self.client.local)
self.shouldIncludeGreeting(text)
def test_templates_addGreeting_noClient(self):
text = templates.addGreeting(self.t, None)
self.assertSubstring('Hello, friend!', text)
def test_templates_addGreeting_withWelcome(self):
text = templates.addGreeting(self.t, self.client.local, welcome=True)
self.shouldIncludeGreeting(text)
self.assertSubstring('Welcome to BridgeDB!', text)
def test_templates_addGreeting_trueClient(self):
text = templates.addGreeting(self.t, True)
self.assertSubstring('Hey', text)
def test_templates_addGreeting_23Client(self):
text = templates.addGreeting(self.t, 23)
self.assertSubstring('Hey', text)
def test_templates_addHowto(self):
text = templates.addHowto(self.t)
self.shouldIncludeInstructions(text)
def test_templates_addBridgeAnswer(self):
text = templates.addBridgeAnswer(self.t, self.answer)
self.shouldIncludeBridges(text)
def test_templates_addFooter(self):
text = templates.addFooter(self.t, self.client)
self.shouldIncludeFooter(text)
def test_templates_buildAnswerMessage(self):
text = templates.buildAnswerMessage(self.t, self.client, self.answer)
self.assertSubstring(self.answer, text)
self.shouldIncludeAutomationNotice(text)
self.shouldIncludeCommands(text)
self.shouldIncludeFooter(text)
def test_templates_buildKeyMessage(self):
text = templates.buildKeyMessage(self.t, self.client)
self.assertSubstring(self.offlineFingerprint, text)
def test_templates_buildWelcomeText(self):
text = templates.buildWelcomeText(self.t, self.client)
self.shouldIncludeGreeting(text)
self.assertSubstring('Welcome to BridgeDB!', text)
self.shouldIncludeCommands(text)
self.shouldIncludeFooter(text)
def test_templates_buildSpamWarning(self):
text = templates.buildSpamWarning(self.t, self.client)
self.shouldIncludeGreeting(text)
self.shouldIncludeAutomationNotice(text)
self.shouldIncludeFooter(text)
| 0 |
# Copyright 2006 Joe Wreschnig <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# $Id: ogg.py 4275 2008-06-01 06:32:37Z piman $
"""Read and write Ogg bitstreams and pages.
This module reads and writes a subset of the Ogg bitstream format
version 0. It does *not* read or write Ogg Vorbis files! For that,
you should use mutagen.oggvorbis.
This implementation is based on the RFC 3533 standard found at
http://www.xiph.org/ogg/doc/rfc3533.txt.
"""
import struct
import sys
import zlib
from cStringIO import StringIO
from mutagen import FileType
from mutagen._util import cdata, insert_bytes, delete_bytes
class error(IOError):
"""Ogg stream parsing errors."""
pass
class OggPage(object):
"""A single Ogg page (not necessarily a single encoded packet).
A page is a header of 26 bytes, followed by the length of the
data, followed by the data.
The constructor is given a file-like object pointing to the start
of an Ogg page. After the constructor is finished it is pointing
to the start of the next page.
Attributes:
version -- stream structure version (currently always 0)
position -- absolute stream position (default -1)
serial -- logical stream serial number (default 0)
sequence -- page sequence number within logical stream (default 0)
offset -- offset this page was read from (default None)
complete -- if the last packet on this page is complete (default True)
packets -- list of raw packet data (default [])
Note that if 'complete' is false, the next page's 'continued'
property must be true (so set both when constructing pages).
If a file-like object is supplied to the constructor, the above
attributes will be filled in based on it.
"""
version = 0
__type_flags = 0
position = 0L
serial = 0
sequence = 0
offset = None
complete = True
def __init__(self, fileobj=None):
self.packets = []
if fileobj is None:
return
self.offset = fileobj.tell()
header = fileobj.read(27)
if len(header) == 0:
raise EOFError
try:
(oggs, self.version, self.__type_flags, self.position,
self.serial, self.sequence, crc, segments) = struct.unpack(
"<4sBBqIIiB", header)
except struct.error:
raise error("unable to read full header; got %r" % header)
if oggs != "OggS":
raise error("read %r, expected %r, at 0x%x" % (
oggs, "OggS", fileobj.tell() - 27))
if self.version != 0:
raise error("version %r unsupported" % self.version)
total = 0
lacings = []
lacing_bytes = fileobj.read(segments)
if len(lacing_bytes) != segments:
raise error("unable to read %r lacing bytes" % segments)
for c in map(ord, lacing_bytes):
total += c
if c < 255:
lacings.append(total)
total = 0
if total:
lacings.append(total)
self.complete = False
self.packets = map(fileobj.read, lacings)
if map(len, self.packets) != lacings:
raise error("unable to read full data")
def __eq__(self, other):
"""Two Ogg pages are the same if they write the same data."""
try:
return (self.write() == other.write())
except AttributeError:
return False
def __repr__(self):
attrs = ['version', 'position', 'serial', 'sequence', 'offset',
'complete', 'continued', 'first', 'last']
values = ["%s=%r" % (attr, getattr(self, attr)) for attr in attrs]
return "<%s %s, %d bytes in %d packets>" % (
type(self).__name__, " ".join(values), sum(map(len, self.packets)),
len(self.packets))
def write(self):
"""Return a string encoding of the page header and data.
A ValueError is raised if the data is too big to fit in a
single page.
"""
data = [
struct.pack("<4sBBqIIi", "OggS", self.version, self.__type_flags,
self.position, self.serial, self.sequence, 0)
]
lacing_data = []
for datum in self.packets:
quot, rem = divmod(len(datum), 255)
lacing_data.append("\xff" * quot + chr(rem))
lacing_data = "".join(lacing_data)
if not self.complete and lacing_data.endswith("\x00"):
lacing_data = lacing_data[:-1]
data.append(chr(len(lacing_data)))
data.append(lacing_data)
data.extend(self.packets)
data = "".join(data)
# Python's CRC is swapped relative to Ogg's needs.
crc = ~zlib.crc32(data.translate(cdata.bitswap), -1)
# Although we're using to_int_be, this actually makes the CRC
# a proper le integer, since Python's CRC is byteswapped.
crc = cdata.to_int_be(crc).translate(cdata.bitswap)
data = data[:22] + crc + data[26:]
return data
def __size(self):
size = 27 # Initial header size
for datum in self.packets:
quot, rem = divmod(len(datum), 255)
size += quot + 1
if not self.complete and rem == 0:
# Packet contains a multiple of 255 bytes and is not
# terminated, so we don't have a \x00 at the end.
size -= 1
size += sum(map(len, self.packets))
return size
size = property(__size, doc="Total frame size.")
def __set_flag(self, bit, val):
mask = 1 << bit
if val: self.__type_flags |= mask
else: self.__type_flags &= ~mask
continued = property(
lambda self: cdata.test_bit(self.__type_flags, 0),
lambda self, v: self.__set_flag(0, v),
doc="The first packet is continued from the previous page.")
first = property(
lambda self: cdata.test_bit(self.__type_flags, 1),
lambda self, v: self.__set_flag(1, v),
doc="This is the first page of a logical bitstream.")
last = property(
lambda self: cdata.test_bit(self.__type_flags, 2),
lambda self, v: self.__set_flag(2, v),
doc="This is the last page of a logical bitstream.")
def renumber(klass, fileobj, serial, start):
"""Renumber pages belonging to a specified logical stream.
fileobj must be opened with mode r+b or w+b.
Starting at page number 'start', renumber all pages belonging
to logical stream 'serial'. Other pages will be ignored.
fileobj must point to the start of a valid Ogg page; any
occurring after it and part of the specified logical stream
will be numbered. No adjustment will be made to the data in
the pages nor the granule position; only the page number, and
so also the CRC.
If an error occurs (e.g. non-Ogg data is found), fileobj will
be left pointing to the place in the stream the error occurred,
but the invalid data will be left intact (since this function
does not change the total file size).
"""
number = start
while True:
try: page = OggPage(fileobj)
except EOFError:
break
else:
if page.serial != serial:
# Wrong stream, skip this page.
continue
# Changing the number can't change the page size,
# so seeking back based on the current size is safe.
fileobj.seek(-page.size, 1)
page.sequence = number
fileobj.write(page.write())
fileobj.seek(page.offset + page.size, 0)
number += 1
renumber = classmethod(renumber)
def to_packets(klass, pages, strict=False):
"""Construct a list of packet data from a list of Ogg pages.
If strict is true, the first page must start a new packet,
and the last page must end the last packet.
"""
serial = pages[0].serial
sequence = pages[0].sequence
packets = []
if strict:
if pages[0].continued:
raise ValueError("first packet is continued")
if not pages[-1].complete:
raise ValueError("last packet does not complete")
elif pages and pages[0].continued:
packets.append("")
for page in pages:
if serial != page.serial:
raise ValueError("invalid serial number in %r" % page)
elif sequence != page.sequence:
raise ValueError("bad sequence number in %r" % page)
else: sequence += 1
if page.continued: packets[-1] += page.packets[0]
else: packets.append(page.packets[0])
packets.extend(page.packets[1:])
return packets
to_packets = classmethod(to_packets)
def from_packets(klass, packets, sequence=0,
default_size=4096, wiggle_room=2048):
"""Construct a list of Ogg pages from a list of packet data.
The algorithm will generate pages of approximately
default_size in size (rounded down to the nearest multiple of
255). However, it will also allow pages to increase to
approximately default_size + wiggle_room if allowing the
wiggle room would finish a packet (only one packet will be
finished in this way per page; if the next packet would fit
into the wiggle room, it still starts on a new page).
This method reduces packet fragmentation when packet sizes are
slightly larger than the default page size, while still
ensuring most pages are of the average size.
Pages are numbered starting at 'sequence'; other information is
uninitialized.
"""
chunk_size = (default_size // 255) * 255
pages = []
page = OggPage()
page.sequence = sequence
for packet in packets:
page.packets.append("")
while packet:
data, packet = packet[:chunk_size], packet[chunk_size:]
if page.size < default_size and len(page.packets) < 255:
page.packets[-1] += data
else:
# If we've put any packet data into this page yet,
# we need to mark it incomplete. However, we can
# also have just started this packet on an already
# full page, in which case, just start the new
# page with this packet.
if page.packets[-1]:
page.complete = False
if len(page.packets) == 1:
page.position = -1L
else:
page.packets.pop(-1)
pages.append(page)
page = OggPage()
page.continued = not pages[-1].complete
page.sequence = pages[-1].sequence + 1
page.packets.append(data)
if len(packet) < wiggle_room:
page.packets[-1] += packet
packet = ""
if page.packets:
pages.append(page)
return pages
from_packets = classmethod(from_packets)
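# A hedged usage sketch (not part of the original module): packets split
# into pages by from_packets can be reassembled losslessly by to_packets.
#
#   >>> packets = ["header", "x" * 10000, "tail"]
#   >>> pages = OggPage.from_packets(packets, sequence=7)
#   >>> OggPage.to_packets(pages) == packets
#   True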
def replace(klass, fileobj, old_pages, new_pages):
"""Replace old_pages with new_pages within fileobj.
old_pages must have come from reading fileobj originally.
new_pages are assumed to have the 'same' data as old_pages,
and so the serial and sequence numbers will be copied, as will
the flags for the first and last pages.
fileobj will be resized and pages renumbered as necessary. As
such, it must be opened r+b or w+b.
"""
# Number the new pages starting from the first old page.
first = old_pages[0].sequence
for page, seq in zip(new_pages, range(first, first + len(new_pages))):
page.sequence = seq
page.serial = old_pages[0].serial
new_pages[0].first = old_pages[0].first
new_pages[0].last = old_pages[0].last
new_pages[0].continued = old_pages[0].continued
new_pages[-1].first = old_pages[-1].first
new_pages[-1].last = old_pages[-1].last
new_pages[-1].complete = old_pages[-1].complete
if not new_pages[-1].complete and len(new_pages[-1].packets) == 1:
new_pages[-1].position = -1L
new_data = "".join(map(klass.write, new_pages))
# Make room in the file for the new data.
delta = len(new_data)
fileobj.seek(old_pages[0].offset, 0)
insert_bytes(fileobj, delta, old_pages[0].offset)
fileobj.seek(old_pages[0].offset, 0)
fileobj.write(new_data)
new_data_end = old_pages[0].offset + delta
# Go through the old pages and delete them. Since we shifted
# the data down the file, we need to adjust their offsets. We
# also need to go backwards, so we don't adjust the deltas of
# the other pages.
old_pages.reverse()
for old_page in old_pages:
adj_offset = old_page.offset + delta
delete_bytes(fileobj, old_page.size, adj_offset)
# Finally, if there's any discrepancy in length, we need to
# renumber the pages for the logical stream.
if len(old_pages) != len(new_pages):
fileobj.seek(new_data_end, 0)
serial = new_pages[-1].serial
sequence = new_pages[-1].sequence + 1
klass.renumber(fileobj, serial, sequence)
replace = classmethod(replace)
def find_last(klass, fileobj, serial):
"""Find the last page of the stream 'serial'.
If the file is not multiplexed this function is fast. If it is,
it must read the whole stream.
This finds the last page in the actual file object, or the last
page in the stream (with eos set), whichever comes first.
"""
# For non-muxed streams, look at the last page.
try: fileobj.seek(-256*256, 2)
except IOError:
# The file is less than 64k in length.
fileobj.seek(0)
data = fileobj.read()
try: index = data.rindex("OggS")
except ValueError:
raise error("unable to find final Ogg header")
stringobj = StringIO(data[index:])
best_page = None
try:
page = OggPage(stringobj)
except error:
pass
else:
if page.serial == serial:
if page.last: return page
else: best_page = page
else: best_page = None
# The stream is muxed, so use the slow way.
fileobj.seek(0)
try:
page = OggPage(fileobj)
while not page.last:
page = OggPage(fileobj)
while page.serial != serial:
page = OggPage(fileobj)
best_page = page
return page
except error:
return best_page
except EOFError:
return best_page
find_last = classmethod(find_last)
class OggFileType(FileType):
"""An generic Ogg file."""
_Info = None
_Tags = None
_Error = None
_mimes = ["application/ogg", "application/x-ogg"]
def load(self, filename):
"""Load file information from a filename."""
self.filename = filename
fileobj = file(filename, "rb")
try:
try:
self.info = self._Info(fileobj)
self.tags = self._Tags(fileobj, self.info)
if self.info.length:
# The streaminfo gave us real length information,
# don't waste time scanning the Ogg.
return
last_page = OggPage.find_last(fileobj, self.info.serial)
samples = last_page.position
try:
denom = self.info.sample_rate
except AttributeError:
denom = self.info.fps
self.info.length = samples / float(denom)
except error, e:
raise self._Error, e, sys.exc_info()[2]
except EOFError:
raise self._Error, "no appropriate stream found"
finally:
fileobj.close()
def delete(self, filename=None):
"""Remove tags from a file.
If no filename is given, the one most recently loaded is used.
"""
if filename is None:
filename = self.filename
self.tags.clear()
fileobj = file(filename, "rb+")
try:
try: self.tags._inject(fileobj)
except error, e:
raise self._Error, e, sys.exc_info()[2]
except EOFError:
raise self._Error, "no appropriate stream found"
finally:
fileobj.close()
def save(self, filename=None):
"""Save a tag to a file.
If no filename is given, the one most recently loaded is used.
"""
if filename is None:
filename = self.filename
fileobj = file(filename, "rb+")
try:
try: self.tags._inject(fileobj)
except error, e:
raise self._Error, e, sys.exc_info()[2]
except EOFError:
raise self._Error, "no appropriate stream found"
finally:
fileobj.close()
| 0.00096 |
# -*- coding: utf-8 -*-
'''
Virtual machine image management tools
'''
# Import python libs
from __future__ import absolute_import
import logging
# Import salt libs
import salt.utils
# Import 3rd-party libs
import salt.ext.six as six
# Set up logging
log = logging.getLogger(__name__)
def mount_image(location):
'''
Mount the named image and return the mount point
CLI Example:
.. code-block:: bash
salt '*' img.mount_image /tmp/foo
'''
if 'guestfs.mount' in __salt__:
return __salt__['guestfs.mount'](location)
elif 'qemu_nbd.init' in __salt__:
mnt = __salt__['qemu_nbd.init'](location)
if not mnt:
return ''
first = next(six.iterkeys(mnt))
__context__['img.mnt_{0}'.format(first)] = mnt
return first
return ''
# compatibility for api change
mnt_image = salt.utils.alias_function(mount_image, 'mnt_image')
def umount_image(mnt):
'''
Unmount an image mountpoint
CLI Example:
.. code-block:: bash
salt '*' img.umount_image /mnt/foo
'''
if 'qemu_nbd.clear' in __salt__:
if 'img.mnt_{0}'.format(mnt) in __context__:
__salt__['qemu_nbd.clear'](__context__['img.mnt_{0}'.format(mnt)])
return
__salt__['mount.umount'](mnt)
#def get_image(name):
# '''
# Download a vm image from a remote source and add it to the image cache
# system
# '''
# cache_dir = os.path.join(__salt__['config.option']('img.cache'), 'src')
# parse = urlparse(name)
# if __salt__['config.valid_file_proto'](parse.scheme):
# # Valid scheme to download
# dest = os.path.join(cache_dir, parse.netloc)
# sfn = __salt__['file.get_managed'](dest, None, name, )
def bootstrap(location, size, fmt):
'''
HIGHLY EXPERIMENTAL
Bootstrap a virtual machine image
location:
The location to create the image
size:
The size of the image to create in megabytes
fmt:
The image format, raw or qcow2
CLI Example:
.. code-block:: bash
salt '*' img.bootstrap /srv/salt-images/host.qcow 4096 qcow2
'''
location = __salt__['img.make_image'](location, size, fmt)
if not location:
return ''
nbd = __salt__['qemu_nbd.connect'](location)
__salt__['partition.mklabel'](nbd, 'msdos')
__salt__['partition.mkpart'](nbd, 'primary', 'ext4', 1, -1)
__salt__['partition.probe'](nbd)
__salt__['partition.mkfs']('{0}p1'.format(nbd), 'ext4')
mnt = __salt__['qemu_nbd.mount'](nbd)
#return __salt__['pkg.bootstrap'](nbd, mnt.iterkeys().next())
| 0.001147 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Cubic ERP - Teradata SAC (<http://cubicerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Electronic Invoice",
"version": "1.0",
"description": """
Manage the electronic invoice
=============================
Electronic invoice management integrates invoices with digital signatures and certificates, usually within a PKI infrastructure, exchanging XML messages with web services that generate and validate the electronic invoices.
Key Features
------------
* Add support for managing the web service communication used to generate and validate an electronic invoice
* Provide an abstract model to manage electronic invoices from several countries
""",
"author": "Cubic ERP",
"website": "http://cubicERP.com",
"category": "Financial",
"depends": [
"base_pki",
"account",
],
"data":[
"security/account_einvoice_security.xml",
"security/ir.model.access.csv",
"account_einvoice_workflow.xml",
"account_einvoice_view.xml",
"account_view.xml",
],
"demo_xml": [],
"active": False,
"installable": True,
"certificate" : "",
}
| 0.00441 |
# -*- coding: utf-8 -*-
# MouseTrap
#
# Copyright 2009 Flavio Percoco Premoli
#
# This file is part of mouseTrap.
#
# MouseTrap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2 as published
# by the Free Software Foundation.
#
# mouseTrap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mouseTrap. If not, see <http://www.gnu.org/licenses/>.
""" Common MouseTrap Functions. """
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2008 Flavio Percoco Premoli."
__license__ = "GPLv2"
import os
import re
def get_py_list(dirlist):
"""
Checks for .py files in the directories given in dirlist
and removes the extensions.
Arguments:
- dirlist: The directories list.
"""
if not type(dirlist) is list:
dirlist = [dirlist]
reg = re.compile(r'([A-Za-z0-9]+)\.py$', re.DOTALL)
group = []
for dir in dirlist:
if not os.path.isdir(dir):
continue
group.append([ mod[0] for mod in [ reg.findall(f) for f in os.listdir("%s/" % dir) if "handler" not in f] if mod ])
return [] + [x for l in group for x in l]
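# A hedged illustration (not part of the original module): given a
# hypothetical directory "scripts/" containing a.py, b.py and
# report_handler.py, the handler file is skipped and extensions dropped,
# so the result (in os.listdir order) would look like:
#
#   >>> get_py_list(["scripts"])
#   ['a', 'b']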
| 0.01151 |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2017-2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import typing
import click
from . import echo
from . import env
from ._options import add_build_options, get_project
from snapcraft.internal import (
errors,
build_providers,
deprecations,
lifecycle,
lxd,
project_loader,
steps,
)
from snapcraft.project._sanity_checks import conduct_project_sanity_check
from snapcraft.project.errors import YamlValidationError
if typing.TYPE_CHECKING:
from snapcraft.internal.project import Project # noqa: F401
# TODO: when snap is a real step we can simplify the arguments here.
# fmt: off
def _execute( # noqa: C901
step: steps.Step,
parts: str,
pack_project: bool = False,
output: str = None,
shell: bool = False,
shell_after: bool = False,
**kwargs
) -> "Project":
# fmt: on
build_environment = env.BuilderEnvironmentConfig()
project = get_project(is_managed_host=build_environment.is_managed_host, **kwargs)
conduct_project_sanity_check(project)
# When we are ready to pull the trigger, this will be driven by
# project.info.base being set
if build_environment.is_multipass:
build_provider_class = build_providers.get_provider_for(
build_environment.provider
)
echo.info("Launching a VM.")
with build_provider_class(project=project, echoer=echo) as instance:
instance.mount_project()
try:
if shell:
# shell means we want to do everything right up to the previous
# step and then go into a shell instead of the requested step.
# the "snap" target is a special snowflake that has not made its
# way to be a proper step.
previous_step = None
if pack_project:
previous_step = steps.PRIME
elif step > steps.PULL:
previous_step = step.previous_step()
# steps.PULL is the first step, so we would directly shell into it.
if previous_step:
instance.execute_step(previous_step)
elif pack_project:
instance.pack_project(output=output)
else:
instance.execute_step(step)
except Exception:
if project.debug:
instance.shell()
else:
echo.warning("Run the same command again with --debug to shell into the environment "
"if you wish to introspect this failure.")
raise
else:
if shell or shell_after:
instance.shell()
elif build_environment.is_managed_host or build_environment.is_host:
project_config = project_loader.load_config(project)
lifecycle.execute(step, project_config, parts)
if pack_project:
_pack(project.prime_dir, output=output)
else:
# containerbuild takes a snapcraft command name, not a step
lifecycle.containerbuild(command=step.name, project=project, args=parts)
if pack_project:
_pack(project.prime_dir, output=output)
return project
def _pack(directory: str, *, output: str) -> None:
snap_name = lifecycle.pack(directory, output)
echo.info("Snapped {}".format(snap_name))
@click.group()
@add_build_options()
@click.pass_context
def lifecyclecli(ctx, **kwargs):
pass
@lifecyclecli.command()
def init():
"""Initialize a snapcraft project."""
snapcraft_yaml_path = lifecycle.init()
echo.info("Created {}.".format(snapcraft_yaml_path))
echo.info("Edit the file to your liking or run `snapcraft` to get started")
@lifecyclecli.command()
@click.pass_context
@add_build_options()
@click.argument("parts", nargs=-1, metavar="<part>...", required=False)
def pull(ctx, parts, **kwargs):
"""Download or retrieve artifacts defined for a part.
\b
Examples:
snapcraft pull
snapcraft pull my-part1 my-part2
"""
_execute(steps.PULL, parts, **kwargs)
@lifecyclecli.command()
@add_build_options()
@click.argument("parts", nargs=-1, metavar="<part>...", required=False)
def build(parts, **kwargs):
"""Build artifacts defined for a part.
\b
Examples:
snapcraft build
snapcraft build my-part1 my-part2
"""
_execute(steps.BUILD, parts, **kwargs)
@lifecyclecli.command()
@add_build_options()
@click.argument("parts", nargs=-1, metavar="<part>...", required=False)
def stage(parts, **kwargs):
"""Stage the part's built artifacts into the common staging area.
\b
Examples:
snapcraft stage
snapcraft stage my-part1 my-part2
"""
_execute(steps.STAGE, parts, **kwargs)
@lifecyclecli.command()
@add_build_options()
@click.argument("parts", nargs=-1, metavar="<part>...", required=False)
def prime(parts, **kwargs):
"""Final copy and preparation for the snap.
\b
Examples:
snapcraft prime
snapcraft prime my-part1 my-part2
"""
_execute(steps.PRIME, parts, **kwargs)
@lifecyclecli.command()
@add_build_options()
@click.argument("directory", required=False)
@click.option("--output", "-o", help="path to the resulting snap.")
def snap(directory, output, **kwargs):
"""Create a snap.
\b
Examples:
snapcraft snap
snapcraft snap --output renamed-snap.snap
If you want to snap a directory, you should use the pack command
instead.
"""
if directory:
deprecations.handle_deprecation_notice("dn6")
_pack(directory, output=output)
else:
_execute(steps.PRIME, parts=[], pack_project=True, output=output, **kwargs)
@lifecyclecli.command()
@click.argument("directory")
@click.option("--output", "-o", help="path to the resulting snap.")
def pack(directory, output, **kwargs):
"""Create a snap from a directory holding a valid snap.
The layout of <directory> should contain a valid meta/snap.yaml in
order to be a valid snap.
\b
Examples:
snapcraft pack my-snap-directory
snapcraft pack my-snap-directory --output renamed-snap.snap
"""
_pack(directory, output=output)
@lifecyclecli.command()
@click.argument("parts", nargs=-1, metavar="<part>...", required=False)
@click.option(
"--step",
"-s",
"step_name",
type=click.Choice(["pull", "build", "stage", "prime", "strip"]),
help="only clean the specified step and those that depend on it.",
)
def clean(parts, step_name):
"""Remove content - cleans downloads, builds or install artifacts.
\b
Examples:
snapcraft clean
snapcraft clean my-part --step build
"""
build_environment = env.BuilderEnvironmentConfig()
try:
project = get_project(
is_managed_host=build_environment.is_managed_host
)
except YamlValidationError:
# We need to be able to clean invalid projects too.
project = get_project(
is_managed_host=build_environment.is_managed_host,
skip_snapcraft_yaml=True
)
step = None
if step_name:
if step_name == "strip":
echo.warning(
"DEPRECATED: Use `prime` instead of `strip` as the step to clean"
)
step_name = "prime"
step = steps.get_step_by_name(step_name)
if build_environment.is_lxd:
lxd.Project(project=project, output=None, source=os.path.curdir).clean(
parts, step
)
elif build_environment.is_host:
lifecycle.clean(project, parts, step)
else:
# TODO support for steps.
if parts or step_name:
raise errors.SnapcraftEnvironmentError(
"Build providers are still not feature complete, specifying parts or a step name "
"is not yet supported.")
build_provider_class = build_providers.get_provider_for(
build_environment.provider
)
build_provider_class(project=project, echoer=echo).clean_project()
@lifecyclecli.command()
@add_build_options()
@click.option(
"--remote",
metavar="<remote>",
help="Use a specific lxd remote instead of a local container.",
)
def cleanbuild(remote, **kwargs):
"""Create a snap using a clean environment managed by a build provider.
\b
Examples:
snapcraft cleanbuild
    The cleanbuild command requires a properly set up lxd environment that
can connect to external networks. Refer to the "Ubuntu Desktop and
Ubuntu Server" section on
https://linuxcontainers.org/lxd/getting-started-cli
to get started.
If using a remote, a prior setup is required which is described on:
https://linuxcontainers.org/lxd/getting-started-cli/#multiple-hosts
"""
    # cleanbuild is a special snowflake: while all the other commands
    # would work with the host as the build_provider, it makes little
    # sense in this scenario.
if sys.platform == "darwin":
default_provider = "multipass"
else:
default_provider = "lxd"
build_environment = env.BuilderEnvironmentConfig(
default=default_provider, additional_providers=["multipass"]
)
project = get_project(
        is_managed_host=build_environment.is_managed_host, **kwargs
)
conduct_project_sanity_check(project)
snap_filename = lifecycle.cleanbuild(
project=project, echoer=echo, remote=remote, build_environment=build_environment
)
echo.info("Retrieved {!r}".format(snap_filename))
if __name__ == "__main__":
lifecyclecli.main()
| 0.001064 |
import numpy as np
class RandomWalk(object):
"""
    We consider a random walk, say rw, to be a process governed by a random variable generated via rnd such that:
        rw[i+1] = rw[i] + rnd()
    Example usage:
        r_walk = RandomWalk()
        r_walk.walk(np.power(2, 8))
"""
def __init__(self, random_generator=np.random.normal, initial_point=0):
self.rnd_gen = random_generator
self.path = [initial_point]
def __repr__(self):
return str(self.path)
def _is_power_2(self, num):
"""
        Checks whether a given number is a power of 2 (0 is excluded). A power of two has
        exactly one bit set, so num & (num - 1) clears that bit and yields 0; for example,
        8 is 0b1000 and 7 is 0b0111, so 8 & 7 == 0. The check is done via the & bitwise operator.
"""
return (num & (num-1) == 0) and (num != 0)
def _walk(self, path_length):
"""
A generator version of the random walk values
@raises ValueError: if path_length is not a power of 2
"""
if not self._is_power_2(path_length):
#TODO: Book keep constants
raise ValueError('The path length is not a power of two')
pos = self.path[0]
for _ in xrange(path_length):
yield pos
pos = pos + self.rnd_gen()
def walk(self, path_length):
"""
        Stores the values of the realization of the walk in self.path
"""
self.path = list(self._walk(path_length))
def scale(self, power):
"""
Scales the values of the random walk
"""
self.path = [np.power(step, power) for step in self.path]
print self.path
if __name__ == '__main__':
r_walk = RandomWalk()
r_walk.walk(np.power(2,3))
r_walk.scale(2)
bulk_walks = [RandomWalk() for _ in xrange(3)]
[random_walk.walk(np.power(2,3)) for random_walk in bulk_walks]
| 0.006247 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009,
# 2010, 2011, 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
BibIndex indexing engine implementation.
See bibindex executable for entry point.
"""
__revision__ = "$Id$"
import re
import sys
import time
import fnmatch
import inspect
from datetime import datetime
from invenio.config import CFG_SOLR_URL
from invenio.bibindex_engine_config import CFG_MAX_MYSQL_THREADS, \
CFG_MYSQL_THREAD_TIMEOUT, \
CFG_CHECK_MYSQL_THREADS, \
CFG_BIBINDEX_INDEX_TABLE_TYPE, \
CFG_BIBINDEX_ADDING_RECORDS_STARTED_STR, \
CFG_BIBINDEX_UPDATE_MESSAGE, \
CFG_BIBINDEX_UPDATE_MODE, \
CFG_BIBINDEX_TOKENIZER_TYPE, \
CFG_BIBINDEX_WASH_INDEX_TERMS, \
CFG_BIBINDEX_SPECIAL_TAGS
from invenio.bibauthority_config import \
CFG_BIBAUTHORITY_CONTROLLED_FIELDS_BIBLIOGRAPHIC
from invenio.bibauthority_engine import \
get_control_nos_from_recID
from invenio.bibauthorid_dbinterface import get_author_canonical_ids_for_recid
from invenio.search_engine import perform_request_search, \
get_index_stemming_language, \
get_synonym_terms, \
search_pattern, \
search_unit_in_bibrec
from invenio.dbquery import run_sql, DatabaseError, serialize_via_marshal, \
deserialize_via_marshal, wash_table_column_name
from invenio.bibindex_engine_washer import wash_index_term
from invenio.bibtask import task_init, write_message, get_datetime, \
task_set_option, task_get_option, task_get_task_param, \
task_update_progress, task_sleep_now_if_required
from invenio.intbitset import intbitset
from invenio.errorlib import register_exception
from invenio.solrutils_bibindex_indexer import solr_commit
from invenio.bibindex_tokenizers.BibIndexJournalTokenizer import \
CFG_JOURNAL_TAG, \
CFG_JOURNAL_PUBINFO_STANDARD_FORM, \
CFG_JOURNAL_PUBINFO_STANDARD_FORM_REGEXP_CHECK
from invenio.bibindex_termcollectors import TermCollector
from invenio.bibindex_engine_utils import load_tokenizers, \
get_all_index_names_and_column_values, \
get_index_tags, \
get_field_tags, \
get_marc_tag_indexes, \
get_nonmarc_tag_indexes, \
get_all_indexes, \
get_index_virtual_indexes, \
get_virtual_index_building_blocks, \
get_index_id_from_index_name, \
run_sql_drop_silently, \
get_min_last_updated, \
remove_inexistent_indexes, \
get_all_synonym_knowledge_bases, \
get_index_remove_stopwords, \
get_index_remove_html_markup, \
get_index_remove_latex_markup, \
filter_for_virtual_indexes, \
get_records_range_for_index, \
make_prefix, \
list_union, \
recognize_marc_tag
from invenio.bibindex_termcollectors import \
TermCollector, \
NonmarcTermCollector
from invenio.memoiseutils import Memoise
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
# precompile some often-used regexp for speed reasons:
re_subfields = re.compile('\$\$\w')
re_datetime_shift = re.compile("([-\+]{0,1})([\d]+)([dhms])")
re_prefix = re.compile('__[a-zA-Z1-9]*__')
nb_char_in_line = 50 # for verbose pretty printing
chunksize = 1000 # default size of chunks that the records will be treated by
base_process_size = 4500 # process base size
_last_word_table = None
_TOKENIZERS = load_tokenizers()
def list_unique(_list):
"""Returns a _list with duplicates removed."""
_dict = {}
for e in _list:
_dict[e] = 1
return _dict.keys()
# safety function for killing slow DB threads:
def kill_sleepy_mysql_threads(max_threads=CFG_MAX_MYSQL_THREADS,
thread_timeout=CFG_MYSQL_THREAD_TIMEOUT):
"""Check the number of DB threads and if there are more than
    MAX_THREADS of them, kill all threads that are in a sleeping
    state for more than THREAD_TIMEOUT seconds. (This is useful
    for working around the max_connection problem that appears
during indexation in some not-yet-understood cases.) If some
threads are to be killed, write info into the log file.
"""
res = run_sql("SHOW FULL PROCESSLIST")
if len(res) > max_threads:
for row in res:
r_id, dummy, dummy, dummy, r_command, r_time, dummy, dummy = row
if r_command == "Sleep" and int(r_time) > thread_timeout:
run_sql("KILL %s", (r_id, ))
write_message("WARNING: too many DB threads, " + \
"killing thread %s" % r_id, verbose=1)
return
def get_associated_subfield_value(recID, tag, value, associated_subfield_code):
"""Return list of ASSOCIATED_SUBFIELD_CODE, if exists, for record
RECID and TAG of value VALUE. Used by fulltext indexer only.
Note: TAG must be 6 characters long (tag+ind1+ind2+sfcode),
    otherwise an empty string is returned.
FIXME: what if many tag values have the same value but different
associated_subfield_code? Better use bibrecord library for this.
"""
out = ""
if len(tag) != 6:
return out
bibXXx = "bib" + tag[0] + tag[1] + "x"
bibrec_bibXXx = "bibrec_" + bibXXx
query = """SELECT bb.field_number, b.tag, b.value FROM %s AS b, %s AS bb
WHERE bb.id_bibrec=%%s AND bb.id_bibxxx=b.id AND tag LIKE
%%s%%""" % (bibXXx, bibrec_bibXXx)
res = run_sql(query, (recID, tag[:-1]))
field_number = -1
for row in res:
if row[1] == tag and row[2] == value:
field_number = row[0]
if field_number > 0:
for row in res:
if row[0] == field_number and row[1] == tag[:-1] + associated_subfield_code:
out = row[2]
break
return out
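# Editor's illustration, not part of the original module: the two-pass lookup
# get_associated_subfield_value() performs, run on hand-made (field_number,
# tag, value) rows instead of the bibXXx tables. The tags and values are made
# up; only the matching logic mirrors the function above.
def _example_associated_subfield_lookup():
    rows = [
        (1, "8564_u", "http://example.org/a.pdf"), (1, "8564_y", "Fulltext"),
        (2, "8564_u", "http://example.org/b.pdf"), (2, "8564_y", "Figure"),
    ]
    tag, value, wanted_subfield = "8564_u", "http://example.org/b.pdf", "y"
    # pass 1: find the field_number of the field whose tag/value match exactly
    field_number = -1
    for number, row_tag, row_value in rows:
        if row_tag == tag and row_value == value:
            field_number = number
    # pass 2: within that same field, read the associated subfield
    out = ""
    if field_number > 0:
        for number, row_tag, row_value in rows:
            if number == field_number and row_tag == tag[:-1] + wanted_subfield:
                out = row_value
                break
    assert out == "Figure"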
def swap_temporary_reindex_tables(index_id, reindex_prefix="tmp_"):
"""Atomically swap reindexed temporary table with the original one.
Delete the now-old one."""
write_message("Putting new tmp index tables " + \
"for id %s into production" % index_id)
run_sql(
"RENAME TABLE " +
"idxWORD%02dR TO old_idxWORD%02dR," % (index_id, index_id) +
"%sidxWORD%02dR TO idxWORD%02dR," % (reindex_prefix, index_id, index_id) +
"idxWORD%02dF TO old_idxWORD%02dF," % (index_id, index_id) +
"%sidxWORD%02dF TO idxWORD%02dF," % (reindex_prefix, index_id, index_id) +
"idxPAIR%02dR TO old_idxPAIR%02dR," % (index_id, index_id) +
"%sidxPAIR%02dR TO idxPAIR%02dR," % (reindex_prefix, index_id, index_id) +
"idxPAIR%02dF TO old_idxPAIR%02dF," % (index_id, index_id) +
"%sidxPAIR%02dF TO idxPAIR%02dF," % (reindex_prefix, index_id, index_id) +
"idxPHRASE%02dR TO old_idxPHRASE%02dR," % (index_id, index_id) +
"%sidxPHRASE%02dR TO idxPHRASE%02dR," % (reindex_prefix, index_id, index_id) +
"idxPHRASE%02dF TO old_idxPHRASE%02dF," % (index_id, index_id) +
"%sidxPHRASE%02dF TO idxPHRASE%02dF;" % (reindex_prefix, index_id, index_id)
)
write_message("Dropping old index tables for id %s" % index_id)
run_sql_drop_silently("""DROP TABLE old_idxWORD%02dR,
old_idxWORD%02dF,
old_idxPAIR%02dR,
old_idxPAIR%02dF,
old_idxPHRASE%02dR,
old_idxPHRASE%02dF""" % ((index_id, )* 6)
) # kwalitee: disable=sql
def init_temporary_reindex_tables(index_id, reindex_prefix="tmp_"):
"""Create reindexing temporary tables."""
write_message("Creating new tmp index tables for id %s" % index_id)
query = """DROP TABLE IF EXISTS %sidxWORD%02dF""" % \
(wash_table_column_name(reindex_prefix), index_id)
run_sql_drop_silently(query) # kwalitee: disable=sql
run_sql("""CREATE TABLE %sidxWORD%02dF (
id mediumint(9) unsigned NOT NULL auto_increment,
term varchar(50) default NULL,
hitlist longblob,
PRIMARY KEY (id),
UNIQUE KEY term (term)
) ENGINE=MyISAM""" % (reindex_prefix, index_id))
query = """DROP TABLE IF EXISTS %sidxWORD%02dR""" % \
(wash_table_column_name(reindex_prefix), index_id)
run_sql_drop_silently(query) # kwalitee: disable=sql
run_sql("""CREATE TABLE %sidxWORD%02dR (
id_bibrec mediumint(9) unsigned NOT NULL,
termlist longblob,
type enum('CURRENT','FUTURE','TEMPORARY') NOT NULL default 'CURRENT',
PRIMARY KEY (id_bibrec,type)
) ENGINE=MyISAM""" % (reindex_prefix, index_id))
query = """DROP TABLE IF EXISTS %sidxPAIR%02dF""" % \
(wash_table_column_name(reindex_prefix), index_id)
run_sql_drop_silently(query) # kwalitee: disable=sql
run_sql("""CREATE TABLE %sidxPAIR%02dF (
id mediumint(9) unsigned NOT NULL auto_increment,
term varchar(100) default NULL,
hitlist longblob,
PRIMARY KEY (id),
UNIQUE KEY term (term)
) ENGINE=MyISAM""" % (reindex_prefix, index_id))
query = """DROP TABLE IF EXISTS %sidxPAIR%02dR""" % \
(wash_table_column_name(reindex_prefix), index_id)
run_sql_drop_silently(query) # kwalitee: disable=sql
run_sql("""CREATE TABLE %sidxPAIR%02dR (
id_bibrec mediumint(9) unsigned NOT NULL,
termlist longblob,
type enum('CURRENT','FUTURE','TEMPORARY') NOT NULL default 'CURRENT',
PRIMARY KEY (id_bibrec,type)
) ENGINE=MyISAM""" % (reindex_prefix, index_id))
query = """DROP TABLE IF EXISTS %sidxPHRASE%02dF""" % \
(wash_table_column_name(reindex_prefix), index_id)
run_sql_drop_silently(query) # kwalitee: disable=sql
run_sql("""CREATE TABLE %sidxPHRASE%02dF (
id mediumint(9) unsigned NOT NULL auto_increment,
term text default NULL,
hitlist longblob,
PRIMARY KEY (id),
KEY term (term(50))
) ENGINE=MyISAM""" % (reindex_prefix, index_id))
query = """DROP TABLE IF EXISTS %sidxPHRASE%02dR""" % \
(wash_table_column_name(reindex_prefix), index_id)
run_sql_drop_silently(query) # kwalitee: disable=sql
run_sql("""CREATE TABLE %sidxPHRASE%02dR (
id_bibrec mediumint(9) unsigned NOT NULL default '0',
termlist longblob,
type enum('CURRENT','FUTURE','TEMPORARY') NOT NULL default 'CURRENT',
PRIMARY KEY (id_bibrec,type)
) ENGINE=MyISAM""" % (reindex_prefix, index_id))
def remove_subfields(s):
"Removes subfields from string, e.g. 'foo $$c bar' becomes 'foo bar'."
return re_subfields.sub(' ', s)
def get_field_indexes(field):
"""Returns indexes names and ids corresponding to the given field"""
if recognize_marc_tag(field):
#field is actually a tag
return get_marc_tag_indexes(field, virtual=False)
else:
return get_nonmarc_tag_indexes(field, virtual=False)
get_field_indexes_memoised = Memoise(get_field_indexes)
def get_index_tokenizer(index_id):
"""Returns value of a tokenizer field from idxINDEX database table
@param index_id: id of the index
"""
query = "SELECT tokenizer FROM idxINDEX WHERE id=%s" % index_id
out = None
try:
res = run_sql(query)
if res:
out = _TOKENIZERS[res[0][0]]
except DatabaseError:
write_message("Exception caught for SQL statement: %s; " + \
"column tokenizer might not exist" % query, sys.stderr)
except KeyError:
write_message("Exception caught: there is no such tokenizer")
out = None
return out
def detect_tokenizer_type(tokenizer):
"""
Checks what is the main type of the tokenizer.
For more information on tokenizer types take
a look at BibIndexTokenizer class.
@param tokenizer: instance of a tokenizer
"""
from invenio.bibindex_tokenizers.BibIndexStringTokenizer import BibIndexStringTokenizer
from invenio.bibindex_tokenizers.BibIndexRecJsonTokenizer import BibIndexRecJsonTokenizer
from invenio.bibindex_tokenizers.BibIndexMultiFieldTokenizer import BibIndexMultiFieldTokenizer
tokenizer_inheritance_tree = inspect.getmro(tokenizer.__class__)
if BibIndexStringTokenizer in tokenizer_inheritance_tree:
return CFG_BIBINDEX_TOKENIZER_TYPE['string']
if BibIndexMultiFieldTokenizer in tokenizer_inheritance_tree:
return CFG_BIBINDEX_TOKENIZER_TYPE['multifield']
if BibIndexRecJsonTokenizer in tokenizer_inheritance_tree:
return CFG_BIBINDEX_TOKENIZER_TYPE['recjson']
return CFG_BIBINDEX_TOKENIZER_TYPE['unknown']
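# Editor's illustration, not part of the original module: the MRO check used
# by detect_tokenizer_type() above, with throw-away classes standing in for
# the real BibIndex tokenizer hierarchy.
def _example_mro_based_classification():
    import inspect  # already imported at module level; repeated to keep the sketch self-contained

    class FakeStringTokenizer(object):
        pass

    class FakeAuthorTokenizer(FakeStringTokenizer):
        pass

    mro = inspect.getmro(FakeAuthorTokenizer)
    # the base class appears in the MRO, so this would be classified as a 'string' tokenizer
    assert FakeStringTokenizer in mro and object in mro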
def get_last_updated_all_indexes():
"""Returns last modification date for all defined indexes"""
query= """SELECT name, last_updated FROM idxINDEX"""
res = run_sql(query)
return res
def split_ranges(parse_string):
"""Parse a string a return the list or ranges."""
recIDs = []
ranges = parse_string.split(",")
for arange in ranges:
tmp_recIDs = arange.split("-")
if len(tmp_recIDs) == 1:
recIDs.append([int(tmp_recIDs[0]), int(tmp_recIDs[0])])
else:
if int(tmp_recIDs[0]) > int(tmp_recIDs[1]): # sanity check
tmp = tmp_recIDs[0]
tmp_recIDs[0] = tmp_recIDs[1]
tmp_recIDs[1] = tmp
recIDs.append([int(tmp_recIDs[0]), int(tmp_recIDs[1])])
return recIDs
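# Editor's illustration, not part of the original module: what split_ranges()
# produces for a typical comma-separated recID argument, including the sanity
# swap of a reversed range.
def _example_split_ranges():
    assert split_ranges("7") == [[7, 7]]
    assert split_ranges("1-5,12") == [[1, 5], [12, 12]]
    assert split_ranges("9-3") == [[3, 9]]  # bounds given backwards are swapped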
def get_word_tables(tables):
""" Given a list of table names it return a list of tuples
(index_id, index_name, index_tags).
"""
wordTables = []
if tables:
for index in tables:
index_id = get_index_id_from_index_name(index)
if index_id:
wordTables.append((index_id, index, get_index_tags(index)))
else:
write_message("Error: There is no %s words table." % \
index, sys.stderr)
return wordTables
def get_date_range(var):
"Returns the two dates contained as a low,high tuple"
limits = var.split(",")
if len(limits) == 1:
low = get_datetime(limits[0])
return low, None
if len(limits) == 2:
low = get_datetime(limits[0])
high = get_datetime(limits[1])
return low, high
return None, None
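# Editor's illustration, not part of the original module: the three tuple
# shapes get_date_range() can return. A stand-in parser is used so the sketch
# does not depend on bibtask.get_datetime(), which the real function calls on
# each piece.
def _example_date_range_shapes():
    def fake_get_date_range(var, parse=lambda s: s.strip()):
        limits = var.split(",")
        if len(limits) == 1:
            return parse(limits[0]), None
        if len(limits) == 2:
            return parse(limits[0]), parse(limits[1])
        return None, None
    assert fake_get_date_range("2010-01-01") == ("2010-01-01", None)
    assert fake_get_date_range("2010-01-01, 2010-12-31") == ("2010-01-01", "2010-12-31")
    assert fake_get_date_range("a,b,c") == (None, None)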
def create_range_list(res):
"""Creates a range list from a recID select query result contained
in res. The result is expected to have ascending numerical order."""
if not res:
return []
row = res[0]
if not row:
return []
else:
range_list = [[row, row]]
for row in res[1:]:
row_id = row
if row_id == range_list[-1][1] + 1:
range_list[-1][1] = row_id
else:
range_list.append([row_id, row_id])
return range_list
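# Editor's illustration, not part of the original module: create_range_list()
# collapsing an ascending list of recIDs into contiguous [low, high] ranges.
def _example_create_range_list():
    assert create_range_list([]) == []
    assert create_range_list([1, 2, 3, 7, 9, 10]) == [[1, 3], [7, 7], [9, 10]]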
def beautify_range_list(range_list):
"""Returns a non overlapping, maximal range list"""
ret_list = []
for new in range_list:
found = 0
for old in ret_list:
if new[0] <= old[0] <= new[1] + 1 or new[0] - 1 <= old[1] <= new[1]:
old[0] = min(old[0], new[0])
old[1] = max(old[1], new[1])
found = 1
break
if not found:
ret_list.append(new)
return ret_list
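# Editor's illustration, not part of the original module: beautify_range_list()
# merging overlapping and adjacent ranges into a maximal, non-overlapping list.
def _example_beautify_range_list():
    assert beautify_range_list([[1, 5], [4, 8]]) == [[1, 8]]              # overlap is merged
    assert beautify_range_list([[1, 5], [6, 9]]) == [[1, 9]]              # adjacent ranges are merged too
    assert beautify_range_list([[1, 2], [10, 12]]) == [[1, 2], [10, 12]]  # disjoint ranges are kept apart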
def truncate_index_table(index_name):
"""Properly truncate the given index."""
index_id = get_index_id_from_index_name(index_name)
if index_id:
write_message('Truncating %s index table in order to reindex.' % \
index_name, verbose=2)
run_sql("""UPDATE idxINDEX SET last_updated='0000-00-00 00:00:00'
WHERE id=%s""", (index_id, ))
run_sql("TRUNCATE idxWORD%02dF" % index_id) # kwalitee: disable=sql
run_sql("TRUNCATE idxWORD%02dR" % index_id) # kwalitee: disable=sql
run_sql("TRUNCATE idxPHRASE%02dF" % index_id) # kwalitee: disable=sql
run_sql("TRUNCATE idxPHRASE%02dR" % index_id) # kwalitee: disable=sql
def update_index_last_updated(indexes, starting_time=None):
"""Update last_updated column of the index table in the database.
Puts starting time there so that if the task
was interrupted for record download,
the records will be reindexed next time.
@param indexes: list of indexes names
"""
if starting_time is None:
return None
for index_name in indexes:
write_message("updating last_updated to %s...for %s index" % \
(starting_time, index_name), verbose=9)
run_sql("UPDATE idxINDEX SET last_updated=%s WHERE name=%s",
(starting_time, index_name))
def get_percentage_completed(num_done, num_total):
""" Return a string containing the approx. percentage completed """
    percentage_completed = 100.0 * float(num_done) / float(num_total)
    if percentage_completed:
        percentage_display = "(%.1f%%)" % (percentage_completed, )
    else:
        percentage_display = ""
return percentage_display
def _fill_dict_of_indexes_with_empty_sets():
"""find_affected_records internal function.
Creates dict: {'index_name1':set([]), ...}
"""
index_dict = {}
tmp_all_indexes = get_all_indexes(virtual=False)
for index in tmp_all_indexes:
index_dict[index] = set([])
return index_dict
def find_affected_records_for_index(indexes=None, recIDs=None, force_all_indexes=False):
"""
Function checks which records need to be changed/reindexed
for given index/indexes.
Makes use of hstRECORD table where
different revisions of record are kept.
If parameter force_all_indexes is set
function will assign all recIDs to all indexes.
    @param indexes: names of indexes for reindexation separated by comma
@param recIDs: recIDs for reindexation in form:
[[range1_down, range1_up],[range2_down, range2_up]..]
@param force_all_indexes: should we index all indexes?
"""
if indexes is None:
indexes = []
if recIDs is None:
recIDs = []
tmp_dates = dict(get_last_updated_all_indexes())
modification_dates = dict([(date, tmp_dates[date] or datetime(1000, 1, 1, 1, 1, 1))
for date in tmp_dates])
tmp_all_indexes = get_all_indexes(virtual=False)
indexes = remove_inexistent_indexes(indexes, leave_virtual=False)
if not indexes:
return {}
def _should_reindex_for_revision(index_name, revision_date):
try:
if modification_dates[index_name] < revision_date and \
index_name in indexes:
return True
return False
except KeyError:
return False
if force_all_indexes:
records_for_indexes = {}
all_recIDs = []
for recIDs_range in recIDs:
all_recIDs.extend(range(recIDs_range[0], recIDs_range[1]+1))
for index in indexes:
records_for_indexes[index] = all_recIDs
return records_for_indexes
min_last_updated = get_min_last_updated(indexes)[0][0] or \
datetime(1000, 1, 1, 1, 1, 1)
recIDs_info = []
for recIDs_range in recIDs:
# firstly, determine which records were updated since min_last_updated:
query = """SELECT id_bibrec,job_date,affected_fields FROM hstRECORD
WHERE id_bibrec BETWEEN %s AND %s AND
job_date > '%s'""" % \
(recIDs_range[0], recIDs_range[1], min_last_updated)
res = run_sql(query)
if res:
recIDs_info.extend(res)
# secondly, there may be newly inserted records which were
# uploaded with old timestamp (via 005), so let us detect
# those too, using their "real" modification_date:
res = run_sql("""SELECT bibrec.id,modification_date,''
FROM bibrec, hstRECORD
WHERE modification_date>%s
AND bibrec.id=id_bibrec
AND (SELECT COUNT(*) FROM hstRECORD WHERE id_bibrec=bibrec.id)=1""", (min_last_updated,))
if res:
recIDs_info.extend(res)
indexes_to_change = _fill_dict_of_indexes_with_empty_sets()
for recID_info in recIDs_info:
recID, revision, affected_fields = recID_info
affected_fields = affected_fields.split(",")
indexes_for_recID = set()
for field in affected_fields:
if field:
field_indexes = get_field_indexes_memoised(field) or []
indexes_names = set([idx[1] for idx in field_indexes])
indexes_for_recID |= indexes_names
else:
# record was inserted, all fields were changed,
# no specific affected fields
indexes_for_recID |= set(tmp_all_indexes)
indexes_for_recID_filtered = [ind for ind in indexes_for_recID if _should_reindex_for_revision(ind, revision)]
for index in indexes_for_recID_filtered:
indexes_to_change[index].add(recID)
indexes_to_change = dict((k, list(sorted(v))) for k, v in indexes_to_change.iteritems() if v)
return indexes_to_change
def chunk_generator(rng):
"""
Splits one range into several smaller ones
with respect to global chunksize variable.
@param rng: range of records
@type rng: list in the form: [1, 2000]
"""
global chunksize
current_low = rng[0]
current_high = rng[0]
if rng[0] == None or rng[1] == None:
raise StopIteration
if rng[1] - rng[0] + 1 <= chunksize:
yield rng
else:
while current_high - 1 < rng[1]:
current_high += chunksize
yield current_low, min(current_high - 1, rng[1])
current_low += chunksize
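# Editor's illustration, not part of the original module: chunk_generator()
# splitting a range into pieces no larger than the global chunksize (1000 by
# default). Note that a range which already fits is yielded back unchanged.
def _example_chunk_generator():
    assert list(chunk_generator([1, 800])) == [[1, 800]]
    assert list(chunk_generator([1, 2500])) == [(1, 1000), (1001, 2000), (2001, 2500)]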
class AbstractIndexTable(object):
"""
This class represents an index table in database.
An index consists of three different kinds of tables:
table which stores only words in db,
table which stores pairs of words and
table which stores whole phrases.
The class represents only one table. Another instance of
the class must be created in order to store different
type of terms.
This class is an abstract class. It contains methods
to connect to db and methods which facilitate
    inserting/modifying/removing terms from it. The class
    also contains methods which help manage the memory.
All specific methods for indexing can be found in corresponding
classes for virtual and regular indexes.
"""
def __init__(self, index_name, table_type, table_prefix="", wash_index_terms=50):
self.index_name = index_name
self.index_id = get_index_id_from_index_name(index_name)
self.table_type = table_type
self.wash_index_terms = wash_index_terms
self.table_name = wash_table_column_name(table_prefix + \
"idx" + \
table_type + \
("%02d" % self.index_id) + "F")
self.table_prefix = table_prefix
self.value = {} # cache
self.recIDs_in_mem = []
def put_into_db(self, mode="normal"):
"""Updates the current words table in the corresponding DB
idxFOO table. Mode 'normal' means normal execution,
mode 'emergency' means words index reverting to old state.
"""
write_message("%s %s wordtable flush started" % \
(self.table_name, mode))
write_message('...updating %d words into %s started' % \
(len(self.value), self.table_name))
task_update_progress("(%s:%s) flushed %d/%d words" % \
(self.table_name, self.index_name, 0, len(self.value)))
self.recIDs_in_mem = beautify_range_list(self.recIDs_in_mem)
tab_name = self.table_name[:-1] + "R"
if mode == "normal":
for group in self.recIDs_in_mem:
query = """UPDATE %s SET type='TEMPORARY' WHERE id_bibrec
BETWEEN %%s AND %%s AND type='CURRENT'""" % tab_name
write_message(query % (group[0], group[1]), verbose=9)
run_sql(query, (group[0], group[1]))
nb_words_total = len(self.value)
nb_words_report = int(nb_words_total / 10.0)
nb_words_done = 0
for word in self.value.keys():
self.put_word_into_db(word)
nb_words_done += 1
if nb_words_report != 0 and ((nb_words_done % nb_words_report) == 0):
write_message('......processed %d/%d words' % \
(nb_words_done, nb_words_total))
percentage_display = get_percentage_completed(nb_words_done, nb_words_total)
task_update_progress("(%s:%s) flushed %d/%d words %s" % \
(tab_name, self.index_name,
nb_words_done, nb_words_total,
percentage_display))
write_message('...updating %d words into %s ended' % \
(nb_words_total, tab_name))
write_message('...updating reverse table %s started' % tab_name)
if mode == "normal":
for group in self.recIDs_in_mem:
query = """UPDATE %s SET type='CURRENT' WHERE id_bibrec
BETWEEN %%s AND %%s AND type='FUTURE'""" % tab_name
write_message(query % (group[0], group[1]), verbose=9)
run_sql(query, (group[0], group[1]))
query = """DELETE FROM %s WHERE id_bibrec
BETWEEN %%s AND %%s AND type='TEMPORARY'""" % tab_name
write_message(query % (group[0], group[1]), verbose=9)
run_sql(query, (group[0], group[1]))
write_message('End of updating wordTable into %s' % \
tab_name, verbose=9)
elif mode == "emergency":
for group in self.recIDs_in_mem:
query = """UPDATE %s SET type='CURRENT' WHERE id_bibrec
BETWEEN %%s AND %%s AND type='TEMPORARY'""" % tab_name
write_message(query % (group[0], group[1]), verbose=9)
run_sql(query, (group[0], group[1]))
query = """DELETE FROM %s WHERE id_bibrec
BETWEEN %%s AND %%s AND type='FUTURE'""" % tab_name
write_message(query % (group[0], group[1]), verbose=9)
run_sql(query, (group[0], group[1]))
write_message('End of emergency flushing wordTable into %s' % \
tab_name, verbose=9)
write_message('...updating reverse table %s ended' % tab_name)
self.clean()
self.recIDs_in_mem = []
write_message("%s %s wordtable flush ended" % \
(self.table_name, mode))
task_update_progress("(%s:%s) flush ended" % \
(self.table_name, self.index_name))
def put_word_into_db(self, word):
"""Flush a single word to the database and delete it from memory"""
set = self.load_old_recIDs(word)
if set is not None: # merge the word recIDs found in memory:
hitlist_was_changed = self.merge_with_old_recIDs(word, set)
if not hitlist_was_changed:
# nothing to update:
write_message("......... unchanged hitlist for ``%s''" % \
word, verbose=9)
else:
# yes there were some new words:
write_message("......... updating hitlist for ``%s''" % \
word, verbose=9)
run_sql("UPDATE %s SET hitlist=%%s WHERE term=%%s" % wash_table_column_name(self.table_name), (set.fastdump(), word)) # kwalitee: disable=sql
else: # the word is new, will create new set:
write_message("......... inserting hitlist for ``%s''" % \
word, verbose=9)
set = intbitset(self.value[word].keys())
try:
run_sql("INSERT INTO %s (term, hitlist) VALUES (%%s, %%s)" % wash_table_column_name(self.table_name), (word, set.fastdump())) # kwalitee: disable=sql
except Exception, e:
                ## We send this exception to the admin only when we are not
                ## already repairing the problem.
register_exception(prefix="Error when putting the term '%s' into db (hitlist=%s): %s\n" % (repr(word), set, e), alert_admin=(task_get_option('cmd') != 'repair'))
if not set: # never store empty words
run_sql("DELETE FROM %s WHERE term=%%s" % wash_table_column_name(self.table_name), (word,)) # kwalitee: disable=sql
def put(self, recID, word, sign):
"""Keeps track of changes done during indexing
and stores these changes in memory for further use.
Indexing process needs this information later while
filling in the database.
@param recID: recID of the record we want to update in memory
@param word: word we want to update
        @param sign: sign of the word, 1 means keep this word in database,
-1 remove word from database
"""
value = self.value
try:
if self.wash_index_terms:
word = wash_index_term(word, self.wash_index_terms)
if value.has_key(word):
                # the word 'word' exists already: update sign
value[word][recID] = sign
else:
value[word] = {recID: sign}
except Exception as e:
write_message("Error: Cannot put word %s with sign %d for recID %s." % \
(word, sign, recID))
def load_old_recIDs(self, word):
"""Load existing hitlist for the word from the database index files."""
query = "SELECT hitlist FROM %s WHERE term=%%s" % self.table_name
res = run_sql(query, (word, ))
if res:
return intbitset(res[0][0])
else:
return None
def merge_with_old_recIDs(self, word, set):
"""Merge the system numbers stored in memory
(hash of recIDs with value +1 or -1 according
to whether to add/delete them) with those stored
        in the database index and received in the 'set' argument,
        which is the universe of recIDs for the given word.
Return False in case no change was done to SET, return True in case SET
was changed.
"""
oldset = intbitset(set)
set.update_with_signs(self.value[word])
return set != oldset
def clean(self):
"Cleans the cache."
self.value = {}
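# Editor's illustration, not part of the original module: the cache structure
# kept by AbstractIndexTable.put() and the sign-based merge performed by
# merge_with_old_recIDs() with intbitset, redone with a plain Python set so it
# runs without Invenio. Words and recIDs are made up.
def _example_sign_based_merge():
    cache = {}  # word -> {recID: +1 (add) or -1 (remove)}
    for recID, word, sign in [(10, "ellis", +1), (11, "ellis", +1), (12, "ellis", -1)]:
        cache.setdefault(word, {})[recID] = sign
    hitlist = set([12, 99])  # what the forward table already stores for "ellis"
    for recID, sign in cache["ellis"].items():
        if sign > 0:
            hitlist.add(recID)
        else:
            hitlist.discard(recID)
    assert hitlist == set([10, 11, 99])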
class VirtualIndexTable(AbstractIndexTable):
"""
There are two types of indexes: virtual and regular/normal.
Check WordTable class for more on normal indexes.
This class represents a single index table for virtual index
(see also: AbstractIndexTable).
Virtual index doesn't store its own terms,
it accumulates terms from other indexes.
Good example of virtual index is the global index which stores
terms from title, abstract, keyword, author and so on.
This class contains methods for indexing virtual indexes.
See also: run_update()
"""
def __init__(self, index_name, table_type, table_prefix="", wash_index_terms=50):
"""
Creates VirtualIndexTable instance.
@param index_name: name of the index we want to reindex
@param table_type: words, pairs or phrases
@param table_prefix: add "tmp_" if you want to
reindex to temporary table
"""
AbstractIndexTable.__init__(self, index_name,
table_type,
table_prefix,
wash_index_terms)
self.mode = "normal"
self.dependent_indexes = dict(get_virtual_index_building_blocks(self.index_id))
def set_reindex_mode(self):
"""
Sets reindex mode. VirtualIndexTable will
remove all its content from database and
use insert_index function to repopulate it.
"""
self.mode = "reindex"
def run_update(self, flush=10000):
"""
Function starts all updating processes for virtual index.
It will take all information about pending changes from database
from queue tables (idxWORD/PAIR/PHRASExxQ), process them
and trigger appropriate indexing functions.
@param flush: how many records we will put in one go
into database (at most);
see also: opt_flush in WordTable class
"""
global chunksize
if self.mode == "reindex":
self.clean_database()
for index_id, index_name in self.dependent_indexes.iteritems():
rng = get_records_range_for_index(index_id)
flush_count = 0
if not rng:
continue
write_message('Virtual index: %s is being reindexed for %s index' % \
(self.index_name, index_name))
chunks = chunk_generator(rng)
try:
while True:
task_sleep_now_if_required()
chunk = chunks.next()
self.insert_index(index_id, chunk[0], chunk[1])
flush_count = flush_count + chunk[1] - chunk[0] + 1
self.recIDs_in_mem.append(list(chunk))
if flush_count >= flush:
flush_count = 0
self.put_into_db()
except StopIteration:
if flush_count > 0:
self.put_into_db()
self.clean_queue_table(index_name)
else:
for index_id, index_name in self.dependent_indexes.iteritems():
query = """SELECT id_bibrec_low, id_bibrec_high, mode FROM %s
WHERE index_name=%%s
ORDER BY runtime ASC""" % \
(self.table_name[:-1] + "Q")
entries = self.remove_duplicates(run_sql(query, (index_name, )))
if entries:
write_message('Virtual index: %s is being updated for %s index' % \
(self.index_name, index_name))
for entry in entries:
operation = None
recID_low, recID_high, mode = entry
if mode == CFG_BIBINDEX_UPDATE_MODE["Update"]:
operation = self.update_index
elif mode == CFG_BIBINDEX_UPDATE_MODE["Remove"]:
operation = self.remove_index
elif mode == CFG_BIBINDEX_UPDATE_MODE["Insert"]:
operation = self.insert_index
flush_count = 0
chunks = chunk_generator([recID_low, recID_high])
try:
while True:
task_sleep_now_if_required()
chunk = chunks.next()
operation(index_id, chunk[0], chunk[1])
flush_count = flush_count + chunk[1] - chunk[0] + 1
self.recIDs_in_mem.append(list(chunk))
if flush_count >= flush:
flush_count = 0
self.put_into_db()
except StopIteration:
if flush_count > 0:
self.put_into_db()
self.clean_queue_table(index_name)
def retrieve_new_values_from_index(self, index_id, records_range):
"""
Retrieves new values from dependent index
for specific range of records.
@param index_id: id of the dependent index
@param records_range: the smallest and the biggest id
in the range: [id_low, id_high]
"""
tab_name = "idx" + self.table_type + ("%02d" % index_id) + "R"
query = """SELECT id_bibrec, termlist FROM %s WHERE id_bibrec
BETWEEN %%s AND %%s""" % tab_name
new_regular_values = run_sql(query, (records_range[0], records_range[1]))
if new_regular_values:
zipped = zip(*new_regular_values)
new_regular_values = dict(zip(zipped[0], map(deserialize_via_marshal, zipped[1])))
else:
new_regular_values = dict()
return new_regular_values
def retrieve_old_values(self, records_range):
"""
Retrieves old values from database for this virtual index
for specific records range.
@param records_range: the smallest and the biggest id
in the range: [id_low, id_high]
"""
virtual_tab_name = self.table_name[:-1] + "R"
query = """SELECT id_bibrec, termlist FROM %s
WHERE type='CURRENT' AND
id_bibrec BETWEEN %%s AND %%s""" % virtual_tab_name
old_virtual_values = run_sql(query, (records_range[0], records_range[1]))
if old_virtual_values:
zipped = zip(*old_virtual_values)
old_virtual_values = dict(zip(zipped[0], map(deserialize_via_marshal, zipped[1])))
else:
old_virtual_values = dict()
return old_virtual_values
def update_index(self, index_id, recID_low, recID_high):
"""
Updates the state of virtual index for records in range:
recID_low, recID_high for index specified by index_id.
Function stores terms in idxWORD/PAIR/PHRASExxR tables with
prefixes for specific index, for example term 'ellis'
from author index will be stored in reversed table as:
        '__author__ellis'. It allows fast operations on only part of the terms.
        @param index_id: id of the dependent index we want to update
@param recID_low: first recID from the range of considered recIDs
@param recID_high: last recID from the range of considered recIDs
"""
index_name = self.dependent_indexes[index_id]
update_cache_for_record = self.update_cache_for_record
virtual_tab_name = self.table_name[:-1] + "R"
# take new values
new_regular_values = self.retrieve_new_values_from_index(index_id, [recID_low, recID_high])
# take old values
old_virtual_values = self.retrieve_old_values([recID_low, recID_high])
# update reversed table
for recID in xrange(recID_low, recID_high + 1):
new_values = new_regular_values.get(recID) or []
old_values = old_virtual_values.get(recID) or []
to_serialize = update_cache_for_record(index_name, recID, old_values, new_values)
if len(to_serialize) == 0:
continue
run_sql("""INSERT INTO %s (id_bibrec,termlist,type)
VALUES (%%s,%%s,'FUTURE')""" % \
wash_table_column_name(virtual_tab_name),
(recID, serialize_via_marshal(to_serialize))) # kwalitee: disable=sql
try:
run_sql("INSERT INTO %s (id_bibrec,termlist,type) VALUES (%%s,%%s,'CURRENT')" % wash_table_column_name(virtual_tab_name), (recID, serialize_via_marshal([]))) # kwalitee: disable=sql
except DatabaseError:
pass
def insert_index(self, index_id, recID_low, recID_high):
"""
Inserts terms from dependent index to virtual table
        without looking at what's inside the virtual table or which
        terms are being added. It's faster than 'updating',
        but it can only be used when the virtual table is free of
        terms from this dependent index.
        @param index_id: id of the dependent index we want to insert from
@param recID_low: first recID from the range of considered recIDs
@param recID_high: last recID from the range of considered recIDs
"""
index_name = self.dependent_indexes[index_id]
insert_to_cache_for_record = self.insert_to_cache_for_record
virtual_tab_name = self.table_name[:-1] + "R"
# take new values
new_regular_values = self.retrieve_new_values_from_index(index_id, [recID_low, recID_high])
# take old values
old_virtual_values = self.retrieve_old_values([recID_low, recID_high])
# update reversed table
for recID in xrange(recID_low, recID_high + 1):
new_values = new_regular_values.get(recID) or []
old_values = old_virtual_values.get(recID) or []
to_serialize = insert_to_cache_for_record(index_name, recID, old_values, new_values)
if len(to_serialize) == 0:
continue
run_sql("INSERT INTO %s (id_bibrec,termlist,type) VALUES (%%s,%%s,'FUTURE')" % wash_table_column_name(virtual_tab_name), (recID, serialize_via_marshal(to_serialize))) # kwalitee: disable=sql
try:
run_sql("INSERT INTO %s (id_bibrec,termlist,type) VALUES (%%s,%%s,'CURRENT')" % wash_table_column_name(virtual_tab_name), (recID, serialize_via_marshal([]))) # kwalitee: disable=sql
except DatabaseError:
pass
def remove_index(self, index_id, recID_low, recID_high):
"""
Removes words found in dependent index from reversed
table of virtual index. Updates the state of the memory
(for future removal from forward table).
        Takes into account that given words can be found in more
        than one dependent index and won't mark such words
        for the removal process.
@param index_id: id of the dependent index we want to remove
@param recID_low: first recID from the range of considered recIDs
@param recID_high: last recID from the range of considered recIDs
"""
index_name = self.dependent_indexes[index_id]
remove_from_cache_for_record = self.remove_from_cache_for_record
virtual_tab_name = self.table_name[:-1] + "R"
# take old values
old_virtual_values = self.retrieve_old_values([recID_low, recID_high])
# update reversed table
for recID in xrange(recID_low, recID_high + 1):
old_values = old_virtual_values.get(recID) or []
to_serialize = remove_from_cache_for_record(index_name, recID, old_values)
if len(to_serialize) == 0:
continue
run_sql("INSERT INTO %s (id_bibrec,termlist,type) VALUES (%%s,%%s,'FUTURE')" % wash_table_column_name(virtual_tab_name), (recID, serialize_via_marshal(to_serialize))) # kwalitee: disable=sql
try:
run_sql("INSERT INTO %s (id_bibrec,termlist,type) VALUES (%%s,%%s,'CURRENT')" % wash_table_column_name(virtual_tab_name), (recID, serialize_via_marshal([]))) # kwalitee: disable=sql
except DatabaseError:
pass
def update_cache_for_record(self, index_name, recID, old_values, new_values):
"""
Updates memory (cache) with information on what to
remove/add/modify in forward table for specified record.
It also returns new terms which should be indexed for given record.
@param index_name: index name of dependent index
@param recID: considered record
@param old_values: all old values from all dependent indexes
for this virtual index for recID
@param new_values: new values from some dependent index
which should be added
"""
prefix = make_prefix(index_name)
put = self.put
new_values_prefix = [prefix + term for term in new_values]
part_values = []
tmp_old_values_prefix = []
# split old values from v.index into those with 'prefix' and those without
for term in old_values:
if term.startswith(prefix):
term_without_prefix = re.sub(re_prefix, '', term)
part_values.append(term_without_prefix)
put(recID, term_without_prefix, -1)
else:
tmp_old_values_prefix.append(term)
# remember not to remove words that occur more than once
part_values = set(part_values)
for value in tmp_old_values_prefix:
term_without_prefix = re.sub(re_prefix, '', value)
if term_without_prefix in part_values:
put(recID, term_without_prefix, 1)
for term_without_prefix in new_values:
put(recID, term_without_prefix, 1)
tmp_new_values_prefix = list(tmp_old_values_prefix)
tmp_new_values_prefix.extend(new_values_prefix)
return tmp_new_values_prefix
def insert_to_cache_for_record(self, index_name, recID, old_values, new_values):
"""
Updates cache with terms which should be inserted to database.
Used in insert_index function. See also: update_cache_for_record
which is analogous for update_index function.
"""
prefix = make_prefix(index_name)
append = old_values.append
put = self.put
for term in new_values:
append(prefix + term)
put(recID, term, 1)
return old_values
def remove_from_cache_for_record(self, index_name, recID, old_values):
"""
Updates information in cache with terms which should be removed
from virtual table. Used in remove_index function.
"""
prefix = make_prefix(index_name)
tmp_rest = []
tmp_removed = []
tmp_new_values = []
append_to_new = tmp_new_values.append
append_to_rest = tmp_rest.append
append_to_removed = tmp_removed.append
put = self.put
for term in old_values:
if term.startswith(prefix):
term_without_prefix = re.sub(re_prefix, '', term)
append_to_removed(term_without_prefix)
put(recID, term_without_prefix, -1)
else:
append_to_rest(re.sub(re_prefix, '', term))
append_to_new(term)
to_remember = set(tmp_rest) & set(tmp_removed)
for term_without_prefix in to_remember:
put(recID, term_without_prefix, 1)
return tmp_new_values
def clean_database(self):
"""Removes all entries from corresponding tables in database"""
query = """DELETE FROM %s""" % self.table_name
run_sql(query)
query = """DELETE FROM %s""" % self.table_name[:-1] + "R"
run_sql(query)
def clean_queue_table(self, index_name):
"""
Cleans queue table (i.e. idxWORD/PAIR/PHRASExxQ)
        for a specific index. It means that the function will remove
        all entries for this index from the queue table in the db.
"""
query = "DELETE FROM %s WHERE index_name='%s'" % \
(self.table_name[:-1].lstrip(self.table_prefix) + "Q",
index_name)
run_sql(query)
def remove_duplicates(self, entries):
"""
Removes duplicates from a list of entries (taken from Queue table)
in order to process a single command only once.
Queue table may look like this:
id (..) id_bibrec_low id_bibrec_high index_name mode
...
12 1 100 title update
13 1 100 title update
We don't want to perform the same operation twice. First we want to
squash the same commands into one.
@param entries: list of entries taken from the database
"""
unique = set()
return [entry for entry in entries if entry not in unique and not unique.add(entry)]
def remove_dependent_index(self, index_name):
"""
Removes dependent index from this virtual index.
It means removing all words from all records with prefix:
__index_name__ from reversed table, and removing some of
them from forward table if they don't appear in another
dependent index.
@param index_name: name of the dependent index to remove
"""
flush = 10000
dependent = self.dependent_indexes.values()
if len(dependent) == 0:
write_message("Specified index is not virtual...")
return
if index_name not in dependent:
write_message("Dependent index already removed...")
return
index_id = get_index_id_from_index_name(index_name)
records_range = get_records_range_for_index(index_id)
write_message("Removing an index: %s" % index_name)
if records_range:
flush_count = 0
chunks = chunk_generator([records_range[0], records_range[1]])
try:
while True:
task_sleep_now_if_required()
chunk = chunks.next()
self.remove_index(index_id, chunk[0], chunk[1])
flush_count = flush_count + chunk[1] - chunk[0] + 1
self.recIDs_in_mem.append(chunk)
if flush_count >= flush:
flush_count = 0
self.put_into_db()
except StopIteration:
if flush_count > 0:
self.put_into_db()
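# Editor's illustration, not part of the original module: the prefixing scheme
# the VirtualIndexTable methods above rely on. A term coming from a dependent
# index is stored in the virtual reverse table as '__<index name>__<term>'
# (make_prefix() builds the prefix in the real code) and the module-level
# re_prefix pattern strips it again.
def _example_virtual_index_prefixing():
    prefix = "__author__"  # the shape make_prefix("author") is expected to produce
    stored = prefix + "ellis"
    assert stored == "__author__ellis"
    assert re_prefix.sub('', stored) == "ellis"  # back to the bare term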
class WordTable(AbstractIndexTable):
"""
This class represents a single index table of regular index
    (regular means it doesn't accumulate data from other indexes,
    but takes data directly from the metadata of the records which
    are being indexed; for the other type of index see: VirtualIndexTable).
    To start the indexing process one needs to invoke the add_recIDs() method.
    For further reading see the description of that method.
"""
def __init__(self, index_name, table_type, table_prefix="", wash_index_terms=50):
"""Creates words table instance.
        @param index_name: the index name
@param table_type: type of the wordtable: Words, Pairs, Phrases
@param table_prefix: prefix for table name, indexing will be performed
on table: <<table_prefix>>idx<<wordtable_type>>XXF
@param wash_index_terms: do we wash index terms, and if yes (when >0),
how many characters do we keep in the index terms; see
max_char_length parameter of wash_index_term()
"""
AbstractIndexTable.__init__(self, index_name, table_type, table_prefix, wash_index_terms)
self.tags = get_index_tags(index_name, virtual=False)
self.nonmarc_tags = get_index_tags(index_name,
virtual=False,
tagtype="nonmarc")
self.timestamp = datetime.now()
self.virtual_indexes = get_index_virtual_indexes(self.index_id)
self.virtual_index_update_mode = CFG_BIBINDEX_UPDATE_MODE["Update"]
try:
self.stemming_language = get_index_stemming_language(self.index_id)
except KeyError:
self.stemming_language = ''
self.remove_stopwords = get_index_remove_stopwords(self.index_id)
self.remove_html_markup = get_index_remove_html_markup(self.index_id)
self.remove_latex_markup = get_index_remove_latex_markup(self.index_id)
self.tokenizer = get_index_tokenizer(self.index_id)(self.stemming_language,
self.remove_stopwords,
self.remove_html_markup,
self.remove_latex_markup)
self.tokenizer_type = detect_tokenizer_type(self.tokenizer)
self.default_tokenizer_function = self.tokenizer.get_tokenizing_function(table_type)
self.special_tags = self._handle_special_tags()
if self.stemming_language and self.table_name.startswith('idxWORD'):
write_message('%s has stemming enabled, language %s' % (self.table_name, self.stemming_language))
def _handle_special_tags(self):
"""
        Fills in a dict with special tags which
        always use the same tokenizer, independently
        of the index.
"""
special_tags = {}
fields = self.tags + self.nonmarc_tags
for tag in fields:
if tag in CFG_BIBINDEX_SPECIAL_TAGS:
for t in CFG_BIBINDEX_INDEX_TABLE_TYPE:
if self.table_type == CFG_BIBINDEX_INDEX_TABLE_TYPE[t]:
tokenizer_name = CFG_BIBINDEX_SPECIAL_TAGS[tag][t]
tokenizer = _TOKENIZERS[tokenizer_name]
instance = tokenizer(self.stemming_language,
self.remove_stopwords,
self.remove_html_markup,
self.remove_latex_markup)
special_tags[tag] = instance.get_tokenizing_function(self.table_type)
break
return special_tags
def turn_off_virtual_indexes(self):
"""
        Prevents reindexing of related virtual indexes.
"""
self.virtual_indexes = []
def turn_on_virtual_indexes(self):
"""
Turns on indexing related virtual indexes.
"""
self.virtual_indexes = get_index_virtual_indexes(self.index_id)
def get_field(self, recID, tag):
"""Returns list of values of the MARC-21 'tag' fields for the
record 'recID'."""
out = []
bibXXx = "bib" + tag[0] + tag[1] + "x"
bibrec_bibXXx = "bibrec_" + bibXXx
query = """SELECT value FROM %s AS b, %s AS bb
WHERE bb.id_bibrec=%%s AND bb.id_bibxxx=b.id
AND tag LIKE %%s""" % (bibXXx, bibrec_bibXXx)
res = run_sql(query, (recID, tag))
for row in res:
out.append(row[0])
return out
def notify_virtual_indexes(self, recID_ranges):
"""
Informs all related virtual indexes about index change.
Function leaves information about the change for each index
in proper table in database (idxSOMETHINGxxQ).
@param recID_ranges: low and high recIDs of ranges
@type recID_ranges: list [[low_id1, high_id1], [low_id2, high_id2]...]
"""
query = """INSERT INTO %s (runtime, id_bibrec_low, id_bibrec_high, index_name, mode)
VALUES (%%s, %%s, %%s, %%s, %%s)"""
for index_id, index_name in self.virtual_indexes:
tab_name = "idx%s%02dQ" % (self.table_type, index_id)
full_query = query % tab_name
for recID_range in recID_ranges:
run_sql(full_query, (self.timestamp,
recID_range[0],
recID_range[1],
self.index_name,
self.virtual_index_update_mode))
def display(self):
"Displays the word table."
keys = self.value.keys()
keys.sort()
for k in keys:
write_message("%s: %s" % (k, self.value[k]))
def count(self):
"Returns the number of words in the table."
return len(self.value)
def info(self):
"Prints some information on the words table."
write_message("The words table contains %d words." % self.count())
def lookup_words(self, word=""):
"Lookup word from the words table."
if not word:
done = 0
while not done:
try:
word = raw_input("Enter word: ")
done = 1
except (EOFError, KeyboardInterrupt):
return
if self.value.has_key(word):
write_message("The word '%s' is found %d times." \
% (word, len(self.value[word])))
else:
write_message("The word '%s' does not exist in the word file."\
% word)
def add_recIDs(self, recIDs, opt_flush):
"""Fetches records which id in the recIDs range list and adds
them to the wordTable. The recIDs range list is of the form:
[[i1_low,i1_high],[i2_low,i2_high], ..., [iN_low,iN_high]].
"""
global chunksize, _last_word_table
flush_count = 0
records_done = 0
records_to_go = 0
for arange in recIDs:
records_to_go = records_to_go + arange[1] - arange[0] + 1
time_started = time.time() # will measure profile time
for arange in recIDs:
i_low = arange[0]
chunksize_count = 0
while i_low <= arange[1]:
task_sleep_now_if_required()
# calculate chunk group of recIDs and treat it:
i_high = min(i_low + opt_flush - flush_count - 1, arange[1])
i_high = min(i_low + chunksize - chunksize_count - 1, i_high)
try:
self.chk_recID_range(i_low, i_high)
except StandardError:
if self.index_name == 'fulltext' and CFG_SOLR_URL:
solr_commit()
raise
write_message(CFG_BIBINDEX_ADDING_RECORDS_STARTED_STR % \
(self.table_name, i_low, i_high))
if CFG_CHECK_MYSQL_THREADS:
kill_sleepy_mysql_threads()
percentage_display = get_percentage_completed(records_done, records_to_go)
task_update_progress("(%s:%s) adding recs %d-%d %s" % (self.table_name, self.index_name, i_low, i_high, percentage_display))
self.del_recID_range(i_low, i_high)
just_processed = self.add_recID_range(i_low, i_high)
flush_count = flush_count + i_high - i_low + 1
chunksize_count = chunksize_count + i_high - i_low + 1
records_done = records_done + just_processed
write_message(CFG_BIBINDEX_ADDING_RECORDS_STARTED_STR % \
(self.table_name, i_low, i_high))
if chunksize_count >= chunksize:
chunksize_count = 0
# flush if necessary:
if flush_count >= opt_flush:
self.put_into_db()
self.clean()
if self.index_name == 'fulltext' and CFG_SOLR_URL:
solr_commit()
write_message("%s backing up" % (self.table_name))
flush_count = 0
self.log_progress(time_started, records_done, records_to_go)
# iterate:
i_low = i_high + 1
if flush_count > 0:
self.put_into_db()
if self.index_name == 'fulltext' and CFG_SOLR_URL:
solr_commit()
self.log_progress(time_started, records_done, records_to_go)
self.notify_virtual_indexes(recIDs)
def add_recID_range(self, recID1, recID2):
"""Add records from RECID1 to RECID2."""
wlist = {}
self.recIDs_in_mem.append([recID1, recID2])
# special case of author indexes where we also add author
# canonical IDs:
if self.index_name in ('author', 'firstauthor', 'exactauthor', 'exactfirstauthor'):
for recID in range(recID1, recID2 + 1):
if not wlist.has_key(recID):
wlist[recID] = []
wlist[recID] = list_union(get_author_canonical_ids_for_recid(recID),
wlist[recID])
marc, nonmarc = self.find_nonmarc_records(recID1, recID2)
if marc:
collector = TermCollector(self.tokenizer,
self.tokenizer_type,
self.table_type,
self.tags,
[recID1, recID2])
collector.set_special_tags(self.special_tags)
wlist = collector.collect(marc, wlist)
if nonmarc:
collector = NonmarcTermCollector(self.tokenizer,
self.tokenizer_type,
self.table_type,
self.nonmarc_tags,
[recID1, recID2])
collector.set_special_tags(self.special_tags)
wlist = collector.collect(nonmarc, wlist)
# lookup index-time synonyms:
synonym_kbrs = get_all_synonym_knowledge_bases()
if synonym_kbrs.has_key(self.index_name):
if len(wlist) == 0: return 0
recIDs = wlist.keys()
for recID in recIDs:
for word in wlist[recID]:
word_synonyms = get_synonym_terms(word,
synonym_kbrs[self.index_name][0],
synonym_kbrs[self.index_name][1],
use_memoise=True)
if word_synonyms:
wlist[recID] = list_union(word_synonyms, wlist[recID])
# were there some words for these recIDs found?
recIDs = wlist.keys()
for recID in recIDs:
# was this record marked as deleted?
if "DELETED" in self.get_field(recID, "980__c"):
wlist[recID] = []
write_message("... record %d was declared deleted, removing its word list" % recID, verbose=9)
write_message("... record %d, termlist: %s" % (recID, wlist[recID]), verbose=9)
if len(wlist) == 0: return 0
# put words into reverse index table with FUTURE status:
for recID in recIDs:
run_sql("INSERT INTO %sR (id_bibrec,termlist,type) VALUES (%%s,%%s,'FUTURE')" % wash_table_column_name(self.table_name[:-1]), (recID, serialize_via_marshal(wlist[recID]))) # kwalitee: disable=sql
# ... and, for new records, enter the CURRENT status as empty:
try:
run_sql("INSERT INTO %sR (id_bibrec,termlist,type) VALUES (%%s,%%s,'CURRENT')" % wash_table_column_name(self.table_name[:-1]), (recID, serialize_via_marshal([]))) # kwalitee: disable=sql
except DatabaseError:
# okay, it's an already existing record, no problem
pass
# put words into memory word list:
put = self.put
for recID in recIDs:
for w in wlist[recID]:
put(recID, w, 1)
return len(recIDs)
def find_nonmarc_records(self, recID1, recID2):
"""Divides recID range into two different tables,
first one contains only recIDs of the records that
are Marc type and the second one contains records
of nonMarc type"""
marc = range(recID1, recID2 + 1)
nonmarc = []
query = """SELECT id FROM %s WHERE master_format <> 'marc'
AND id BETWEEN %%s AND %%s""" % "bibrec"
res = run_sql(query, (recID1, recID2))
if res:
nonmarc = list(zip(*res)[0])
if len(nonmarc) == (recID2 - recID1 + 1):
nonmarc = xrange(recID1, recID2 + 1)
marc = []
else:
for recID in nonmarc:
marc.remove(recID)
else:
marc = xrange(recID1, recID2 + 1)
return [marc, nonmarc]
def log_progress(self, start, done, todo):
"""Calculate progress and store it.
start: start time,
done: records processed,
todo: total number of records"""
time_elapsed = time.time() - start
# consistency check
if time_elapsed == 0 or done > todo:
return
time_recs_per_min = done / (time_elapsed / 60.0)
write_message("%d records took %.1f seconds to complete.(%1.f recs/min)"\
% (done, time_elapsed, time_recs_per_min))
if time_recs_per_min:
write_message("Estimated runtime: %.1f minutes" % \
((todo - done) / time_recs_per_min))
def put(self, recID, word, sign):
"""Keeps track of changes done during indexing
and stores these changes in memory for further use.
Indexing process needs this information later while
filling in the database.
@param recID: recID of the record we want to update in memory
@param word: word we want to update
        @param sign: sign of the word, 1 means keep this word in database,
-1 remove word from database
"""
value = self.value
try:
if self.wash_index_terms:
word = wash_index_term(word, self.wash_index_terms)
if value.has_key(word):
# the word 'word' exist already: update sign
value[word][recID] = sign
else:
value[word] = {recID: sign}
except:
write_message("Error: Cannot put word %s with sign %d for recID %s." % (word, sign, recID))
def del_recIDs(self, recIDs):
"""Fetches records which id in the recIDs range list and adds
them to the wordTable. The recIDs range list is of the form:
[[i1_low,i1_high],[i2_low,i2_high], ..., [iN_low,iN_high]].
"""
count = 0
for arange in recIDs:
task_sleep_now_if_required()
self.del_recID_range(arange[0], arange[1])
count = count + arange[1] - arange[0]
self.virtual_index_update_mode = CFG_BIBINDEX_UPDATE_MODE["Remove"]
self.put_into_db()
self.notify_virtual_indexes(recIDs)
if self.index_name == 'fulltext' and CFG_SOLR_URL:
solr_commit()
def del_recID_range(self, low, high):
"""Deletes records with 'recID' system number between low
and high from memory words index table."""
write_message("%s fetching existing words for records #%d-#%d started" % \
(self.table_name, low, high), verbose=3)
self.recIDs_in_mem.append([low, high])
query = """SELECT id_bibrec,termlist FROM %sR as bb WHERE bb.id_bibrec
BETWEEN %%s AND %%s""" % (self.table_name[:-1])
recID_rows = run_sql(query, (low, high))
for recID_row in recID_rows:
recID = recID_row[0]
wlist = deserialize_via_marshal(recID_row[1])
for word in wlist:
self.put(recID, word, -1)
write_message("%s fetching existing words for records #%d-#%d ended" % \
(self.table_name, low, high), verbose=3)
def check_bad_words(self):
"""
Finds bad words in reverse tables. Returns True in case of bad words.
"""
query = """SELECT 1 FROM %sR WHERE type IN ('TEMPORARY','FUTURE') LIMIT 1""" \
% (self.table_name[:-1],)
res = run_sql(query)
return bool(res)
def report_on_table_consistency(self):
"""Check reverse words index tables (e.g. idxWORD01R) for
interesting states such as 'TEMPORARY' state.
Prints small report (no of words, no of bad words).
"""
# find number of words:
query = """SELECT COUNT(1) FROM %s""" % (self.table_name)
res = run_sql(query, None, 1)
if res:
nb_words = res[0][0]
else:
nb_words = 0
# report stats:
write_message("%s contains %d words" % (self.table_name, nb_words))
# find possible bad states in reverse tables:
if self.check_bad_words():
write_message("EMERGENCY: %s needs to be repaired" %
(self.table_name, ))
else:
write_message("%s is in consistent state" % (self.table_name))
def repair(self, opt_flush):
"""Repair the whole table"""
# find possible bad states in reverse tables:
if not self.check_bad_words():
return
query = """SELECT id_bibrec FROM %sR WHERE type IN ('TEMPORARY','FUTURE')""" \
% (self.table_name[:-1])
res = intbitset(run_sql(query))
recIDs = create_range_list(list(res))
flush_count = 0
records_done = 0
records_to_go = 0
for arange in recIDs:
records_to_go = records_to_go + arange[1] - arange[0] + 1
time_started = time.time() # will measure profile time
for arange in recIDs:
i_low = arange[0]
chunksize_count = 0
while i_low <= arange[1]:
task_sleep_now_if_required()
# calculate chunk group of recIDs and treat it:
i_high = min(i_low + opt_flush - flush_count - 1, arange[1])
i_high = min(i_low + chunksize - chunksize_count - 1, i_high)
self.fix_recID_range(i_low, i_high)
flush_count = flush_count + i_high - i_low + 1
chunksize_count = chunksize_count + i_high - i_low + 1
records_done = records_done + i_high - i_low + 1
if chunksize_count >= chunksize:
chunksize_count = 0
# flush if necessary:
if flush_count >= opt_flush:
self.put_into_db("emergency")
self.clean()
flush_count = 0
self.log_progress(time_started, records_done, records_to_go)
# iterate:
i_low = i_high + 1
if flush_count > 0:
self.put_into_db("emergency")
self.log_progress(time_started, records_done, records_to_go)
write_message("%s inconsistencies repaired." % self.table_name)
def chk_recID_range(self, low, high):
"""Check if the reverse index table is in proper state"""
## check db
query = """SELECT 1 FROM %sR WHERE type IN ('TEMPORARY','FUTURE')
AND id_bibrec BETWEEN %%s AND %%s LIMIT 1""" % self.table_name[:-1]
res = run_sql(query, (low, high), 1)
if not res:
write_message("%s for %d-%d is in consistent state" % (self.table_name, low, high))
return # okay, words table is consistent
## inconsistency detected!
write_message("EMERGENCY: %s inconsistencies detected..." % self.table_name)
error_message = "Errors found. You should check consistency of the " \
"%s - %sR tables.\nRunning 'bibindex --repair' is " \
"recommended." % (self.table_name, self.table_name[:-1])
write_message("EMERGENCY: " + error_message, stream=sys.stderr)
raise StandardError(error_message)
def fix_recID_range(self, low, high):
"""Try to fix reverse index database consistency
(e.g. table idxWORD01R) in the low,high doc-id range.
Possible states for a recID follow:
CUR TMP FUT: very bad things have happened: warn!
CUR TMP : very bad things have happened: warn!
CUR FUT: delete FUT (crash before flushing)
CUR : database is ok
TMP FUT: add TMP to memory and del FUT from memory
flush (revert to old state)
TMP : very bad things have happened: warn!
                   FUT: very bad things have happened: warn!
"""
state = {}
query = "SELECT id_bibrec,type FROM %sR WHERE id_bibrec BETWEEN %%s AND %%s"\
% self.table_name[:-1]
res = run_sql(query, (low, high))
for row in res:
if not state.has_key(row[0]):
state[row[0]] = []
state[row[0]].append(row[1])
ok = 1 # will hold info on whether we will be able to repair
for recID in state.keys():
if not 'TEMPORARY' in state[recID]:
if 'FUTURE' in state[recID]:
if 'CURRENT' not in state[recID]:
write_message("EMERGENCY: Index record %d is in inconsistent state. Can't repair it." % recID)
ok = 0
else:
write_message("EMERGENCY: Inconsistency in index record %d detected" % recID)
query = """DELETE FROM %sR
WHERE id_bibrec=%%s""" % self.table_name[:-1]
run_sql(query, (recID,))
write_message("EMERGENCY: Inconsistency in record %d repaired." % recID)
else:
if 'FUTURE' in state[recID] and not 'CURRENT' in state[recID]:
self.recIDs_in_mem.append([recID, recID])
# Get the words file
query = """SELECT type,termlist FROM %sR
WHERE id_bibrec=%%s""" % self.table_name[:-1]
write_message(query, verbose=9)
res = run_sql(query, (recID,))
for row in res:
wlist = deserialize_via_marshal(row[1])
write_message("Words are %s " % wlist, verbose=9)
if row[0] == 'TEMPORARY':
sign = 1
else:
sign = -1
for word in wlist:
self.put(recID, word, sign)
else:
write_message("EMERGENCY: %s for %d is in inconsistent "
"state. Couldn't repair it." % (self.table_name,
recID), stream=sys.stderr)
ok = 0
if not ok:
error_message = "Unrepairable errors found. You should check " \
"consistency of the %s - %sR tables. Deleting affected " \
"TEMPORARY and FUTURE entries from these tables is " \
"recommended; see the BibIndex Admin Guide." % \
(self.table_name, self.table_name[:-1])
write_message("EMERGENCY: " + error_message, stream=sys.stderr)
raise StandardError(error_message)
def main():
"""Main that construct all the bibtask."""
task_init(authorization_action='runbibindex',
authorization_msg="BibIndex Task Submission",
description="""Examples:
\t%s -a -i 234-250,293,300-500 -u admin@localhost
\t%s -a -w author,fulltext -M 8192 -v3
\t%s -d -m +4d -A on --flush=10000\n""" % ((sys.argv[0],) * 3), help_specific_usage=""" Indexing options:
-a, --add\t\tadd or update words for selected records
-d, --del\t\tdelete words for selected records
-i, --id=low[-high]\t\tselect according to doc recID
-m, --modified=from[,to]\tselect according to modification date
-c, --collection=c1[,c2]\tselect according to collection
-R, --reindex\treindex the selected indexes from scratch
Repairing options:
-k, --check\t\tcheck consistency for all records in the table(s)
-r, --repair\t\ttry to repair all records in the table(s)
Specific options:
-w, --windex=w1[,w2]\tword/phrase indexes to consider (all)
-M, --maxmem=XXX\tmaximum memory usage in kB (no limit)
-f, --flush=NNN\t\tfull consistent table flush after NNN records (10000)
--force\t\tforce indexing of all records for provided indexes
 -Z, --remove-dependent-index=w\tname of an index to remove from a virtual index
 -l, --all-virtual\t\tconsider the set of all virtual indexes; the same as: -w virtual_ind1,virtual_ind2,...
""",
version=__revision__,
specific_params=("adi:m:c:w:krRM:f:oZ:l", [
"add",
"del",
"id=",
"modified=",
"collection=",
"windex=",
"check",
"repair",
"reindex",
"maxmem=",
"flush=",
"force",
"remove-dependent-index=",
"all-virtual"
]),
task_stop_helper_fnc=task_stop_table_close_fnc,
task_submit_elaborate_specific_parameter_fnc=task_submit_elaborate_specific_parameter,
task_run_fnc=task_run_core,
task_submit_check_options_fnc=task_submit_check_options)
def task_submit_check_options():
"""Check for options compatibility."""
if task_get_option("reindex"):
if task_get_option("cmd") != "add" or task_get_option('id') or task_get_option('collection'):
print >> sys.stderr, "ERROR: You can use --reindex only when adding modified record."
return False
return True
def task_submit_elaborate_specific_parameter(key, value, opts, args):
""" Given the string key it checks it's meaning, eventually using the
value. Usually it fills some key in the options dict.
It must return True if it has elaborated the key, False, if it doesn't
know that key.
eg:
if key in ['-n', '--number']:
self.options['number'] = value
return True
return False
"""
if key in ("-a", "--add"):
task_set_option("cmd", "add")
if ("-x", "") in opts or ("--del", "") in opts:
raise StandardError("Can not have --add and --del at the same time!")
elif key in ("-k", "--check"):
task_set_option("cmd", "check")
elif key in ("-r", "--repair"):
task_set_option("cmd", "repair")
elif key in ("-d", "--del"):
task_set_option("cmd", "del")
elif key in ("-i", "--id"):
task_set_option('id', task_get_option('id') + split_ranges(value))
elif key in ("-m", "--modified"):
task_set_option("modified", get_date_range(value))
elif key in ("-c", "--collection"):
task_set_option("collection", value)
elif key in ("-R", "--reindex"):
task_set_option("reindex", True)
elif key in ("-w", "--windex"):
task_set_option("windex", value)
elif key in ("-M", "--maxmem"):
task_set_option("maxmem", int(value))
if task_get_option("maxmem") < base_process_size + 1000:
raise StandardError("Memory usage should be higher than %d kB" % \
(base_process_size + 1000))
elif key in ("-f", "--flush"):
task_set_option("flush", int(value))
elif key in ("-o", "--force"):
task_set_option("force", True)
elif key in ("-Z", "--remove-dependent-index",):
task_set_option("remove-dependent-index", value)
elif key in ("-l", "--all-virtual",):
task_set_option("all-virtual", True)
else:
return False
return True
def task_stop_table_close_fnc():
""" Close tables to STOP. """
global _last_word_table
if _last_word_table:
_last_word_table.put_into_db()
def get_recIDs_by_date_bibliographic(dates, index_name, force_all=False):
""" Finds records that were modified between DATES[0] and DATES[1]
for given index.
If DATES is not set, then finds records that were modified since
the last update of the index.
    @param index_name: name of the index to consider
"""
index_id = get_index_id_from_index_name(index_name)
if not dates:
query = """SELECT last_updated FROM idxINDEX WHERE id=%s"""
res = run_sql(query, (index_id,))
if not res:
return set([])
if not res[0][0] or force_all:
dates = ("0000-00-00", None)
else:
dates = (res[0][0], None)
if dates[1] is None:
res = intbitset(run_sql("""SELECT b.id FROM bibrec AS b WHERE b.modification_date >= %s""",
(dates[0],)))
if index_name == 'fulltext':
res |= intbitset(run_sql("""SELECT id_bibrec FROM bibrec_bibdoc JOIN bibdoc ON id_bibdoc=id
WHERE text_extraction_date <= modification_date AND
modification_date >= %s
AND status<>'DELETED'""",
(dates[0],)))
elif dates[0] is None:
res = intbitset(run_sql("""SELECT b.id FROM bibrec AS b WHERE b.modification_date <= %s""",
(dates[1],)))
if index_name == 'fulltext':
res |= intbitset(run_sql("""SELECT id_bibrec FROM bibrec_bibdoc JOIN bibdoc ON id_bibdoc=id
WHERE text_extraction_date <= modification_date
AND modification_date <= %s
AND status<>'DELETED'""",
(dates[1],)))
else:
res = intbitset(run_sql("""SELECT b.id FROM bibrec AS b
WHERE b.modification_date >= %s AND
b.modification_date <= %s""",
(dates[0], dates[1])))
if index_name == 'fulltext':
res |= intbitset(run_sql("""SELECT id_bibrec FROM bibrec_bibdoc JOIN bibdoc ON id_bibdoc=id
WHERE text_extraction_date <= modification_date AND
modification_date >= %s AND
modification_date <= %s AND
status<>'DELETED'""",
(dates[0], dates[1],)))
# special case of author indexes where we need to re-index
# those records that were affected by changed BibAuthorID attributions:
if index_name in ('author', 'firstauthor', 'exactauthor', 'exactfirstauthor'):
from invenio.bibauthorid_personid_maintenance import get_recids_affected_since
# dates[1] is ignored, since BibAuthorID API does not offer upper limit search
rec_list_author = intbitset(get_recids_affected_since(dates[0]))
res = res | rec_list_author
return set(res)
def get_recIDs_by_date_authority(dates, index_name, force_all=False):
""" Finds records that were modified between DATES[0] and DATES[1]
for given index.
If DATES is not set, then finds records that were modified since
the last update of the index.
Searches for bibliographic records connected to authority records
that have been changed.
"""
index_id = get_index_id_from_index_name(index_name)
index_tags = get_index_tags(index_name)
if not dates:
query = """SELECT last_updated FROM idxINDEX WHERE id=%s"""
res = run_sql(query, (index_id,))
if not res:
return set([])
if not res[0][0] or force_all:
dates = ("0000-00-00", None)
else:
dates = (res[0][0], None)
res = intbitset()
for tag in index_tags:
pattern = tag.replace('%', '*')
matches = fnmatch.filter(CFG_BIBAUTHORITY_CONTROLLED_FIELDS_BIBLIOGRAPHIC.keys(), pattern)
if not len(matches):
continue
for tag_match in matches:
# get the type of authority record associated with this field
auth_type = CFG_BIBAUTHORITY_CONTROLLED_FIELDS_BIBLIOGRAPHIC.get(tag_match)
# find updated authority records of this type
# dates[1] is ignored, needs dates[0] to find res
now = datetime.now()
auth_recIDs = search_pattern(p='980__a:' + auth_type) \
& search_unit_in_bibrec(str(dates[0]), str(now), search_type='m')
# now find dependent bibliographic records
for auth_recID in auth_recIDs:
# get the fix authority identifier of this authority record
control_nos = get_control_nos_from_recID(auth_recID)
# there may be multiple control number entries! (the '035' field is repeatable!)
for control_no in control_nos:
# get the bibrec IDs that refer to AUTHORITY_ID in TAG
tag_0 = tag_match[:5] + '0' # possibly do the same for '4' subfields ?
fieldvalue = '"' + control_no + '"'
res |= search_pattern(p=tag_0 + ':' + fieldvalue)
return set(res)
def get_not_updated_recIDs(modified_dates, indexes, force_all=False):
"""Finds not updated recIDs in database for indexes.
    @param modified_dates: between these dates we should look for modified records
    @type modified_dates: [date_old, date_new]
    @param indexes: list of index names
    @type indexes: list of strings
@param force_all: if True all records will be taken
"""
found_recIDs = set()
write_message(CFG_BIBINDEX_UPDATE_MESSAGE)
for index in indexes:
found_recIDs |= get_recIDs_by_date_bibliographic(modified_dates, index, force_all)
found_recIDs |= get_recIDs_by_date_authority(modified_dates, index, force_all)
return list(sorted(found_recIDs))
def get_recIDs_from_cli(indexes=[]):
"""
    Gets recID ranges from the CLI for indexing when the
    user specified the 'id' or 'collection' option, or
    searches for modified recIDs for the provided indexes
    when recIDs are not specified.
    @param indexes: a list of specified indexes, which
                    can be obtained from the CLI with the
                    get_indexes_from_cli() function.
@type indexes: list of strings
"""
# need to first update idxINDEX table to find proper recIDs for reindexing
if task_get_option("reindex"):
for index_name in indexes:
run_sql("""UPDATE idxINDEX SET last_updated='0000-00-00 00:00:00'
WHERE name=%s""", (index_name,))
if task_get_option("id"):
return task_get_option("id")
elif task_get_option("collection"):
l_of_colls = task_get_option("collection").split(",")
recIDs = perform_request_search(c=l_of_colls)
recIDs_range = []
for recID in recIDs:
recIDs_range.append([recID, recID])
return recIDs_range
elif task_get_option("cmd") == "add":
recs = get_not_updated_recIDs(task_get_option("modified"),
indexes,
task_get_option("force"))
recIDs_range = beautify_range_list(create_range_list(recs))
return recIDs_range
return []
def get_indexes_from_cli():
"""
    Gets indexes from the CLI and checks if they are
    valid. If no indexes were specified, the function
    returns all known indexes.
"""
indexes = task_get_option("windex")
all_virtual = task_get_option("all-virtual")
if all_virtual:
indexes = filter_for_virtual_indexes(get_all_indexes())
elif not indexes:
indexes = get_all_indexes()
else:
indexes = indexes.split(",")
indexes = remove_inexistent_indexes(indexes, leave_virtual=True)
return indexes
def remove_dependent_index(virtual_indexes, dependent_index):
"""
Removes dependent index from virtual indexes.
@param virtual_indexes: names of virtual_indexes
@type virtual_indexes: list of strings
@param dependent_index: name of dependent index
@type dependent_index: string
"""
if not virtual_indexes:
write_message("You should specify a name of a virtual index...")
return
id_dependent = get_index_id_from_index_name(dependent_index)
for index_name in virtual_indexes:
index_id = get_index_id_from_index_name(index_name)
for type_ in CFG_BIBINDEX_INDEX_TABLE_TYPE.itervalues():
vit = VirtualIndexTable(index_name, type_)
vit.remove_dependent_index(dependent_index)
task_sleep_now_if_required()
query = """DELETE FROM idxINDEX_idxINDEX WHERE id_virtual=%s AND id_normal=%s"""
run_sql(query, (index_id, id_dependent))
def should_update_virtual_indexes():
"""
Decides if any virtual indexes should be updated.
Decision is made based on arguments obtained
from CLI.
"""
return task_get_option("all-virtual") or task_get_option("windex")
def update_virtual_indexes(virtual_indexes, reindex=False):
"""
Function will update all specified virtual_indexes.
@param virtual_indexes: list of index names
@param reindex: shall we reindex given v.indexes from scratch?
"""
kwargs = {}
if reindex:
kwargs.update({'table_prefix': 'tmp_'})
for index_name in virtual_indexes:
if reindex:
index_id = get_index_id_from_index_name(index_name)
init_temporary_reindex_tables(index_id)
for key, type_ in CFG_BIBINDEX_INDEX_TABLE_TYPE.iteritems():
kwargs.update({'wash_index_terms': CFG_BIBINDEX_WASH_INDEX_TERMS[key]})
vit = VirtualIndexTable(index_name, type_, **kwargs)
vit.set_reindex_mode()
vit.run_update()
swap_temporary_reindex_tables(index_id)
update_index_last_updated([index_name], task_get_task_param('task_starting_time'))
task_sleep_now_if_required(can_stop_too=True)
else:
for key, type_ in CFG_BIBINDEX_INDEX_TABLE_TYPE.iteritems():
kwargs.update({'wash_index_terms': CFG_BIBINDEX_WASH_INDEX_TERMS[key]})
vit = VirtualIndexTable(index_name, type_, **kwargs)
vit.run_update()
task_sleep_now_if_required(can_stop_too=True)
def task_run_core():
"""Runs the task by fetching arguments from the BibSched task queue.
This is what BibSched will be invoking via daemon call.
"""
global _last_word_table
indexes = get_indexes_from_cli()
if len(indexes) == 0:
write_message("Specified indexes can't be found.")
return True
virtual_indexes = filter_for_virtual_indexes(indexes)
regular_indexes = list(set(indexes) - set(virtual_indexes))
# check tables consistency
if task_get_option("cmd") == "check":
for index_name in indexes:
wordTable = WordTable(index_name=index_name,
table_type=CFG_BIBINDEX_INDEX_TABLE_TYPE["Words"],
wash_index_terms=50)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
wordTable = WordTable(index_name=index_name,
table_type=CFG_BIBINDEX_INDEX_TABLE_TYPE["Pairs"],
wash_index_terms=100)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
wordTable = WordTable(index_name=index_name,
table_type=CFG_BIBINDEX_INDEX_TABLE_TYPE["Phrases"],
wash_index_terms=0)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
_last_word_table = None
return True
# virtual index: remove dependent index
if task_get_option("remove-dependent-index"):
remove_dependent_index(indexes,
task_get_option("remove-dependent-index"))
return True
# virtual index: update
if should_update_virtual_indexes():
update_virtual_indexes(virtual_indexes, task_get_option("reindex"))
if len(regular_indexes) == 0:
return True
# regular index: initialization for Words,Pairs,Phrases
recIDs_range = get_recIDs_from_cli(regular_indexes)
recIDs_for_index = find_affected_records_for_index(regular_indexes,
recIDs_range,
(task_get_option("force") or \
task_get_option("reindex") or \
task_get_option("cmd") == "del"))
if len(recIDs_for_index.keys()) == 0:
write_message("Selected indexes/recIDs are up to date.")
# Let's work on single words!
for index_name in recIDs_for_index.keys():
index_id = get_index_id_from_index_name(index_name)
reindex_prefix = ""
if task_get_option("reindex"):
reindex_prefix = "tmp_"
init_temporary_reindex_tables(index_id, reindex_prefix)
wordTable = WordTable(index_name=index_name,
table_type=CFG_BIBINDEX_INDEX_TABLE_TYPE["Words"],
table_prefix=reindex_prefix,
wash_index_terms=50)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
try:
if task_get_option("cmd") == "del":
if task_get_option("id") or task_get_option("collection"):
wordTable.del_recIDs(recIDs_range)
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Missing IDs of records to delete from " \
"index %s." % wordTable.table_name
write_message(error_message, stream=sys.stderr)
raise StandardError(error_message)
elif task_get_option("cmd") == "add":
final_recIDs = beautify_range_list(create_range_list(recIDs_for_index[index_name]))
wordTable.add_recIDs(final_recIDs, task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
elif task_get_option("cmd") == "repair":
wordTable.repair(task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Invalid command found processing %s" % \
wordTable.table_name
write_message(error_message, stream=sys.stderr)
raise StandardError(error_message)
except StandardError, e:
write_message("Exception caught: %s" % e, sys.stderr)
register_exception(alert_admin=True)
if _last_word_table:
_last_word_table.put_into_db()
raise
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
# Let's work on pairs now
wordTable = WordTable(index_name=index_name,
table_type=CFG_BIBINDEX_INDEX_TABLE_TYPE["Pairs"],
table_prefix=reindex_prefix,
wash_index_terms=100)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
try:
if task_get_option("cmd") == "del":
if task_get_option("id") or task_get_option("collection"):
wordTable.del_recIDs(recIDs_range)
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Missing IDs of records to delete from " \
"index %s." % wordTable.table_name
write_message(error_message, stream=sys.stderr)
raise StandardError(error_message)
elif task_get_option("cmd") == "add":
final_recIDs = beautify_range_list(create_range_list(recIDs_for_index[index_name]))
wordTable.add_recIDs(final_recIDs, task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
elif task_get_option("cmd") == "repair":
wordTable.repair(task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Invalid command found processing %s" % \
wordTable.table_name
write_message(error_message, stream=sys.stderr)
raise StandardError(error_message)
except StandardError, e:
write_message("Exception caught: %s" % e, sys.stderr)
register_exception()
if _last_word_table:
_last_word_table.put_into_db()
raise
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
# Let's work on phrases now
wordTable = WordTable(index_name=index_name,
table_type=CFG_BIBINDEX_INDEX_TABLE_TYPE["Phrases"],
table_prefix=reindex_prefix,
wash_index_terms=0)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
try:
if task_get_option("cmd") == "del":
if task_get_option("id") or task_get_option("collection"):
wordTable.del_recIDs(recIDs_range)
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Missing IDs of records to delete from " \
"index %s." % wordTable.table_name
write_message(error_message, stream=sys.stderr)
raise StandardError(error_message)
elif task_get_option("cmd") == "add":
final_recIDs = beautify_range_list(create_range_list(recIDs_for_index[index_name]))
wordTable.add_recIDs(final_recIDs, task_get_option("flush"))
if not task_get_option("id") and not task_get_option("collection"):
update_index_last_updated([index_name], task_get_task_param('task_starting_time'))
task_sleep_now_if_required(can_stop_too=True)
elif task_get_option("cmd") == "repair":
wordTable.repair(task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Invalid command found processing %s" % \
wordTable.table_name
write_message(error_message, stream=sys.stderr)
raise StandardError(error_message)
except StandardError, e:
write_message("Exception caught: %s" % e, sys.stderr)
register_exception()
if _last_word_table:
_last_word_table.put_into_db()
raise
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
if task_get_option("reindex"):
swap_temporary_reindex_tables(index_id, reindex_prefix)
update_index_last_updated([index_name], task_get_task_param('task_starting_time'))
task_sleep_now_if_required(can_stop_too=True)
# update modification date also for indexes that were up to date
if not task_get_option("id") and not task_get_option("collection") and \
task_get_option("cmd") == "add":
up_to_date = set(indexes) - set(recIDs_for_index.keys())
update_index_last_updated(list(up_to_date), task_get_task_param('task_starting_time'))
_last_word_table = None
return True
### okay, here we go:
if __name__ == '__main__':
main()
| 0.002705 |
"""Dependency tracking for checkpointable objects."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.training.checkpointable import base
from tensorflow.python.training.checkpointable import data_structures
class NotCheckpointable(object):
"""Marks instances of child classes as unsaveable using an object-based API.
Useful for marking objects which would otherwise look checkpointable because
of inheritance (e.g. through `Layer`) as not checkpointable. Inheriting from
`NotCheckpointable` does not prevent an object from being assigned to any
attributes, but will throw an error on save/restore.
"""
pass
class Checkpointable(base.CheckpointableBase):
"""Manages dependencies on other objects.
`Checkpointable` objects may have dependencies: other `Checkpointable` objects
which should be saved if the object declaring the dependency is saved. A
correctly saveable program has a dependency graph such that if changing a
global variable affects an object (e.g. changes the behavior of any of its
methods) then there is a chain of dependencies from the influenced object to
the variable.
Dependency edges have names, and are created implicitly when a
`Checkpointable` object is assigned to an attribute of another
`Checkpointable` object. For example:
```
obj = Checkpointable()
obj.v = ResourceVariable(0.)
```
The `Checkpointable` object `obj` now has a dependency named "v" on a
variable.
`Checkpointable` objects may specify `Tensor`s to be saved and restored
directly (e.g. a `Variable` indicating how to save itself) rather than through
dependencies on other objects. See
`Checkpointable._gather_saveables_for_checkpoint` for details.
"""
def __setattr__(self, name, value):
"""Support self.foo = checkpointable syntax."""
if getattr(self, "_setattr_tracking", True):
value = data_structures.sticky_attribute_assignment(
checkpointable=self, value=value, name=name)
super(Checkpointable, self).__setattr__(name, value)
def _no_dependency(self, value):
"""Override to allow CheckpointableBase to disable dependency tracking."""
return data_structures.NoDependency(value)
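# Illustrative usage sketch (an addition for clarity; it only restates what
# __setattr__ and _no_dependency above already do):
#
#   root = Checkpointable()
#   root.child = Checkpointable()           # adds a dependency edge named "child"
#   root.scratch = root._no_dependency([])  # wrapped value is not tracked
#
# Assigning a Checkpointable to an attribute creates the named dependency via
# __setattr__; wrapping a value with _no_dependency() opts it out of tracking.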
| 0.002704 |
from collections import defaultdict
class Polynomial(object):
def __init__(self, *args):
self.p = defaultdict(int)
self.sorted = list(enumerate(args))
self.p.update(self.sorted)
def __repr__(self):
return 'Polynomial%s' % self.sorted
def __call__(self, x):
return sum([a*(x**i) for i, a in self.p.items()])
def __iadd__(self, other):
for k, v in other.p.items():
self.p[k] += v
self.sorted = sorted(self.p.items())
return self
def __isub__(self, other):
for k, v in other.p.items():
self.p[k] -= v
self.sorted = sorted(self.p.items())
return self
def __add__(self, other):
new_p = self.p.copy()
for k, v in other.p.items():
new_p[k] += v
poly = Polynomial()
poly.p = new_p
return poly
def __sub__(self, other):
new_p = self.p.copy()
for k, v in other.p.items():
new_p[k] -= v
poly = Polynomial()
poly.p = new_p
return poly
def __eq__(self, other):
return self.p == other.p
def __ne__(self, other):
return self.p != other.p
if __name__ == '__main__':
# p(x) := 3x^2 + 2x + 1
p = Polynomial(1, 2, 3)
# 6 = p(1)
assert p(1) == 6
# q(x) := 5x^2 + 1
q = Polynomial(1, 0, 5)
# 6 = q(1)
assert q(1) == 6
# r(x) := p(x) + q(x) = 8x^2 + 2x + 1
r = p + q
# 12 = r(1) = q(1) + p(1)
assert r(1) == p(1) + q(1)
assert r == (p + q)
p += q
assert r == p
assert r != q
p -= q
assert r != p
assert r == (p + q)
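    # A few extra illustrative checks (added; they follow directly from the
    # class definitions above): subtraction mirrors addition term by term.
    s = r - q
    # s(x) = r(x) - q(x) = p(x) = 3x^2 + 2x + 1
    assert s(1) == p(1) == 6
    assert s == p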
| 0 |
#!/usr/bin/env python3
#
# Copyright (C) 2019 jayson
#
import json
import random
import logging
import utils.logging
from utils import connection
from config import RABBITMQ_MASTER
from pika.adapters.blocking_connection import BlockingChannel
from pika.spec import Basic, BasicProperties
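# Assumed shape of the JSON message body, inferred from the fields accessed in
# consumer() below (an illustrative note, not part of the original protocol):
#   {"from": "host-1", "producer": "worker-a", "index": 3, "body": "some text"}
# The routing key ('debug', 'info', 'warning' or 'error') selects the matching
# helper in utils.logging.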
def consumer(ch: BlockingChannel, method: Basic.Deliver, properties: BasicProperties, body: bytes):
data = json.loads(body)
if hasattr(utils.logging, method.routing_key):
getattr(utils.logging, method.routing_key)('{:<12} {:<8} {:<4} {:<8}: {}'.format(
data['from'], data['producer'], data['index'], method.routing_key, data['body']))
if random.randint(0, 20) == 1:
return ch.basic_reject(delivery_tag=method.delivery_tag, requeue=True)
ch.basic_ack(delivery_tag=method.delivery_tag)
if __name__ == '__main__':
utils.logging.init_logging(logging.DEBUG)
with connection.get_exchange_connection(RABBITMQ_MASTER, 'logs_routing', 'direct') as channel:
channel.basic_qos(prefetch_count=1)
channel.queue_declare('logs_routing_printer', durable=True, auto_delete=True)
channel.queue_bind(queue='logs_routing_printer', exchange='logs_routing', routing_key='debug')
channel.queue_bind(queue='logs_routing_printer', exchange='logs_routing', routing_key='info')
channel.queue_bind(queue='logs_routing_printer', exchange='logs_routing', routing_key='warning')
channel.queue_bind(queue='logs_routing_printer', exchange='logs_routing', routing_key='error')
channel.basic_consume('logs_routing_printer', consumer, auto_ack=False)
channel.start_consuming()
| 0.005896 |
import g
from tkrep import *
ptt_port=IntVar()
CatPort=StringVar()
PttPort=StringVar()
CatPort.set('None')
PttPort.set('None')
ndevin=IntVar()
ndevout=IntVar()
DevinName=StringVar()
DevoutName=StringVar()
dBm=IntVar()
dBm.set(37)
pttmode=StringVar()
serial_rate=IntVar()
serial_rate.set(4800)
databits=IntVar()
databits.set(8)
stopbits=IntVar()
stopbits.set(2)
serial_handshake=StringVar()
cat_enable=IntVar()
rig=StringVar()
rig.set('214 Kenwood TS-2000')
rignum=IntVar()
inbad=IntVar()
outbad=IntVar()
pttmode.set('DTR')
serial_handshake.set('None')
pttlist=("CAT","DTR","RTS","VOX")
baudlist=(1200,4800,9600,19200,38400,57600)
hslist=("None","XONXOFF","Hardware")
pwrlist=(-30,-27,-23,-20,-17,-13,-10,-7,-3, \
0,3,7,10,13,17,20,23,27,30,33,37,40,43,47,50,53,57,60)
if g.Win32:
serialportlist=("None","COM1","COM2","COM3","COM4","COM5","COM6", \
"COM7","COM8","COM9","COM10","COM11","COM12","COM13","COM14", \
"COM15","USB")
else:
serialportlist=("None","/dev/ttyS0","/dev/ttyS1","/dev/ttyUSB0", \
"/dev/ttyUSB1","/dev/ttyUSB2","/dev/ttyUSB3","/dev/ttyUSB4", \
"/dev/ttyUSB5","/dev/ttyUSB6","/dev/ttyUSB7","/dev/ttyUSB8")
datalist=(7,8)
stoplist=(1,2)
indevlist=[]
outdevlist=[]
riglist=[]
MyCall=StringVar()
MyGrid=StringVar()
try:
f=open('audio_caps','r')
s=f.readlines()
    f.close()
t="Input Devices:\n"
for i in range(len(s)):
col=s[i].split()
if int(col[1])>0:
t=str(i) + s[i][29:]
t=t[:len(t)-1]
indevlist.append(t)
for i in range(len(s)):
col=s[i].split()
if int(col[2])>0:
t=str(i) + s[i][29:]
t=t[:len(t)-1]
outdevlist.append(t)
except:
pass
try:
f=open('hamlib_rig_numbers','r')
s=f.readlines()
    f.close()
for i in range(len(s)):
t=s[i]
riglist.append(t[:len(t)-1])
except:
pass
#------------------------------------------------------ audin
#def audin(event=NONE):
# g.DevinName.set(DevinName.get())
# g.ndevin.set(int(DevinName.get()[:2]))
#------------------------------------------------------ audout
#def audout(event=NONE):
# g.DevoutName.set(DevoutName.get())
# g.ndevout.set(int(DevoutName.get()[:2]))
#------------------------------------------------------ rig_number
#def rig_number(event=NONE):
# rignum.set(int(rig.get()[:4]))
#------------------------------------------------------- chkcall
def chkcall(t):
r=-1
n=len(t)
if n>=3 and n<=10:
i1=t.count('/')
i2=t.find('/')
if i1==1 and i2>0:
t=t[:i2-1]+t[i2+1:]
if t.isalnum() and t.find(' ')<0:
r=1
return r
#------------------------------------------------------- chkgrid
def chkgrid(t):
r=-1
n=len(t)
if n==4 or n==6:
if int(t[0:1],36)>=10 and int(t[0:1],36)<=27 and \
int(t[1:2],36)>=10 and int(t[1:2],36)<=27 and \
int(t[2:3],36)>=0 and int(t[2:3],36)<=9 and \
int(t[3:4],36)>=0 and int(t[3:4],36)<=9: r=1
if r==1 and n==6:
r=-1
if int(t[4:5],36)>=10 and int(t[4:5],36)<=33 and \
int(t[5:6],36)>=10 and int(t[5:6],36)<=33: r=1
return r
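# Quick illustrative checks for the validators above (added for clarity;
# a return value of 1 means valid, -1 means invalid):
#   chkcall('K1ABC')   -> 1      chkcall('K1 ABC') -> -1
#   chkcall('K1ABC/7') -> 1
#   chkgrid('FN42')    -> 1      chkgrid('ZZ99')   -> -1
#   chkgrid('FN42ab')  -> 1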
| 0.052445 |
# -*- coding: utf-8 -*-
"""Test plugin system."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
from textwrap import dedent
from pytest import yield_fixture, raises
from ..plugin import (IPluginRegistry,
IPlugin,
get_plugin,
discover_plugins,
attach_plugins
)
from phylib.utils._misc import write_text
#------------------------------------------------------------------------------
# Fixtures
#------------------------------------------------------------------------------
@yield_fixture
def no_native_plugins():
# Save the plugins.
plugins = IPluginRegistry.plugins
IPluginRegistry.plugins = []
yield
IPluginRegistry.plugins = plugins
#------------------------------------------------------------------------------
# Tests
#------------------------------------------------------------------------------
def test_plugin_1(no_native_plugins):
class MyPlugin(IPlugin):
pass
assert IPluginRegistry.plugins == [MyPlugin]
assert get_plugin('MyPlugin').__name__ == 'MyPlugin'
with raises(ValueError):
get_plugin('unknown')
def test_discover_plugins(tempdir, no_native_plugins):
path = tempdir / 'my_plugin.py'
contents = '''from phy import IPlugin\nclass MyPlugin(IPlugin): pass'''
write_text(path, contents)
plugins = discover_plugins([tempdir])
assert plugins
assert plugins[0].__name__ == 'MyPlugin'
def test_attach_plugins(tempdir):
class MyController(object):
pass
write_text(tempdir / 'plugin1.py', dedent(
'''
from phy import IPlugin
class MyPlugin1(IPlugin):
def attach_to_controller(self, controller):
controller.plugin1 = True
'''))
class MyPlugin2(IPlugin):
def attach_to_controller(self, controller):
controller.plugin2 = True
contents = dedent('''
c = get_config()
c.Plugins.dirs = ['%s']
c.MyController.plugins = ['MyPlugin1']
''' % tempdir)
write_text(tempdir / 'phy_config.py', contents)
controller = MyController()
attach_plugins(controller, plugins=['MyPlugin2'], config_dir=tempdir)
assert controller.plugin1 == controller.plugin2 is True
| 0.002461 |
#!/usr/bin/python
import os
import os.path
import datetime
import logging
import sys
import yaml
from optparse import OptionParser
from helpers.resources_listing import ResourcesListing
from layout_parser import LayoutParser
from lxml import etree
from xml.etree import ElementTree
from lxml.etree import XMLSyntaxError
class UIXML(object):
def __init__(self):
self.log = logging.getLogger("ui_xml")
self.log.setLevel(logging.DEBUG)
def parse_file(self, layout_file):
self.log.info('parsing %s', layout_file)
def write_xml_file(self, xml_file, root):
tree = root.getroottree()
# Strip the merge tag
etree.strip_tags(tree, 'merge')
with open(xml_file, 'w+') as f:
f.write(etree.tostring(tree, pretty_print=True, encoding='utf-8'))
f.close()
def start_main(self, source_dir, target_dir):
count = 1
# Iterate over the unpacked apk files in the source directory.
for apk_dir in [os.path.join(source_dir, d) for d in
os.listdir(source_dir)]:
self.log.info('Parsing %s', apk_dir)
package_name = None
version_name = None
version_code = None
if not os.path.exists(os.path.join(apk_dir, 'AndroidManifest.xml')):
self.log.error('AndroidManifest.xml is missing in %s . ' +
'Unable to find package and version info.', apk_dir)
continue
version_name, version_code, package_name = self.get_app_info_from_manifest(
os.path.join(apk_dir, 'AndroidManifest.xml'))
if not package_name:
self.log.error('Failed to find package name for %s', apk_dir)
continue
if not version_code or not version_name:
apktool_yml = os.path.join(apk_dir, 'apktool.yml')
if not os.path.exists(apktool_yml):
self.log.error('Failed to find apktool.yml file for %s',
apk_dir)
continue
version_code, version_name = self.get_app_versions_from_yaml(
apktool_yml)
if not version_code or not version_name:
self.log.error('Failed to find app version for %s', apk_dir)
continue
ui_xml_dir = target_dir
if target_dir is None:
ui_xml_dir = os.path.abspath(os.path.join(apk_dir, 'ui-xml'))
if not os.path.exists(ui_xml_dir):
os.makedirs(ui_xml_dir)
ui_xml_file = os.path.join(ui_xml_dir, package_name + '-' +
version_code + '.xml')
root = etree.Element('App')
root.set('name', package_name)
root.set('version_code', version_code)
# If the UI xml file exists, delete it and create a new one.
if(os.path.exists(ui_xml_file)):
os.remove(ui_xml_file)
# check if the directory is for an unpacked apk. i.e, contains
# res/ sub directory.
if not os.path.exists(os.path.abspath(
os.path.join(apk_dir, 'res'))):
self.log.error('no res directory in %s', apk_dir)
continue
layout_dirs = ResourcesListing.get_all_layout_dirs(apk_dir)
self.log.info("%i - APK %s has %i layout directories", count,
apk_dir, len(layout_dirs))
count += 1
for layout_dir in layout_dirs:
dir_element = self.add_directory_element(root, layout_dir)
layout_files = ResourcesListing.get_all_layout_files(layout_dir)
for layout_file in layout_files:
# Do not add layout files that start with <merge>
if self.layout_starts_with_merge(layout_file):
continue
file_element= self.add_file_element(dir_element, layout_file)
self.add_layout_elements(file_element, layout_file, apk_dir)
# Add res/xml/ directory which contains various XML configuration files
self.do_directory(apk_dir, ['res', 'xml'], root)
# Add res/menu
self.do_directory(apk_dir, ['res', 'menu'], root)
self.write_xml_file(ui_xml_file, root)
self.log.info('UI XML has been written to %s' %(ui_xml_file))
def do_directory(self, apk_dir, sub_dirs, root):
xml_dir = os.path.abspath(os.path.join(apk_dir, sub_dirs[0], sub_dirs[1]))
if os.path.exists(xml_dir) and len(os.listdir(xml_dir)) > 0:
dir_element = self.add_directory_element(root, xml_dir)
xml_files = []
for x in os.listdir(xml_dir):
x = os.path.join(xml_dir, x)
if os.path.isfile(x):
xml_files.append(x)
for xml_file in xml_files:
if self.layout_starts_with_merge(xml_file):
continue
file_element= self.add_file_element(dir_element, xml_file)
self.add_layout_elements(file_element, xml_file, apk_dir)
def add_directory_element(self, root, dir_name):
dir_element = etree.Element('Directory')
dir_element.set('directory_name', os.path.basename(dir_name))
root.append(dir_element)
return dir_element
def add_file_element(self, dir_element, layout_file):
file_element = etree.Element('File')
file_element.set('file_name', os.path.basename(layout_file))
dir_element.append(file_element)
return file_element
def add_layout_elements(self, file_element, layout_file, apk_dir):
layout_tree = LayoutParser(self.log).parse(layout_file, apk_dir)
if layout_tree is not None:
file_element.append(layout_tree.getroot())
def get_app_info_from_manifest(self, manifest_file):
version_name = version_code = package_name = None
try:
tree = etree.parse(manifest_file)
root = tree.getroot()
package_name = root.get('package')
version_code = root.get('versionCode')
version_name = root.get('versionName')
except XMLSyntaxError:
self.log.error('Invalid XML in the AndroidManifest file %s', manifest_file)
return version_name, version_code, package_name
def get_app_versions_from_yaml(self, yaml_file):
# Parse apktool.yaml to get the version code and name values
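        # Expected apktool.yml fragment (illustrative assumption, based on the
        # keys read below):
        #   versionInfo:
        #     versionCode: '42'
        #     versionName: 1.3.0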
version_code = version_name = None
try:
self.log.info("Processing file %s.", yaml_file)
if not os.path.isfile(yaml_file):
self.log.error("YAML file does not exist %s", yaml_file)
                return None, None
doc = None
with open(yaml_file, 'r') as f:
doc = yaml.load(f)
version_code = doc.get('versionInfo', None).get('versionCode', None)
version_name = doc.get('versionInfo', None).get(
'versionName', None)
except yaml.YAMLError:
self.log.error("Error in apktool yaml file: %s", yaml_file)
except AttributeError:
self.log.error("sdk versions info is missing in yaml file: %s", yaml_file)
return version_code, version_name
def get_version_name_from_strings_xml(self, strings_xml_file,
attribute_name):
        tree = ElementTree.parse(strings_xml_file)
root = tree.getroot()
for element in root.findall('string'):
if(element.get('name') == attribute_name):
return element.text
def layout_starts_with_merge(self, layout_file):
try:
if etree.parse(layout_file).getroot().tag == 'merge':
return True
except XMLSyntaxError:
self.log.error('Invalid XML Syntax for %s', layout_file)
return False
def main(self, args):
start_time = datetime.datetime.now()
# Configure logging
logging_file = None
logging_level = logging.ERROR
# Create formatter
formatter = logging.Formatter(
'%(asctime)s - %(levelname)s - %(message)s')
# Create console logger and set its formatter and level
logging_console = logging.StreamHandler(sys.stdout)
logging_console.setFormatter(formatter)
logging_console.setLevel(logging.DEBUG)
# Add the console logger
self.log.addHandler(logging_console)
usage_info = "python %prog <root_unpacked_apk_directories> [options]"
description_paragraph = ("DESCRIPTION: A tool for parsing Android xml"
" layout files and storing them in one XML file to simplify"
" UI analysis. It resolves resource references"
" (e.g., @string/cancel_btn) and embeded layouts (e.g., using the"
" <include/> and <merge/>" " tags). The final xml file is saved"
" inside the unpacked apk directory under a sub-directory named ui-xml"
" or to a target directory using the -o option")
# command line parser
parser = OptionParser(
usage=usage_info, description = description_paragraph,
version="%prog 1.0")
parser.add_option("-l", "--log", dest="log_file",
help="write logs to FILE.", metavar="FILE")
parser.add_option("-v", "--verbose", dest="verbose", default=0,
action="count", help="increase verbosity.")
parser.add_option("-o", "--output", dest="target_dir",
help="The name of the directory that the output files get " +
"written to. Default is to write files " +
"inside the unpacked apk directory under a sub-directory named ui-xml.",
metavar="DIR")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments.")
if options.log_file:
logging_file = logging.FileHandler(options.log_file, mode='a',
encoding='utf-8', delay=False)
logging_file.setLevel(logging_level)
logging_file.setFormatter(formatter)
self.log.addHandler(logging_file)
if options.verbose:
levels = [logging.ERROR, logging.INFO, logging.DEBUG]
logging_level = levels[min(len(levels) - 1, options.verbose)]
# Check if source and target directories exist
source_dir = target_dir = None
if options.target_dir:
if os.path.exists(options.target_dir):
target_dir = os.path.abspath(options.target_dir)
else:
sys.exit("Error: target directory " + options.target_dir + " does not exist.")
if os.path.isdir(args[0]):
source_dir = os.path.abspath(args[0])
else:
sys.exit("Error: source directory " + args[0] + " does not exist.")
self.start_main(source_dir, target_dir)
print("======================================================")
print("Finished after " + str(datetime.datetime.now() - start_time))
print("======================================================")
if __name__ == '__main__':
UIXML().main(sys.argv[1:])
| 0.004373 |
#!/usr/bin/python
# Based on getifaddrs.py from pydlnadms [http://code.google.com/p/pydlnadms/].
# Ported to Mac OS X (tested only on Mountain Lion 10.8.2) through updating
# the structures.
from socket import AF_INET, AF_INET6, inet_ntop
from ctypes import (
Structure, Union, POINTER,
pointer, get_errno, cast,
c_ushort, c_byte, c_void_p, c_char_p, c_uint, c_int, c_uint8, c_char, c_uint16, c_uint32
)
import ctypes.util
import ctypes
class struct_sockaddr(Structure):
_fields_ = [
('sa_len', c_uint8),
('sa_family', c_uint8),
('sa_data', c_byte * 14)
]
class struct_sockaddr_in(Structure):
_fields_ = [
('sin_len', c_uint8),
('sin_family', c_uint8),
('sin_port', c_uint16),
('sin_addr', c_byte * 4),
('sin_zero', c_char * 8)
]
class struct_sockaddr_in6(Structure):
_fields_ = [
('sin6_len', c_uint8),
('sin6_family', c_uint8),
('sin6_port', c_uint16),
('sin6_flowinfo', c_uint32),
('sin6_addr', c_byte * 16),
('sin6_scope_id', c_uint32)
]
class struct_ifaddrs(Structure):
pass
struct_ifaddrs._fields_ = [
('ifa_next', POINTER(struct_ifaddrs)),
('ifa_name', c_char_p),
('ifa_flags', c_uint),
('ifa_addr', POINTER(struct_sockaddr)),
('ifa_netmask', POINTER(struct_sockaddr)),
('ifa_dstaddr', POINTER(struct_sockaddr)),
('ifa_data', c_void_p)
]
libc = ctypes.CDLL(ctypes.util.find_library('c'))
def ifap_iter(ifap):
ifa = ifap.contents
while True:
yield ifa
if not ifa.ifa_next:
break
ifa = ifa.ifa_next.contents
def getfamaddr(sa):
family = sa.sa_family
addr = None
if family == AF_INET:
sa = cast(pointer(sa), POINTER(struct_sockaddr_in)).contents
addr = inet_ntop(family, sa.sin_addr)
elif family == AF_INET6:
sa = cast(pointer(sa), POINTER(struct_sockaddr_in6)).contents
addr = inet_ntop(family, sa.sin6_addr)
return family, addr
class NetworkInterface(object):
def __init__(self, name):
self.name = name
self.index = libc.if_nametoindex(name)
self.addresses = {}
def __str__(self):
return "%s [index=%d, IPv4=%s, IPv6=%s]" % (
self.name, self.index,
self.addresses.get(AF_INET),
self.addresses.get(AF_INET6))
def get_network_interfaces():
ifap = POINTER(struct_ifaddrs)()
result = libc.getifaddrs(pointer(ifap))
if result != 0:
raise OSError(get_errno())
del result
try:
retval = {}
for ifa in ifap_iter(ifap):
name = ifa.ifa_name
i = retval.get(name)
if not i:
i = retval[name] = NetworkInterface(name)
family, addr = getfamaddr(ifa.ifa_addr.contents)
if addr:
i.addresses[family] = addr
return retval.values()
finally:
libc.freeifaddrs(ifap)
if __name__ == '__main__':
print [str(ni) for ni in get_network_interfaces()] | 0.008215 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
I am a support module for making SOCKSv4 servers with twistd.
"""
from twisted.protocols import socks
from twisted.python import usage
from twisted.application import internet
class Options(usage.Options):
synopsis = "[-i <interface>] [-p <port>] [-l <file>]"
optParameters = [["interface", "i", "127.0.0.1", "local interface to which we listen"],
["port", "p", 1080, "Port on which to listen"],
["log", "l", None, "file to log connection data to"]]
compData = usage.Completions(
optActions={"log": usage.CompleteFiles("*.log"),
"interface": usage.CompleteNetInterfaces()}
)
longdesc = "Makes a SOCKSv4 server."
def makeService(config):
if config["interface"] != "127.0.0.1":
print
print "WARNING:"
print " You have chosen to listen on a non-local interface."
print " This may allow intruders to access your local network"
print " if you run this on a firewall."
print
t = socks.SOCKSv4Factory(config['log'])
portno = int(config['port'])
return internet.TCPServer(portno, t, interface=config['interface'])
| 0.002408 |
# -*- coding: utf-8 -*-
# pylint: disable-msg=C0301,C0111,W0232,R0201,R0903,W0221
# Copyright (C) 2006-2020 CS GROUP - France
# License: GNU GPL v2 <http://www.gnu.org/licenses/gpl-2.0.html>
from __future__ import unicode_literals
from vigilo.vigiconf.lib.confclasses.validators import arg, Float
from vigilo.vigiconf.lib.confclasses.test import Test
from vigilo.common.gettext import l_
class Load(Test):
"""Check the load of a host"""
oids = [".1.3.6.1.4.1.2021.10.1.5.1"]
@arg(
'warn', Float,
l_('WARNING threshold'),
l_("""
The WARNING and CRITICAL thresholds apply to the "Load 01" value,
and are automatically computed for the "Load 05" and "Load 15"
values by removing respectively 1 and 2 units.
""")
)
@arg(
'crit', Float,
l_('CRITICAL threshold'),
l_("""
The WARNING and CRITICAL thresholds apply to the "Load 01" value,
and are automatically computed for the "Load 05" and "Load 15"
values by removing respectively 1 and 2 units.
""")
)
def add_test(self, warn=20, crit=30):
# Load 01
self.add_collector_service("Load 01", "simple_factor",
[warn, crit, 1.0/100, "Load: %2.2f"],
[ "GET/.1.3.6.1.4.1.2021.10.1.5.1" ])
self.add_collector_metro("Load 01", "directValue", [],
[ "GET/.1.3.6.1.4.1.2021.10.1.5.1" ], "GAUGE")
# Load 05
self.add_collector_service("Load 05", "simple_factor",
[warn-1, crit-1, 1.0/100, "Load: %2.2f"],
[ "GET/.1.3.6.1.4.1.2021.10.1.5.2" ])
self.add_collector_metro("Load 05", "directValue", [],
[ "GET/.1.3.6.1.4.1.2021.10.1.5.2" ], "GAUGE")
# Load 15
self.add_collector_service("Load 15", "simple_factor",
[warn-2, crit-2, 1.0/100, "Load: %2.2f"],
[ "GET/.1.3.6.1.4.1.2021.10.1.5.3" ])
self.add_collector_metro("Load 15", "directValue", [],
[ "GET/.1.3.6.1.4.1.2021.10.1.5.3" ], "GAUGE")
self.add_graph("Load", [ "Load 01", "Load 05", "Load 15" ], "areas",
"load", group="Performance", factors={"Load 01": 0.01,
"Load 05": 0.01,
"Load 15": 0.01})
# vim:set expandtab tabstop=4 shiftwidth=4:
| 0.007987 |
#!/usr/bin/python
#
# linearize-data.py: Construct a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function, division
import json
import struct
import re
import os
import os.path
import base64
import httplib
import sys
import hashlib
import das_hash
import datetime
import time
from collections import namedtuple
settings = {}
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
def calc_hdr_hash(blk_hdr):
#hash1 = hashlib.sha256()
#hash1.update(blk_hdr)
#hash1_o = hash1.digest()
#hash2 = hashlib.sha256()
#hash2.update(hash1_o)
#hash2_o = hash2.digest()
#return hash2_o
pow_hash = das_hash.getPoWHash(blk_hdr)
return pow_hash
def calc_hash_str(blk_hdr):
hash = calc_hdr_hash(blk_hdr)
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
return hash_str
def get_blk_dt(blk_hdr):
members = struct.unpack("<I", blk_hdr[68:68+4])
nTime = members[0]
dt = datetime.datetime.fromtimestamp(nTime)
dt_ym = datetime.datetime(dt.year, dt.month, 1)
return (dt_ym, nTime)
def get_block_hashes(settings):
blkindex = []
f = open(settings['hashlist'], "r")
for line in f:
line = line.rstrip()
blkindex.append(line)
print("Read " + str(len(blkindex)) + " hashes")
return blkindex
def mkblockmap(blkindex):
blkmap = {}
for height,hash in enumerate(blkindex):
blkmap[hash] = height
return blkmap
# Block header and extent on disk
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])
class BlockDataCopier:
def __init__(self, settings, blkindex, blkmap):
self.settings = settings
self.blkindex = blkindex
self.blkmap = blkmap
self.inFn = 0
self.inF = None
self.outFn = 0
self.outsz = 0
self.outF = None
self.outFname = None
self.blkCountIn = 0
self.blkCountOut = 0
self.lastDate = datetime.datetime(2000, 1, 1)
self.highTS = 1408893517 - 315360000
self.timestampSplit = False
self.fileOutput = True
self.setFileTime = False
self.maxOutSz = settings['max_out_sz']
if 'output' in settings:
self.fileOutput = False
if settings['file_timestamp'] != 0:
self.setFileTime = True
if settings['split_timestamp'] != 0:
self.timestampSplit = True
# Extents and cache for out-of-order blocks
self.blockExtents = {}
self.outOfOrderData = {}
self.outOfOrderSize = 0 # running total size for items in outOfOrderData
def writeBlock(self, inhdr, blk_hdr, rawblock):
blockSizeOnDisk = len(inhdr) + len(blk_hdr) + len(rawblock)
if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz):
self.outF.close()
if self.setFileTime:
				os.utime(self.outFname, (int(time.time()), self.highTS))
self.outF = None
self.outFname = None
self.outFn = self.outFn + 1
self.outsz = 0
(blkDate, blkTS) = get_blk_dt(blk_hdr)
if self.timestampSplit and (blkDate > self.lastDate):
print("New month " + blkDate.strftime("%Y-%m") + " @ " + hash_str)
lastDate = blkDate
if outF:
outF.close()
if setFileTime:
os.utime(outFname, (int(time.time()), highTS))
self.outF = None
self.outFname = None
self.outFn = self.outFn + 1
self.outsz = 0
if not self.outF:
if self.fileOutput:
outFname = self.settings['output_file']
else:
outFname = os.path.join(self.settings['output'], "blk%05d.dat" % self.outFn)
print("Output file " + outFname)
self.outF = open(outFname, "wb")
self.outF.write(inhdr)
self.outF.write(blk_hdr)
self.outF.write(rawblock)
self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock)
self.blkCountOut = self.blkCountOut + 1
if blkTS > self.highTS:
self.highTS = blkTS
if (self.blkCountOut % 1000) == 0:
print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' %
(self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex)))
def inFileName(self, fn):
return os.path.join(self.settings['input'], "blk%05d.dat" % fn)
def fetchBlock(self, extent):
'''Fetch block contents from disk given extents'''
with open(self.inFileName(extent.fn), "rb") as f:
f.seek(extent.offset)
return f.read(extent.size)
def copyOneBlock(self):
'''Find the next block to be written in the input, and copy it to the output.'''
extent = self.blockExtents.pop(self.blkCountOut)
if self.blkCountOut in self.outOfOrderData:
# If the data is cached, use it from memory and remove from the cache
rawblock = self.outOfOrderData.pop(self.blkCountOut)
self.outOfOrderSize -= len(rawblock)
else: # Otherwise look up data on disk
rawblock = self.fetchBlock(extent)
self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)
def run(self):
while self.blkCountOut < len(self.blkindex):
if not self.inF:
fname = self.inFileName(self.inFn)
print("Input file " + fname)
try:
self.inF = open(fname, "rb")
except IOError:
print("Premature end of block data")
return
inhdr = self.inF.read(8)
if (not inhdr or (inhdr[0] == "\0")):
self.inF.close()
self.inF = None
self.inFn = self.inFn + 1
continue
inMagic = inhdr[:4]
if (inMagic != self.settings['netmagic']):
print("Invalid magic: " + inMagic.encode('hex'))
return
inLenLE = inhdr[4:]
su = struct.unpack("<I", inLenLE)
inLen = su[0] - 80 # length without header
blk_hdr = self.inF.read(80)
inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)
hash_str = calc_hash_str(blk_hdr)
self.hash_str = hash_str  # keep on the instance so writeBlock() can report it on month splits
if hash_str not in self.blkmap:
print("Skipping unknown block " + hash_str)
self.inF.seek(inLen, os.SEEK_CUR)
continue
blkHeight = self.blkmap[hash_str]
self.blkCountIn += 1
if self.blkCountOut == blkHeight:
# If in-order block, just copy
rawblock = self.inF.read(inLen)
self.writeBlock(inhdr, blk_hdr, rawblock)
# See if we can catch up to prior out-of-order blocks
while self.blkCountOut in self.blockExtents:
self.copyOneBlock()
else: # If out-of-order, skip over block data for now
self.blockExtents[blkHeight] = inExtent
if self.outOfOrderSize < self.settings['out_of_order_cache_sz']:
# If there is space in the cache, read the data
# Reading the data in file sequence instead of seeking and fetching it later is preferred,
# but we don't want to fill up memory
self.outOfOrderData[blkHeight] = self.inF.read(inLen)
self.outOfOrderSize += inLen
else: # If no space in cache, seek forward
self.inF.seek(inLen, os.SEEK_CUR)
print("Done (%i blocks written)" % (self.blkCountOut))
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-data.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
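# For reference, the CONFIG-FILE parsed above is plain "key=value" text, one
# setting per line, with "#" comment lines skipped. A minimal sketch using the
# defaults applied below (output_file has no default; bootstrap.dat is just an
# example name):
#
#   netmagic=cee2caff
#   genesis=00000bafbc94add76cb75e2ec92894837288a481e5c005f6563d91623bf8bc2c
#   input=input
#   hashlist=hashlist.txt
#   output_file=bootstrap.dat
#   max_out_sz=1000000000
#   split_timestamp=0
#   file_timestamp=0
#   out_of_order_cache_sz=100000000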
if 'netmagic' not in settings:
settings['netmagic'] = 'cee2caff'
if 'genesis' not in settings:
settings['genesis'] = '00000bafbc94add76cb75e2ec92894837288a481e5c005f6563d91623bf8bc2c'
if 'input' not in settings:
settings['input'] = 'input'
if 'hashlist' not in settings:
settings['hashlist'] = 'hashlist.txt'
if 'file_timestamp' not in settings:
settings['file_timestamp'] = 0
if 'split_timestamp' not in settings:
settings['split_timestamp'] = 0
if 'max_out_sz' not in settings:
settings['max_out_sz'] = 1000L * 1000 * 1000
if 'out_of_order_cache_sz' not in settings:
settings['out_of_order_cache_sz'] = 100 * 1000 * 1000
settings['max_out_sz'] = long(settings['max_out_sz'])
settings['split_timestamp'] = int(settings['split_timestamp'])
settings['file_timestamp'] = int(settings['file_timestamp'])
settings['netmagic'] = settings['netmagic'].decode('hex')
settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])
if 'output_file' not in settings and 'output' not in settings:
print("Missing output file / directory")
sys.exit(1)
blkindex = get_block_hashes(settings)
blkmap = mkblockmap(blkindex)
if not settings['genesis'] in blkmap:
print("Genesis block not found in hashlist")
else:
BlockDataCopier(settings, blkindex, blkmap).run()
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
"""Provides classes and constants for the XML in the Google Data namespace.
Documentation for the raw XML which these classes represent can be found here:
http://code.google.com/apis/gdata/docs/2.0/elements.html
"""
__author__ = '[email protected] (Jeff Scudder)'
import os
import atom.core
import atom.data
GDATA_TEMPLATE = '{http://schemas.google.com/g/2005}%s'
GD_TEMPLATE = GDATA_TEMPLATE
OPENSEARCH_TEMPLATE_V1 = '{http://a9.com/-/spec/opensearchrss/1.0/}%s'
OPENSEARCH_TEMPLATE_V2 = '{http://a9.com/-/spec/opensearch/1.1/}%s'
BATCH_TEMPLATE = '{http://schemas.google.com/gdata/batch}%s'
# Labels used in batch request entries to specify the desired CRUD operation.
BATCH_INSERT = 'insert'
BATCH_UPDATE = 'update'
BATCH_DELETE = 'delete'
BATCH_QUERY = 'query'
EVENT_LOCATION = 'http://schemas.google.com/g/2005#event'
ALTERNATE_LOCATION = 'http://schemas.google.com/g/2005#event.alternate'
PARKING_LOCATION = 'http://schemas.google.com/g/2005#event.parking'
CANCELED_EVENT = 'http://schemas.google.com/g/2005#event.canceled'
CONFIRMED_EVENT = 'http://schemas.google.com/g/2005#event.confirmed'
TENTATIVE_EVENT = 'http://schemas.google.com/g/2005#event.tentative'
CONFIDENTIAL_EVENT = 'http://schemas.google.com/g/2005#event.confidential'
DEFAULT_EVENT = 'http://schemas.google.com/g/2005#event.default'
PRIVATE_EVENT = 'http://schemas.google.com/g/2005#event.private'
PUBLIC_EVENT = 'http://schemas.google.com/g/2005#event.public'
OPAQUE_EVENT = 'http://schemas.google.com/g/2005#event.opaque'
TRANSPARENT_EVENT = 'http://schemas.google.com/g/2005#event.transparent'
CHAT_MESSAGE = 'http://schemas.google.com/g/2005#message.chat'
INBOX_MESSAGE = 'http://schemas.google.com/g/2005#message.inbox'
SENT_MESSAGE = 'http://schemas.google.com/g/2005#message.sent'
SPAM_MESSAGE = 'http://schemas.google.com/g/2005#message.spam'
STARRED_MESSAGE = 'http://schemas.google.com/g/2005#message.starred'
UNREAD_MESSAGE = 'http://schemas.google.com/g/2005#message.unread'
BCC_RECIPIENT = 'http://schemas.google.com/g/2005#message.bcc'
CC_RECIPIENT = 'http://schemas.google.com/g/2005#message.cc'
SENDER = 'http://schemas.google.com/g/2005#message.from'
REPLY_TO = 'http://schemas.google.com/g/2005#message.reply-to'
TO_RECIPIENT = 'http://schemas.google.com/g/2005#message.to'
ASSISTANT_REL = 'http://schemas.google.com/g/2005#assistant'
CALLBACK_REL = 'http://schemas.google.com/g/2005#callback'
CAR_REL = 'http://schemas.google.com/g/2005#car'
COMPANY_MAIN_REL = 'http://schemas.google.com/g/2005#company_main'
FAX_REL = 'http://schemas.google.com/g/2005#fax'
HOME_REL = 'http://schemas.google.com/g/2005#home'
HOME_FAX_REL = 'http://schemas.google.com/g/2005#home_fax'
ISDN_REL = 'http://schemas.google.com/g/2005#isdn'
MAIN_REL = 'http://schemas.google.com/g/2005#main'
MOBILE_REL = 'http://schemas.google.com/g/2005#mobile'
OTHER_REL = 'http://schemas.google.com/g/2005#other'
OTHER_FAX_REL = 'http://schemas.google.com/g/2005#other_fax'
PAGER_REL = 'http://schemas.google.com/g/2005#pager'
RADIO_REL = 'http://schemas.google.com/g/2005#radio'
TELEX_REL = 'http://schemas.google.com/g/2005#telex'
TTL_TDD_REL = 'http://schemas.google.com/g/2005#tty_tdd'
WORK_REL = 'http://schemas.google.com/g/2005#work'
WORK_FAX_REL = 'http://schemas.google.com/g/2005#work_fax'
WORK_MOBILE_REL = 'http://schemas.google.com/g/2005#work_mobile'
WORK_PAGER_REL = 'http://schemas.google.com/g/2005#work_pager'
NETMEETING_REL = 'http://schemas.google.com/g/2005#netmeeting'
OVERALL_REL = 'http://schemas.google.com/g/2005#overall'
PRICE_REL = 'http://schemas.google.com/g/2005#price'
QUALITY_REL = 'http://schemas.google.com/g/2005#quality'
EVENT_REL = 'http://schemas.google.com/g/2005#event'
EVENT_ALTERNATE_REL = 'http://schemas.google.com/g/2005#event.alternate'
EVENT_PARKING_REL = 'http://schemas.google.com/g/2005#event.parking'
AIM_PROTOCOL = 'http://schemas.google.com/g/2005#AIM'
MSN_PROTOCOL = 'http://schemas.google.com/g/2005#MSN'
YAHOO_MESSENGER_PROTOCOL = 'http://schemas.google.com/g/2005#YAHOO'
SKYPE_PROTOCOL = 'http://schemas.google.com/g/2005#SKYPE'
QQ_PROTOCOL = 'http://schemas.google.com/g/2005#QQ'
GOOGLE_TALK_PROTOCOL = 'http://schemas.google.com/g/2005#GOOGLE_TALK'
ICQ_PROTOCOL = 'http://schemas.google.com/g/2005#ICQ'
JABBER_PROTOCOL = 'http://schemas.google.com/g/2005#JABBER'
REGULAR_COMMENTS = 'http://schemas.google.com/g/2005#regular'
REVIEW_COMMENTS = 'http://schemas.google.com/g/2005#reviews'
MAIL_BOTH = 'http://schemas.google.com/g/2005#both'
MAIL_LETTERS = 'http://schemas.google.com/g/2005#letters'
MAIL_PARCELS = 'http://schemas.google.com/g/2005#parcels'
MAIL_NEITHER = 'http://schemas.google.com/g/2005#neither'
GENERAL_ADDRESS = 'http://schemas.google.com/g/2005#general'
LOCAL_ADDRESS = 'http://schemas.google.com/g/2005#local'
OPTIONAL_ATENDEE = 'http://schemas.google.com/g/2005#event.optional'
REQUIRED_ATENDEE = 'http://schemas.google.com/g/2005#event.required'
ATTENDEE_ACCEPTED = 'http://schemas.google.com/g/2005#event.accepted'
ATTENDEE_DECLINED = 'http://schemas.google.com/g/2005#event.declined'
ATTENDEE_INVITED = 'http://schemas.google.com/g/2005#event.invited'
ATTENDEE_TENTATIVE = 'http://schemas.google.com/g/2005#event.tentative'
FULL_PROJECTION = 'full'
VALUES_PROJECTION = 'values'
BASIC_PROJECTION = 'basic'
PRIVATE_VISIBILITY = 'private'
PUBLIC_VISIBILITY = 'public'
ACL_REL = 'http://schemas.google.com/acl/2007#accessControlList'
class Error(Exception):
pass
class MissingRequiredParameters(Error):
pass
class LinkFinder(atom.data.LinkFinder):
"""Mixin used in Feed and Entry classes to simplify link lookups by type.
Provides lookup methods for edit, edit-media, post, ACL and other special
links which are common across Google Data APIs.
"""
def find_html_link(self):
"""Finds the first link with rel of alternate and type of text/html."""
for link in self.link:
if link.rel == 'alternate' and link.type == 'text/html':
return link.href
return None
FindHtmlLink = find_html_link
def get_html_link(self):
for a_link in self.link:
if a_link.rel == 'alternate' and a_link.type == 'text/html':
return a_link
return None
GetHtmlLink = get_html_link
def find_post_link(self):
"""Get the URL to which new entries should be POSTed.
The POST target URL is used to insert new entries.
Returns:
A str for the URL in the link with a rel matching the POST type.
"""
return self.find_url('http://schemas.google.com/g/2005#post')
FindPostLink = find_post_link
def get_post_link(self):
return self.get_link('http://schemas.google.com/g/2005#post')
GetPostLink = get_post_link
def find_acl_link(self):
acl_link = self.get_acl_link()
if acl_link:
return acl_link.href
return None
FindAclLink = find_acl_link
def get_acl_link(self):
"""Searches for a link or feed_link (if present) with the rel for ACL."""
acl_link = self.get_link(ACL_REL)
if acl_link:
return acl_link
elif hasattr(self, 'feed_link'):
for a_feed_link in self.feed_link:
if a_feed_link.rel == ACL_REL:
return a_feed_link
return None
GetAclLink = get_acl_link
def find_feed_link(self):
return self.find_url('http://schemas.google.com/g/2005#feed')
FindFeedLink = find_feed_link
def get_feed_link(self):
return self.get_link('http://schemas.google.com/g/2005#feed')
GetFeedLink = get_feed_link
def find_previous_link(self):
return self.find_url('previous')
FindPreviousLink = find_previous_link
def get_previous_link(self):
return self.get_link('previous')
GetPreviousLink = get_previous_link
class TotalResults(atom.core.XmlElement):
"""opensearch:TotalResults for a GData feed."""
_qname = (OPENSEARCH_TEMPLATE_V1 % 'totalResults',
OPENSEARCH_TEMPLATE_V2 % 'totalResults')
class StartIndex(atom.core.XmlElement):
"""The opensearch:startIndex element in GData feed."""
_qname = (OPENSEARCH_TEMPLATE_V1 % 'startIndex',
OPENSEARCH_TEMPLATE_V2 % 'startIndex')
class ItemsPerPage(atom.core.XmlElement):
"""The opensearch:itemsPerPage element in GData feed."""
_qname = (OPENSEARCH_TEMPLATE_V1 % 'itemsPerPage',
OPENSEARCH_TEMPLATE_V2 % 'itemsPerPage')
class ExtendedProperty(atom.core.XmlElement):
"""The Google Data extendedProperty element.
Used to store arbitrary key-value information specific to your
application. The value can either be a text string stored as an XML
attribute (.value), or an XML node (XmlBlob) as a child element.
This element is used in the Google Calendar data API and the Google
Contacts data API.
"""
_qname = GDATA_TEMPLATE % 'extendedProperty'
name = 'name'
value = 'value'
def get_xml_blob(self):
"""Returns the XML blob as an atom.core.XmlElement.
Returns:
An XmlElement representing the blob's XML, or None if no
blob was set.
"""
if self._other_elements:
return self._other_elements[0]
else:
return None
GetXmlBlob = get_xml_blob
def set_xml_blob(self, blob):
"""Sets the contents of the extendedProperty to XML as a child node.
Since the extendedProperty is only allowed one child element as an XML
blob, setting the XML blob will erase any preexisting member elements
in this object.
Args:
blob: str or atom.core.XmlElement representing the XML blob stored in
the extendedProperty.
"""
# Erase any existing extension_elements, clears the child nodes from the
# extendedProperty.
if isinstance(blob, atom.core.XmlElement):
self._other_elements = [blob]
else:
self._other_elements = [atom.core.parse(str(blob))]
SetXmlBlob = set_xml_blob
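# Usage sketch for ExtendedProperty (illustrative; the key names and XML below
# are made up, not part of any particular API):
#
#   prop = ExtendedProperty(name='my-key', value='my value')
#   blob_prop = ExtendedProperty(name='my-blob')
#   blob_prop.set_xml_blob('<myns:payload xmlns:myns="urn:example">42</myns:payload>')
#   blob_prop.get_xml_blob()  # returns the parsed atom.core.XmlElement, or None if unset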
class GDEntry(atom.data.Entry, LinkFinder):
"""Extends Atom Entry to provide data processing"""
etag = '{http://schemas.google.com/g/2005}etag'
def get_id(self):
if self.id is not None and self.id.text is not None:
return self.id.text.strip()
return None
GetId = get_id
def is_media(self):
if self.find_edit_media_link():
return True
return False
IsMedia = is_media
def find_media_link(self):
"""Returns the URL to the media content, if the entry is a media entry.
Otherwise returns None.
"""
if self.is_media():
return self.content.src
return None
FindMediaLink = find_media_link
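# Minimal sketch of the LinkFinder helpers on a GDEntry (URLs are illustrative):
#
#   entry = GDEntry()
#   entry.link.append(atom.data.Link(rel='alternate', type='text/html',
#                                    href='http://www.example.com/entry'))
#   entry.find_html_link()  # -> 'http://www.example.com/entry'
#   entry.find_post_link()  # -> None unless a schemas.google.com/g/2005#post link exists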
class GDFeed(atom.data.Feed, LinkFinder):
"""A Feed from a GData service."""
etag = '{http://schemas.google.com/g/2005}etag'
total_results = TotalResults
start_index = StartIndex
items_per_page = ItemsPerPage
entry = [GDEntry]
def get_id(self):
if self.id is not None and self.id.text is not None:
return self.id.text.strip()
return None
GetId = get_id
def get_generator(self):
if self.generator and self.generator.text:
return self.generator.text.strip()
return None
class BatchId(atom.core.XmlElement):
"""Identifies a single operation in a batch request."""
_qname = BATCH_TEMPLATE % 'id'
class BatchOperation(atom.core.XmlElement):
"""The CRUD operation which this batch entry represents."""
_qname = BATCH_TEMPLATE % 'operation'
type = 'type'
class BatchStatus(atom.core.XmlElement):
"""The batch:status element present in a batch response entry.
A status element contains the code (HTTP response code) and
reason as elements. In a single request these fields would
be part of the HTTP response, but in a batch request each
Entry operation has a corresponding Entry in the response
feed which includes status information.
See http://code.google.com/apis/gdata/batch.html#Handling_Errors
"""
_qname = BATCH_TEMPLATE % 'status'
code = 'code'
reason = 'reason'
content_type = 'content-type'
class BatchEntry(GDEntry):
"""An atom:entry for use in batch requests.
The BatchEntry contains additional members to specify the operation to be
performed on this entry and a batch ID so that the server can reference
individual operations in the response feed. For more information, see:
http://code.google.com/apis/gdata/batch.html
"""
batch_operation = BatchOperation
batch_id = BatchId
batch_status = BatchStatus
class BatchInterrupted(atom.core.XmlElement):
"""The batch:interrupted element sent if batch request was interrupted.
Only appears in a feed if some of the batch entries could not be processed.
See: http://code.google.com/apis/gdata/batch.html#Handling_Errors
"""
_qname = BATCH_TEMPLATE % 'interrupted'
reason = 'reason'
success = 'success'
failures = 'failures'
parsed = 'parsed'
class BatchFeed(GDFeed):
"""A feed containing a list of batch request entries."""
interrupted = BatchInterrupted
entry = [BatchEntry]
def add_batch_entry(self, entry=None, id_url_string=None,
batch_id_string=None, operation_string=None):
"""Logic for populating members of a BatchEntry and adding to the feed.
If the entry is not a BatchEntry, it is converted to a BatchEntry so
that the batch specific members will be present.
The id_url_string can be used in place of an entry if the batch operation
applies to a URL. For example query and delete operations require just
the URL of an entry, no body is sent in the HTTP request. If an
id_url_string is sent instead of an entry, a BatchEntry is created and
added to the feed.
This method also assigns the desired batch id to the entry so that it
can be referenced in the server's response. If the batch_id_string is
None, this method will assign a batch_id to be the index at which this
entry will be in the feed's entry list.
Args:
entry: BatchEntry, atom.data.Entry, or another Entry flavor (optional)
The entry which will be sent to the server as part of the batch
request. The item must have a valid atom id so that the server
knows which entry this request references.
id_url_string: str (optional) The URL of the entry to be acted on. You
can find this URL in the text member of the atom id for an entry.
If an entry is not sent, this id will be used to construct a new
BatchEntry which will be added to the request feed.
batch_id_string: str (optional) The batch ID to be used to reference
this batch operation in the results feed. If this parameter is None,
the current length of the feed's entry array will be used as a
count. Note that batch_ids should either always be specified or
never, mixing could potentially result in duplicate batch ids.
operation_string: str (optional) The desired batch operation which will
set the batch_operation.type member of the entry. Options are
'insert', 'update', 'delete', and 'query'
Raises:
MissingRequiredParameters: Raised if neither an id_url_string nor an
entry are provided in the request.
Returns:
The added entry.
"""
if entry is None and id_url_string is None:
raise MissingRequiredParameters('supply either an entry or URL string')
if entry is None and id_url_string is not None:
entry = BatchEntry(id=atom.data.Id(text=id_url_string))
if batch_id_string is not None:
entry.batch_id = BatchId(text=batch_id_string)
elif entry.batch_id is None or entry.batch_id.text is None:
entry.batch_id = BatchId(text=str(len(self.entry)))
if operation_string is not None:
entry.batch_operation = BatchOperation(type=operation_string)
self.entry.append(entry)
return entry
AddBatchEntry = add_batch_entry
def add_insert(self, entry, batch_id_string=None):
"""Add an insert request to the operations in this batch request feed.
If the entry doesn't yet have an operation or a batch id, these will
be set to the insert operation and a batch_id specified as a parameter.
Args:
entry: BatchEntry The entry which will be sent in the batch feed as an
insert request.
batch_id_string: str (optional) The batch ID to be used to reference
this batch operation in the results feed. If this parameter is None,
the current length of the feed's entry array will be used as a
count. Note that batch_ids should either always be specified or
never, mixing could potentially result in duplicate batch ids.
"""
self.add_batch_entry(entry=entry, batch_id_string=batch_id_string,
operation_string=BATCH_INSERT)
AddInsert = add_insert
def add_update(self, entry, batch_id_string=None):
"""Add an update request to the list of batch operations in this feed.
Sets the operation type of the entry to update if it is not already set
and assigns the desired batch id to the entry so that it can be
referenced in the server's response.
Args:
entry: BatchEntry The entry which will be sent to the server as an
update (HTTP PUT) request. The item must have a valid atom id
so that the server knows which entry to replace.
batch_id_string: str (optional) The batch ID to be used to reference
this batch operation in the results feed. If this parameter is None,
the current length of the feed's entry array will be used as a
count. See also comments for AddInsert.
"""
self.add_batch_entry(entry=entry, batch_id_string=batch_id_string,
operation_string=BATCH_UPDATE)
AddUpdate = add_update
def add_delete(self, url_string=None, entry=None, batch_id_string=None):
"""Adds a delete request to the batch request feed.
This method takes either the url_string which is the atom id of the item
to be deleted, or the entry itself. The atom id of the entry must be
present so that the server knows which entry should be deleted.
Args:
url_string: str (optional) The URL of the entry to be deleted. You can
find this URL in the text member of the atom id for an entry.
entry: BatchEntry (optional) The entry to be deleted.
batch_id_string: str (optional)
Raises:
MissingRequiredParameters: Raised if neither a url_string nor an entry
are provided in the request.
"""
self.add_batch_entry(entry=entry, id_url_string=url_string,
batch_id_string=batch_id_string, operation_string=BATCH_DELETE)
AddDelete = add_delete
def add_query(self, url_string=None, entry=None, batch_id_string=None):
"""Adds a query request to the batch request feed.
This method takes either a url_string, which is the query URL whose
results will be added to the result feed, or an entry. The query URL will
be encapsulated in a BatchEntry, and you may pass in the BatchEntry
with a query URL instead of sending a url_string.
Args:
url_string: str (optional)
entry: BatchEntry (optional)
batch_id_string: str (optional)
Raises:
MissingRequiredParameters
"""
self.add_batch_entry(entry=entry, id_url_string=url_string,
batch_id_string=batch_id_string, operation_string=BATCH_QUERY)
AddQuery = add_query
def find_batch_link(self):
return self.find_url('http://schemas.google.com/g/2005#batch')
FindBatchLink = find_batch_link
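# Minimal sketch of assembling a batch request feed (URLs and titles are
# illustrative). Each call assigns a batch_id (the entry's index by default)
# and a batch_operation, which the server echoes back via batch_status in the
# response feed:
#
#   feed = BatchFeed()
#   feed.add_insert(BatchEntry(title=atom.data.Title(text='new entry')))
#   feed.add_delete(url_string='http://www.example.com/feed/entry1')
#   feed.add_query(url_string='http://www.example.com/feed/entry2')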
class EntryLink(atom.core.XmlElement):
"""The gd:entryLink element.
Represents a logically nested entry. For example, a <gd:who>
representing a contact might have a nested entry from a contact feed.
"""
_qname = GDATA_TEMPLATE % 'entryLink'
entry = GDEntry
rel = 'rel'
read_only = 'readOnly'
href = 'href'
class FeedLink(atom.core.XmlElement):
"""The gd:feedLink element.
Represents a logically nested feed. For example, a calendar feed might
have a nested feed representing all comments on entries.
"""
_qname = GDATA_TEMPLATE % 'feedLink'
feed = GDFeed
rel = 'rel'
read_only = 'readOnly'
count_hint = 'countHint'
href = 'href'
class AdditionalName(atom.core.XmlElement):
"""The gd:additionalName element.
Specifies additional (eg. middle) name of the person.
Contains an attribute for the phonetic representation of the name.
"""
_qname = GDATA_TEMPLATE % 'additionalName'
yomi = 'yomi'
class Comments(atom.core.XmlElement):
"""The gd:comments element.
Contains a comments feed for the enclosing entry (such as a calendar event).
"""
_qname = GDATA_TEMPLATE % 'comments'
rel = 'rel'
feed_link = FeedLink
class Country(atom.core.XmlElement):
"""The gd:country element.
Country name along with optional country code. The country code is
given in accordance with ISO 3166-1 alpha-2:
http://www.iso.org/iso/iso-3166-1_decoding_table
"""
_qname = GDATA_TEMPLATE % 'country'
code = 'code'
class EmailImParent(atom.core.XmlElement):
address = 'address'
label = 'label'
rel = 'rel'
primary = 'primary'
class Email(EmailImParent):
"""The gd:email element.
An email address associated with the containing entity (which is
usually an entity representing a person or a location).
"""
_qname = GDATA_TEMPLATE % 'email'
display_name = 'displayName'
class FamilyName(atom.core.XmlElement):
"""The gd:familyName element.
Specifies family name of the person, eg. "Smith".
"""
_qname = GDATA_TEMPLATE % 'familyName'
yomi = 'yomi'
class Im(EmailImParent):
"""The gd:im element.
An instant messaging address associated with the containing entity.
"""
_qname = GDATA_TEMPLATE % 'im'
protocol = 'protocol'
class GivenName(atom.core.XmlElement):
"""The gd:givenName element.
Specifies given name of the person, eg. "John".
"""
_qname = GDATA_TEMPLATE % 'givenName'
yomi = 'yomi'
class NamePrefix(atom.core.XmlElement):
"""The gd:namePrefix element.
Honorific prefix, eg. 'Mr' or 'Mrs'.
"""
_qname = GDATA_TEMPLATE % 'namePrefix'
class NameSuffix(atom.core.XmlElement):
"""The gd:nameSuffix element.
Honorific suffix, eg. 'san' or 'III'.
"""
_qname = GDATA_TEMPLATE % 'nameSuffix'
class FullName(atom.core.XmlElement):
"""The gd:fullName element.
Unstructured representation of the name.
"""
_qname = GDATA_TEMPLATE % 'fullName'
class Name(atom.core.XmlElement):
"""The gd:name element.
Allows storing a person's name in a structured way. Consists of
given name, additional name, family name, prefix, suffix and full name.
"""
_qname = GDATA_TEMPLATE % 'name'
given_name = GivenName
additional_name = AdditionalName
family_name = FamilyName
name_prefix = NamePrefix
name_suffix = NameSuffix
full_name = FullName
class OrgDepartment(atom.core.XmlElement):
"""The gd:orgDepartment element.
Describes a department within an organization. Must appear within a
gd:organization element.
"""
_qname = GDATA_TEMPLATE % 'orgDepartment'
class OrgJobDescription(atom.core.XmlElement):
"""The gd:orgJobDescription element.
Describes a job within an organization. Must appear within a
gd:organization element.
"""
_qname = GDATA_TEMPLATE % 'orgJobDescription'
class OrgName(atom.core.XmlElement):
"""The gd:orgName element.
The name of the organization. Must appear within a gd:organization
element.
Contains a Yomigana attribute (Japanese reading aid) for the
organization name.
"""
_qname = GDATA_TEMPLATE % 'orgName'
yomi = 'yomi'
class OrgSymbol(atom.core.XmlElement):
"""The gd:orgSymbol element.
Provides a symbol of an organization. Must appear within a
gd:organization element.
"""
_qname = GDATA_TEMPLATE % 'orgSymbol'
class OrgTitle(atom.core.XmlElement):
"""The gd:orgTitle element.
The title of a person within an organization. Must appear within a
gd:organization element.
"""
_qname = GDATA_TEMPLATE % 'orgTitle'
class Organization(atom.core.XmlElement):
"""The gd:organization element.
An organization, typically associated with a contact.
"""
_qname = GDATA_TEMPLATE % 'organization'
label = 'label'
primary = 'primary'
rel = 'rel'
department = OrgDepartment
job_description = OrgJobDescription
name = OrgName
symbol = OrgSymbol
title = OrgTitle
class When(atom.core.XmlElement):
"""The gd:when element.
Represents a period of time or an instant.
"""
_qname = GDATA_TEMPLATE % 'when'
end = 'endTime'
start = 'startTime'
value = 'valueString'
class OriginalEvent(atom.core.XmlElement):
"""The gd:originalEvent element.
Equivalent to the Recurrence ID property specified in section 4.8.4.4
of RFC 2445. Appears in every instance of a recurring event, to identify
the original event.
Contains a <gd:when> element specifying the original start time of the
instance that has become an exception.
"""
_qname = GDATA_TEMPLATE % 'originalEvent'
id = 'id'
href = 'href'
when = When
class PhoneNumber(atom.core.XmlElement):
"""The gd:phoneNumber element.
A phone number associated with the containing entity (which is usually
an entity representing a person or a location).
"""
_qname = GDATA_TEMPLATE % 'phoneNumber'
label = 'label'
rel = 'rel'
uri = 'uri'
primary = 'primary'
class PostalAddress(atom.core.XmlElement):
"""The gd:postalAddress element."""
_qname = GDATA_TEMPLATE % 'postalAddress'
label = 'label'
rel = 'rel'
uri = 'uri'
primary = 'primary'
class Rating(atom.core.XmlElement):
"""The gd:rating element.
Represents a numeric rating of the enclosing entity, such as a
comment. Each rating supplies its own scale, although it may be
normalized by a service; for example, some services might convert all
ratings to a scale from 1 to 5.
"""
_qname = GDATA_TEMPLATE % 'rating'
average = 'average'
max = 'max'
min = 'min'
num_raters = 'numRaters'
rel = 'rel'
value = 'value'
class Recurrence(atom.core.XmlElement):
"""The gd:recurrence element.
Represents the dates and times when a recurring event takes place.
The string that defines the recurrence consists of a set of properties,
each of which is defined in the iCalendar standard (RFC 2445).
Specifically, the string usually begins with a DTSTART property that
indicates the starting time of the first instance of the event, and
often a DTEND property or a DURATION property to indicate when the
first instance ends. Next come RRULE, RDATE, EXRULE, and/or EXDATE
properties, which collectively define a recurring event and its
exceptions (but see below). (See section 4.8.5 of RFC 2445 for more
information about these recurrence component properties.) Last comes a
VTIMEZONE component, providing detailed timezone rules for any timezone
ID mentioned in the preceding properties.
Google services like Google Calendar don't generally generate EXRULE
and EXDATE properties to represent exceptions to recurring events;
instead, they generate <gd:recurrenceException> elements. However,
Google services may include EXRULE and/or EXDATE properties anyway;
for example, users can import events and exceptions into Calendar, and
if those imported events contain EXRULE or EXDATE properties, then
Calendar will provide those properties when it sends a <gd:recurrence>
element.
Note that the use of <gd:recurrenceException> means that you can't be
sure just from examining a <gd:recurrence> element whether there are
any exceptions to the recurrence description. To ensure that you find
all exceptions, look for <gd:recurrenceException> elements in the feed,
and use their <gd:originalEvent> elements to match them up with
<gd:recurrence> elements.
"""
_qname = GDATA_TEMPLATE % 'recurrence'
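# Illustrative example (not taken from a real feed) of the text content carried
# by a gd:recurrence element, using the RFC 2445 properties described above:
#
#   DTSTART;TZID=America/Los_Angeles:20070514T130000
#   DURATION:PT3600S
#   RRULE:FREQ=WEEKLY;BYDAY=TU,TH;UNTIL=20070904T000000Z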
class RecurrenceException(atom.core.XmlElement):
"""The gd:recurrenceException element.
Represents an event that's an exception to a recurring event; that is,
an instance of a recurring event in which one or more aspects of the
recurring event (such as attendance list, time, or location) have been
changed.
Contains a <gd:originalEvent> element that specifies the original
recurring event that this event is an exception to.
When you change an instance of a recurring event, that instance becomes
an exception. Depending on what change you made to it, the exception
behaves in either of two different ways when the original recurring
event is changed:
- If you add, change, or remove comments, attendees, or attendee
responses, then the exception remains tied to the original event, and
changes to the original event also change the exception.
- If you make any other changes to the exception (such as changing the
time or location) then the instance becomes "specialized," which means
that it's no longer as tightly tied to the original event. If you
change the original event, specialized exceptions don't change. But
see below.
For example, say you have a meeting every Tuesday and Thursday at
2:00 p.m. If you change the attendance list for this Thursday's meeting
(but not for the regularly scheduled meeting), then it becomes an
exception. If you change the time for this Thursday's meeting (but not
for the regularly scheduled meeting), then it becomes specialized.
Regardless of whether an exception is specialized or not, if you do
something that deletes the instance that the exception was derived from,
then the exception is deleted. Note that changing the day or time of a
recurring event deletes all instances, and creates new ones.
For example, after you've specialized this Thursday's meeting, say you
change the recurring meeting to happen on Monday, Wednesday, and Friday.
That change deletes all of the recurring instances of the
Tuesday/Thursday meeting, including the specialized one.
If a particular instance of a recurring event is deleted, then that
instance appears as a <gd:recurrenceException> containing a
<gd:entryLink> that has its <gd:eventStatus> set to
"http://schemas.google.com/g/2005#event.canceled". (For more
information about canceled events, see RFC 2445.)
"""
_qname = GDATA_TEMPLATE % 'recurrenceException'
specialized = 'specialized'
entry_link = EntryLink
original_event = OriginalEvent
class Reminder(atom.core.XmlElement):
"""The gd:reminder element.
A time interval, indicating how long before the containing entity's start
time or due time attribute a reminder should be issued. Alternatively,
may specify an absolute time at which a reminder should be issued. Also
specifies a notification method, indicating what medium the system
should use to remind the user.
"""
_qname = GDATA_TEMPLATE % 'reminder'
absolute_time = 'absoluteTime'
method = 'method'
days = 'days'
hours = 'hours'
minutes = 'minutes'
class Agent(atom.core.XmlElement):
"""The gd:agent element.
The agent who actually receives the mail. Used in work addresses.
Also for 'in care of' or 'c/o'.
"""
_qname = GDATA_TEMPLATE % 'agent'
class HouseName(atom.core.XmlElement):
"""The gd:housename element.
Used in places where houses or buildings have names (and not
necessarily numbers), eg. "The Pillars".
"""
_qname = GDATA_TEMPLATE % 'housename'
class Street(atom.core.XmlElement):
"""The gd:street element.
Can be street, avenue, road, etc. This element also includes the
house number and room/apartment/flat/floor number.
"""
_qname = GDATA_TEMPLATE % 'street'
class PoBox(atom.core.XmlElement):
"""The gd:pobox element.
Covers actual P.O. boxes, drawers, locked bags, etc. This is usually
but not always mutually exclusive with street.
"""
_qname = GDATA_TEMPLATE % 'pobox'
class Neighborhood(atom.core.XmlElement):
"""The gd:neighborhood element.
This is used to disambiguate a street address when a city contains more
than one street with the same name, or to specify a small place whose
mail is routed through a larger postal town. In China it could be a
county or a minor city.
"""
_qname = GDATA_TEMPLATE % 'neighborhood'
class City(atom.core.XmlElement):
"""The gd:city element.
Can be city, village, town, borough, etc. This is the postal town and
not necessarily the place of residence or place of business.
"""
_qname = GDATA_TEMPLATE % 'city'
class Subregion(atom.core.XmlElement):
"""The gd:subregion element.
Handles administrative districts such as U.S. or U.K. counties that are
not used for mail addressing purposes. Subregion is not intended for
delivery addresses.
"""
_qname = GDATA_TEMPLATE % 'subregion'
class Region(atom.core.XmlElement):
"""The gd:region element.
A state, province, county (in Ireland), Land (in Germany),
departement (in France), etc.
"""
_qname = GDATA_TEMPLATE % 'region'
class Postcode(atom.core.XmlElement):
"""The gd:postcode element.
Postal code. Usually country-wide, but sometimes specific to the
city (e.g. "2" in "Dublin 2, Ireland" addresses).
"""
_qname = GDATA_TEMPLATE % 'postcode'
class Country(atom.core.XmlElement):
"""The gd:country element.
The name or code of the country.
"""
_qname = GDATA_TEMPLATE % 'country'
class FormattedAddress(atom.core.XmlElement):
"""The gd:formattedAddress element.
The full, unstructured postal address.
"""
_qname = GDATA_TEMPLATE % 'formattedAddress'
class StructuredPostalAddress(atom.core.XmlElement):
"""The gd:structuredPostalAddress element.
Postal address split into components. It allows the address to be stored
in a locale-independent format. The fields can be interpreted and used
to generate a formatted, locale-dependent address. The following elements
represent parts of the address: agent, house name, street, P.O. box,
neighborhood, city, subregion, region, postal code, country. The
subregion element is not used for postal addresses; it is provided for
extended uses of addresses only. To store a postal address in an
unstructured form, the formatted address field is provided.
"""
_qname = GDATA_TEMPLATE % 'structuredPostalAddress'
rel = 'rel'
mail_class = 'mailClass'
usage = 'usage'
label = 'label'
primary = 'primary'
agent = Agent
house_name = HouseName
street = Street
po_box = PoBox
neighborhood = Neighborhood
city = City
subregion = Subregion
region = Region
postcode = Postcode
country = Country
formatted_address = FormattedAddress
class Where(atom.core.XmlElement):
"""The gd:where element.
A place (such as an event location) associated with the containing
entity. The type of the association is determined by the rel attribute;
the details of the location are contained in an embedded or linked-to
Contact entry.
A <gd:where> element is more general than a <gd:geoPt> element. The
former identifies a place using a text description and/or a Contact
entry, while the latter identifies a place using a specific geographic
location.
"""
_qname = GDATA_TEMPLATE % 'where'
label = 'label'
rel = 'rel'
value = 'valueString'
entry_link = EntryLink
class AttendeeType(atom.core.XmlElement):
"""The gd:attendeeType element."""
_qname = GDATA_TEMPLATE % 'attendeeType'
value = 'value'
class AttendeeStatus(atom.core.XmlElement):
"""The gd:attendeeStatus element."""
_qname = GDATA_TEMPLATE % 'attendeeStatus'
value = 'value'
class Who(atom.core.XmlElement):
"""The gd:who element.
A person associated with the containing entity. The type of the
association is determined by the rel attribute; the details about the
person are contained in an embedded or linked-to Contact entry.
The <gd:who> element can be used to specify email senders and
recipients, calendar event organizers, and so on.
"""
_qname = GDATA_TEMPLATE % 'who'
email = 'email'
rel = 'rel'
value = 'valueString'
attendee_status = AttendeeStatus
attendee_type = AttendeeType
entry_link = EntryLink
class Deleted(atom.core.XmlElement):
"""gd:deleted when present, indicates the containing entry is deleted."""
_qname = GD_TEMPLATE % 'deleted'
class Money(atom.core.XmlElement):
"""Describes money"""
_qname = GD_TEMPLATE % 'money'
amount = 'amount'
currency_code = 'currencyCode'
class MediaSource(object):
"""GData Entries can refer to media sources, so this class provides a
place to store references to these objects along with some metadata.
"""
def __init__(self, file_handle=None, content_type=None, content_length=None,
file_path=None, file_name=None):
"""Creates an object of type MediaSource.
Args:
file_handle: A file handle pointing to the file to be encapsulated in the
MediaSource.
content_type: string The MIME type of the file. Required if a file_handle
is given.
content_length: int The size of the file. Required if a file_handle is
given.
file_path: string (optional) A full path name to the file. Used in
place of a file_handle.
file_name: string The name of the file without any path information.
Required if a file_handle is given.
"""
self.file_handle = file_handle
self.content_type = content_type
self.content_length = content_length
self.file_name = file_name
if (file_handle is None and content_type is not None and
file_path is not None):
self.set_file_handle(file_path, content_type)
def set_file_handle(self, file_name, content_type):
"""A helper function which can create a file handle from a given filename
and set the content type and length all at once.
Args:
file_name: string The path and file name to the file containing the media
content_type: string A MIME type representing the type of the media
"""
self.file_handle = open(file_name, 'rb')
self.content_type = content_type
self.content_length = os.path.getsize(file_name)
self.file_name = os.path.basename(file_name)
SetFileHandle = set_file_handle
def modify_request(self, http_request):
http_request.add_body_part(self.file_handle, self.content_type,
self.content_length)
return http_request
ModifyRequest = modify_request
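# Minimal sketch of creating a MediaSource (path and MIME type are
# illustrative). Passing file_path lets set_file_handle derive the handle,
# content length and file name; otherwise supply file_handle, content_type,
# content_length and file_name explicitly:
#
#   ms = MediaSource(file_path='/tmp/photo.jpg', content_type='image/jpeg')
#   ms.content_length  # size of /tmp/photo.jpg in bytes
#   ms.file_name       # 'photo.jpg'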
"""Unit tests for the Paver server tasks."""
import ddt
import os
from paver.easy import call_task
from .utils import PaverTestCase
EXPECTED_COFFEE_COMMAND = (
"node_modules/.bin/coffee --compile `find {platform_root}/lms "
"{platform_root}/cms {platform_root}/common -type f -name \"*.coffee\"`"
)
EXPECTED_SASS_COMMAND = (
"sass --update --cache-location /tmp/sass-cache --default-encoding utf-8 --style compressed"
" --quiet --load-path . --load-path common/static --load-path common/static/sass"
" --load-path lms/static/sass --load-path lms/static/certificates/sass"
" --load-path cms/static/sass --load-path common/static/sass"
" lms/static/sass:lms/static/css lms/static/certificates/sass:lms/static/certificates/css"
" cms/static/sass:cms/static/css common/static/sass:common/static/css"
)
EXPECTED_PREPROCESS_ASSETS_COMMAND = (
"python manage.py {system} --settings={asset_settings} preprocess_assets"
)
EXPECTED_COLLECT_STATIC_COMMAND = (
"python manage.py {system} --settings={asset_settings} collectstatic --noinput > /dev/null"
)
EXPECTED_CELERY_COMMAND = (
"python manage.py lms --settings={settings} celery worker --beat --loglevel=INFO --pythonpath=."
)
EXPECTED_RUN_SERVER_COMMAND = (
"python manage.py {system} --settings={settings} runserver --traceback --pythonpath=. 0.0.0.0:{port}"
)
EXPECTED_INDEX_COURSE_COMMAND = (
"python manage.py {system} --settings={settings} reindex_course --setup"
)
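# For example, EXPECTED_RUN_SERVER_COMMAND.format(system="lms", settings="devstack", port=8000)
# expands to:
#   python manage.py lms --settings=devstack runserver --traceback --pythonpath=. 0.0.0.0:8000
# which is the literal string the assertions below compare against the recorded task messages.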
@ddt.ddt
class TestPaverServerTasks(PaverTestCase):
"""
Test the Paver server tasks.
"""
@ddt.data(
[{}],
[{"settings": "aws"}],
[{"asset-settings": "test_static_optimized"}],
[{"settings": "devstack_optimized", "asset-settings": "test_static_optimized"}],
[{"fast": True}],
[{"port": 8030}],
)
@ddt.unpack
def test_lms(self, options):
"""
Test the "lms" task.
"""
self.verify_server_task("lms", options)
@ddt.data(
[{}],
[{"settings": "aws"}],
[{"asset-settings": "test_static_optimized"}],
[{"settings": "devstack_optimized", "asset-settings": "test_static_optimized"}],
[{"fast": True}],
[{"port": 8031}],
)
@ddt.unpack
def test_studio(self, options):
"""
Test the "studio" task.
"""
self.verify_server_task("studio", options)
@ddt.data(
[{}],
[{"settings": "aws"}],
[{"asset-settings": "test_static_optimized"}],
[{"settings": "devstack_optimized", "asset-settings": "test_static_optimized"}],
[{"fast": True}],
[{"optimized": True}],
[{"optimized": True, "fast": True}],
[{"no-contracts": True}],
)
@ddt.unpack
def test_devstack(self, server_options):
"""
Test the "devstack" task.
"""
options = server_options.copy()
is_optimized = options.get("optimized", False)
expected_settings = "devstack_optimized" if is_optimized else options.get("settings", "devstack")
# First test with LMS
options["system"] = "lms"
options["expected_messages"] = [
EXPECTED_INDEX_COURSE_COMMAND.format(
system="cms",
settings=expected_settings,
)
]
self.verify_server_task("devstack", options, contracts_default=True)
# Then test with Studio
options["system"] = "cms"
options["expected_messages"] = [
EXPECTED_INDEX_COURSE_COMMAND.format(
system="cms",
settings=expected_settings,
)
]
self.verify_server_task("devstack", options, contracts_default=True)
@ddt.data(
[{}],
[{"settings": "aws"}],
[{"asset_settings": "test_static_optimized"}],
[{"settings": "devstack_optimized", "asset-settings": "test_static_optimized"}],
[{"fast": True}],
[{"optimized": True}],
[{"optimized": True, "fast": True}],
)
@ddt.unpack
def test_run_all_servers(self, options):
"""
Test the "run_all_servers" task.
"""
self.verify_run_all_servers_task(options)
@ddt.data(
[{}],
[{"settings": "aws"}],
)
@ddt.unpack
def test_celery(self, options):
"""
Test the "celery" task.
"""
settings = options.get("settings", "dev_with_worker")
call_task("pavelib.servers.celery", options=options)
self.assertEquals(self.task_messages, [EXPECTED_CELERY_COMMAND.format(settings=settings)])
@ddt.data(
[{}],
[{"settings": "aws"}],
)
@ddt.unpack
def test_update_db(self, options):
"""
Test the "update_db" task.
"""
settings = options.get("settings", "devstack")
call_task("pavelib.servers.update_db", options=options)
db_command = "python manage.py {server} --settings={settings} syncdb --migrate --traceback --pythonpath=."
self.assertEquals(
self.task_messages,
[
db_command.format(server="lms", settings=settings),
db_command.format(server="cms", settings=settings),
]
)
@ddt.data(
["lms", {}],
["lms", {"settings": "aws"}],
["cms", {}],
["cms", {"settings": "aws"}],
)
@ddt.unpack
def test_check_settings(self, system, options):
"""
Test the "check_settings" task.
"""
settings = options.get("settings", "devstack")
call_task("pavelib.servers.check_settings", args=[system, settings])
self.assertEquals(
self.task_messages,
[
"echo 'import {system}.envs.{settings}' "
"| python manage.py {system} --settings={settings} shell --plain --pythonpath=.".format(
system=system, settings=settings
),
]
)
def verify_server_task(self, task_name, options, contracts_default=False):
"""
Verify the output of a server task.
"""
settings = options.get("settings", None)
asset_settings = options.get("asset-settings", None)
is_optimized = options.get("optimized", False)
is_fast = options.get("fast", False)
no_contracts = options.get("no-contracts", not contracts_default)
if task_name == "devstack":
system = options.get("system")
elif task_name == "studio":
system = "cms"
else:
system = "lms"
port = options.get("port", "8000" if system == "lms" else "8001")
self.reset_task_messages()
if task_name == "devstack":
args = ["studio" if system == "cms" else system]
if settings:
args.append("--settings={settings}".format(settings=settings))
if asset_settings:
args.append("--asset-settings={asset_settings}".format(asset_settings=asset_settings))
if is_optimized:
args.append("--optimized")
if is_fast:
args.append("--fast")
if no_contracts:
args.append("--no-contracts")
call_task("pavelib.servers.devstack", args=args)
else:
call_task("pavelib.servers.{task_name}".format(task_name=task_name), options=options)
expected_messages = options.get("expected_messages", [])
expected_settings = settings if settings else "devstack"
expected_asset_settings = asset_settings if asset_settings else expected_settings
if is_optimized:
expected_settings = "devstack_optimized"
expected_asset_settings = "test_static_optimized"
expected_collect_static = not is_fast and expected_settings != "devstack"
platform_root = os.getcwd()
if not is_fast:
expected_messages.append(EXPECTED_PREPROCESS_ASSETS_COMMAND.format(
system=system, asset_settings=expected_asset_settings
))
expected_messages.append("xmodule_assets common/static/xmodule")
expected_messages.append(EXPECTED_COFFEE_COMMAND.format(platform_root=platform_root))
expected_messages.append(EXPECTED_SASS_COMMAND)
if expected_collect_static:
expected_messages.append(EXPECTED_COLLECT_STATIC_COMMAND.format(
system=system, asset_settings=expected_asset_settings
))
expected_run_server_command = EXPECTED_RUN_SERVER_COMMAND.format(
system=system,
settings=expected_settings,
port=port,
)
if not no_contracts:
expected_run_server_command += " --contracts"
expected_messages.append(expected_run_server_command)
self.assertEquals(self.task_messages, expected_messages)
def verify_run_all_servers_task(self, options):
"""
Verify the output of the "run_all_servers" task.
"""
settings = options.get("settings", None)
asset_settings = options.get("asset_settings", None)
is_optimized = options.get("optimized", False)
is_fast = options.get("fast", False)
self.reset_task_messages()
call_task("pavelib.servers.run_all_servers", options=options)
expected_settings = settings if settings else "devstack"
expected_asset_settings = asset_settings if asset_settings else expected_settings
if is_optimized:
expected_settings = "devstack_optimized"
expected_asset_settings = "test_static_optimized"
expected_collect_static = not is_fast and expected_settings != "devstack"
platform_root = os.getcwd()
expected_messages = []
if not is_fast:
expected_messages.append(EXPECTED_PREPROCESS_ASSETS_COMMAND.format(
system="lms", asset_settings=expected_asset_settings
))
expected_messages.append(EXPECTED_PREPROCESS_ASSETS_COMMAND.format(
system="cms", asset_settings=expected_asset_settings
))
expected_messages.append("xmodule_assets common/static/xmodule")
expected_messages.append(EXPECTED_COFFEE_COMMAND.format(platform_root=platform_root))
expected_messages.append(EXPECTED_SASS_COMMAND)
if expected_collect_static:
expected_messages.append(EXPECTED_COLLECT_STATIC_COMMAND.format(
system="lms", asset_settings=expected_asset_settings
))
expected_messages.append(EXPECTED_COLLECT_STATIC_COMMAND.format(
system="cms", asset_settings=expected_asset_settings
))
expected_messages.append(
EXPECTED_RUN_SERVER_COMMAND.format(
system="lms",
settings=expected_settings,
port=8000,
)
)
expected_messages.append(
EXPECTED_RUN_SERVER_COMMAND.format(
system="cms",
settings=expected_settings,
port=8001,
)
)
expected_messages.append(EXPECTED_CELERY_COMMAND.format(settings="dev_with_worker"))
self.assertEquals(self.task_messages, expected_messages)
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: docker_image
short_description: Manage docker images.
version_added: "1.5"
description:
- Build, load or pull an image, making the image available for creating containers. Also supports tagging an
image into a repository and archiving an image to a .tar file.
- Since Ansible 2.8, it is recommended to explicitly specify the image's source (C(source=build),
C(source=load), C(source=pull) or C(source=local)). This will be required from Ansible 2.12 on.
options:
source:
description:
- "Determines where the module will try to retrieve the image from."
- "Use C(build) to build the image from a C(Dockerfile). I(path) must
be specified when this value is used."
- "Use C(load) to load the image from a C(.tar) file. I(load_path) must
be specified when this value is used."
- "Use C(pull) to pull the image from a registry."
- "Use C(local) to make sure that the image is already available on the local
docker daemon, i.e. do not try to build, pull or load the image."
- "Before Ansible 2.12, the value of this option will be auto-detected
to be backwards compatible, but a warning will be issued if it is not
explicitly specified. From Ansible 2.12 on, auto-detection will be disabled
and this option will be made mandatory."
type: str
choices:
- build
- load
- pull
- local
version_added: "2.8"
build:
description:
- "Specifies options used for building images."
type: dict
suboptions:
cache_from:
description:
- List of image names to consider as cache source.
type: list
dockerfile:
description:
- Use with state C(present) to provide an alternate name for the Dockerfile to use when building an image.
type: str
http_timeout:
description:
- Timeout for HTTP requests during the image build operation. Provide a positive integer value for the number of
seconds.
type: int
path:
description:
- Use with state 'present' to build an image. Will be the path to a directory containing the context and
Dockerfile for building an image.
type: path
required: yes
pull:
description:
- When building an image downloads any updates to the FROM image in Dockerfile.
- The default is currently C(yes). This will change to C(no) in Ansible 2.12.
type: bool
rm:
description:
- Remove intermediate containers after build.
type: bool
default: yes
network:
description:
- The network to use for C(RUN) build instructions.
type: str
nocache:
description:
- Do not use cache when building an image.
type: bool
default: no
args:
description:
- Provide a dictionary of C(key:value) build arguments that map to Dockerfile ARG directive.
- Docker expects the value to be a string. For convenience any non-string values will be converted to strings.
- Requires Docker API >= 1.21.
type: dict
container_limits:
description:
- A dictionary of limits applied to each container created by the build process.
type: dict
suboptions:
memory:
description:
- Set memory limit for build.
type: int
memswap:
description:
- Total memory (memory + swap), -1 to disable swap.
type: int
cpushares:
description:
- CPU shares (relative weight).
type: int
cpusetcpus:
description:
- CPUs in which to allow execution, e.g., "0-3", "0,1".
type: str
use_config_proxy:
description:
- If set to `yes` and a proxy configuration is specified in the docker client configuration
(by default C($HOME/.docker/config.json)), the corresponding environment variables will
be set in the container being built.
- Needs Docker SDK for Python >= 3.7.0.
type: bool
version_added: "2.8"
archive_path:
description:
- Use with state C(present) to archive an image to a .tar file.
type: path
version_added: "2.1"
load_path:
description:
- Use with state C(present) to load an image from a .tar file.
- Set I(source) to C(load) if you want to load the image. The option will
be set automatically before Ansible 2.12 if this option is used (except
if I(path) is specified as well, in which case building will take precedence).
From Ansible 2.12 on, you have to set I(source) to C(load).
type: path
version_added: "2.2"
dockerfile:
description:
- Use with state C(present) to provide an alternate name for the Dockerfile to use when building an image.
- Please use I(build.dockerfile) instead. This option will be removed in Ansible 2.12.
type: str
version_added: "2.0"
force:
description:
- Use with state I(absent) to un-tag and remove all images matching the specified name. Use with state
C(present) to build, load or pull an image when the image already exists. Also use with state C(present)
to force tagging an image.
- Please stop using this option, and use the more specialized force options
I(force_source), I(force_absent) and I(force_tag) instead.
- This option will be removed in Ansible 2.12.
type: bool
version_added: "2.1"
force_source:
description:
- Use with state C(present) to build, load or pull an image (depending on the
value of the I(source) option) when the image already exists.
type: bool
default: false
version_added: "2.8"
force_absent:
description:
- Use with state I(absent) to un-tag and remove all images matching the specified name.
type: bool
default: false
version_added: "2.8"
force_tag:
description:
- Use with state C(present) to force tagging an image.
- Please stop using this option, and use the more specialized force options
I(force_source), I(force_absent) and I(force_tag) instead.
- This option will be removed in Ansible 2.12.
type: bool
default: false
version_added: "2.8"
http_timeout:
description:
- Timeout for HTTP requests during the image build operation. Provide a positive integer value for the number of
seconds.
- Please use I(build.http_timeout) instead. This option will be removed in Ansible 2.12.
type: int
version_added: "2.1"
name:
description:
- "Image name. Name format will be one of: name, repository/name, registry_server:port/name.
When pushing or pulling an image the name can optionally include the tag by appending ':tag_name'."
- Note that image IDs (hashes) are not supported.
type: str
required: yes
path:
description:
- Use with state 'present' to build an image. Will be the path to a directory containing the context and
Dockerfile for building an image.
- Set I(source) to C(build) if you want to build the image. The option will
be set automatically before Ansible 2.12 if this option is used. From Ansible 2.12
on, you have to set I(source) to C(build).
- Please use I(build.path) instead. This option will be removed in Ansible 2.12.
type: path
aliases:
- build_path
pull:
description:
- When building an image downloads any updates to the FROM image in Dockerfile.
- Please use I(build.pull) instead. This option will be removed in Ansible 2.12.
- The default is currently C(yes). This will change to C(no) in Ansible 2.12.
type: bool
version_added: "2.1"
push:
description:
- Push the image to the registry. Specify the registry as part of the I(name) or I(repository) parameter.
type: bool
default: no
version_added: "2.2"
rm:
description:
- Remove intermediate containers after build.
- Please use I(build.rm) instead. This option will be removed in Ansible 2.12.
type: bool
default: yes
version_added: "2.1"
nocache:
description:
- Do not use cache when building an image.
- Please use I(build.nocache) instead. This option will be removed in Ansible 2.12.
type: bool
default: no
repository:
description:
- Full path to a repository. Use with state C(present) to tag the image into the repository. Expects
format I(repository:tag). If no tag is provided, will use the value of the C(tag) parameter or I(latest).
type: str
version_added: "2.1"
state:
description:
- Make assertions about the state of an image.
- When C(absent) an image will be removed. Use the force option to un-tag and remove all images
matching the provided name.
- When C(present) check if an image exists using the provided name and tag. If the image is not found or the
force option is used, the image will either be pulled, built or loaded, depending on the I(source) option.
- By default the image will be pulled from Docker Hub, or the registry specified in the image's name. Note that
this will change in Ansible 2.12, so to make sure that you are pulling, set I(source) to C(pull). To build
the image, provide a I(path) value set to a directory containing a context and Dockerfile, and set I(source)
to C(build). To load an image, specify I(load_path) to provide a path to an archive file. To tag an image to
a repository, provide a I(repository) path. If the name contains a repository path, it will be pushed.
- "NOTE: C(state=build) is DEPRECATED and will be removed in release 2.11. Specifying C(build) will behave the
same as C(present)."
type: str
default: present
choices:
- absent
- present
- build
tag:
description:
- Used to select an image when pulling. Will be added to the image when pushing, tagging or building. Defaults to
I(latest).
- If C(name) parameter format is I(name:tag), then tag value from C(name) will take precedence.
type: str
default: latest
buildargs:
description:
- Provide a dictionary of C(key:value) build arguments that map to Dockerfile ARG directive.
- Docker expects the value to be a string. For convenience any non-string values will be converted to strings.
- Requires Docker API >= 1.21.
- Please use I(build.args) instead. This option will be removed in Ansible 2.12.
type: dict
version_added: "2.2"
container_limits:
description:
- A dictionary of limits applied to each container created by the build process.
- Please use I(build.container_limits) instead. This option will be removed in Ansible 2.12.
type: dict
suboptions:
memory:
description:
- Set memory limit for build.
type: int
memswap:
description:
- Total memory (memory + swap), -1 to disable swap.
type: int
cpushares:
description:
- CPU shares (relative weight).
type: int
cpusetcpus:
description:
- CPUs in which to allow execution, e.g., "0-3", "0,1".
type: str
version_added: "2.1"
use_tls:
description:
- "DEPRECATED. Whether to use tls to connect to the docker daemon. Set to
C(encrypt) to use TLS. And set to C(verify) to use TLS and verify that
the server's certificate is valid for the server."
- "NOTE: If you specify this option, it will set the value of the I(tls) or
I(tls_verify) parameters if not set to I(no)."
- Will be removed in Ansible 2.11.
type: str
choices:
- 'no'
- 'encrypt'
- 'verify'
version_added: "2.0"
extends_documentation_fragment:
- docker
- docker.docker_py_1_documentation
requirements:
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
- "Docker API >= 1.20"
author:
- Pavel Antonov (@softzilla)
- Chris Houseknecht (@chouseknecht)
'''
EXAMPLES = '''
- name: Pull an image
docker_image:
name: pacur/centos-7
source: pull
- name: Tag and push to docker hub
docker_image:
name: pacur/centos-7:56
repository: dcoppenhagan/myimage:7.56
push: yes
source: local
- name: Tag and push to local registry
docker_image:
# Image will be centos:7
name: centos
# Will be pushed to localhost:5000/centos:7
repository: localhost:5000/centos
tag: 7
push: yes
source: local
- name: Add tag latest to image
docker_image:
name: myimage:7.1.2
repository: myimage:latest
source: local
- name: Remove image
docker_image:
state: absent
name: registry.ansible.com/chouseknecht/sinatra
tag: v1
- name: Build an image and push it to a private repo
docker_image:
build:
path: ./sinatra
name: registry.ansible.com/chouseknecht/sinatra
tag: v1
push: yes
source: build
- name: Archive image
docker_image:
name: registry.ansible.com/chouseknecht/sinatra
tag: v1
archive_path: my_sinatra.tar
source: local
- name: Load image from archive and push to a private registry
docker_image:
name: localhost:5000/myimages/sinatra
tag: v1
push: yes
load_path: my_sinatra.tar
source: load
- name: Build an image with build args
docker_image:
name: myimage
build:
path: /path/to/build/dir
args:
log_volume: /var/log/myapp
listen_port: 8080
source: build
- name: Build image using cache source
docker_image:
name: myimage:latest
build:
path: /path/to/build/dir
# Use as cache source for building myimage
cache_from:
- nginx:latest
- alpine:3.8
source: build
'''
RETURN = '''
image:
description: Image inspection results for the affected image.
returned: success
type: dict
sample: {}
'''
import os
import re
from distutils.version import LooseVersion
from ansible.module_utils.docker.common import (
docker_version, AnsibleDockerClient, DockerBaseClass, is_image_name_id,
)
from ansible.module_utils._text import to_native
if docker_version is not None:
try:
if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
from docker.auth import resolve_repository_name
else:
from docker.auth.auth import resolve_repository_name
from docker.utils.utils import parse_repository_tag
except ImportError:
# missing Docker SDK for Python handled in module_utils.docker.common
pass
class ImageManager(DockerBaseClass):
def __init__(self, client, results):
super(ImageManager, self).__init__()
self.client = client
self.results = results
parameters = self.client.module.params
self.check_mode = self.client.check_mode
self.source = parameters['source']
build = parameters['build'] or dict()
self.archive_path = parameters.get('archive_path')
self.cache_from = build.get('cache_from')
self.container_limits = build.get('container_limits')
self.dockerfile = build.get('dockerfile')
self.force_source = parameters.get('force_source')
self.force_absent = parameters.get('force_absent')
self.force_tag = parameters.get('force_tag')
self.load_path = parameters.get('load_path')
self.name = parameters.get('name')
self.network = build.get('network')
self.nocache = build.get('nocache')
self.build_path = build.get('path')
self.pull = build.get('pull')
self.repository = parameters.get('repository')
self.rm = build.get('rm')
self.state = parameters.get('state')
self.tag = parameters.get('tag')
self.http_timeout = build.get('http_timeout')
self.push = parameters.get('push')
self.buildargs = build.get('args')
self.use_config_proxy = build.get('use_config_proxy')
# If name contains a tag, it takes precedence over tag parameter.
if not is_image_name_id(self.name):
repo, repo_tag = parse_repository_tag(self.name)
if repo_tag:
self.name = repo
self.tag = repo_tag
if self.state == 'present':
self.present()
elif self.state == 'absent':
self.absent()
def fail(self, msg):
self.client.fail(msg)
def present(self):
'''
Handles state = 'present', which includes building, loading or pulling an image,
depending on user provided parameters.
:returns None
'''
image = self.client.find_image(name=self.name, tag=self.tag)
if not image or self.force_source:
if self.source == 'build':
# Build the image
if not os.path.isdir(self.build_path):
self.fail("Requested build path %s could not be found or you do not have access." % self.build_path)
image_name = self.name
if self.tag:
image_name = "%s:%s" % (self.name, self.tag)
self.log("Building image %s" % image_name)
self.results['actions'].append("Built image %s from %s" % (image_name, self.build_path))
self.results['changed'] = True
if not self.check_mode:
self.results['image'] = self.build_image()
elif self.source == 'load':
# Load the image from an archive
if not os.path.isfile(self.load_path):
self.fail("Error loading image %s. Specified path %s does not exist." % (self.name,
self.load_path))
image_name = self.name
if self.tag:
image_name = "%s:%s" % (self.name, self.tag)
self.results['actions'].append("Loaded image %s from %s" % (image_name, self.load_path))
self.results['changed'] = True
if not self.check_mode:
self.results['image'] = self.load_image()
elif self.source == 'pull':
# pull the image
self.results['actions'].append('Pulled image %s:%s' % (self.name, self.tag))
self.results['changed'] = True
if not self.check_mode:
self.results['image'], dummy = self.client.pull_image(self.name, tag=self.tag)
elif self.source == 'local':
if image is None:
name = self.name
if self.tag:
name = "%s:%s" % (self.name, self.tag)
self.client.fail('Cannot find the image %s locally.' % name)
if not self.check_mode and image and image['Id'] == self.results['image']['Id']:
self.results['changed'] = False
if self.archive_path:
self.archive_image(self.name, self.tag)
if self.push and not self.repository:
self.push_image(self.name, self.tag)
elif self.repository:
self.tag_image(self.name, self.tag, self.repository, push=self.push)
def absent(self):
'''
Handles state = 'absent', which removes an image.
:return None
'''
name = self.name
if is_image_name_id(name):
image = self.client.find_image_by_id(name)
else:
image = self.client.find_image(name, self.tag)
if self.tag:
name = "%s:%s" % (self.name, self.tag)
if image:
if not self.check_mode:
try:
self.client.remove_image(name, force=self.force_absent)
except Exception as exc:
self.fail("Error removing image %s - %s" % (name, str(exc)))
self.results['changed'] = True
self.results['actions'].append("Removed image %s" % (name))
self.results['image']['state'] = 'Deleted'
def archive_image(self, name, tag):
'''
Archive an image to a .tar file. Called when archive_path is passed.
:param name - name of the image. Type: str
:return None
'''
if not tag:
tag = "latest"
image = self.client.find_image(name=name, tag=tag)
if not image:
self.log("archive image: image %s:%s not found" % (name, tag))
return
image_name = "%s:%s" % (name, tag)
self.results['actions'].append('Archived image %s to %s' % (image_name, self.archive_path))
self.results['changed'] = True
if not self.check_mode:
self.log("Getting archive of image %s" % image_name)
try:
image = self.client.get_image(image_name)
except Exception as exc:
self.fail("Error getting image %s - %s" % (image_name, str(exc)))
try:
with open(self.archive_path, 'wb') as fd:
if self.client.docker_py_version >= LooseVersion('3.0.0'):
for chunk in image:
fd.write(chunk)
else:
for chunk in image.stream(2048, decode_content=False):
fd.write(chunk)
except Exception as exc:
self.fail("Error writing image archive %s - %s" % (self.archive_path, str(exc)))
image = self.client.find_image(name=name, tag=tag)
if image:
self.results['image'] = image
def push_image(self, name, tag=None):
'''
If the name of the image contains a repository path, then push the image.
:param name Name of the image to push.
:param tag Use a specific tag.
:return: None
'''
repository = name
if not tag:
repository, tag = parse_repository_tag(name)
registry, repo_name = resolve_repository_name(repository)
self.log("push %s to %s/%s:%s" % (self.name, registry, repo_name, tag))
if registry:
self.results['actions'].append("Pushed image %s to %s/%s:%s" % (self.name, registry, repo_name, tag))
self.results['changed'] = True
if not self.check_mode:
status = None
try:
changed = False
for line in self.client.push(repository, tag=tag, stream=True, decode=True):
self.log(line, pretty_print=True)
if line.get('errorDetail'):
raise Exception(line['errorDetail']['message'])
status = line.get('status')
if status == 'Pushing':
changed = True
self.results['changed'] = changed
except Exception as exc:
if re.search('unauthorized', str(exc)):
if re.search('authentication required', str(exc)):
self.fail("Error pushing image %s/%s:%s - %s. Try logging into %s first." %
(registry, repo_name, tag, str(exc), registry))
else:
self.fail("Error pushing image %s/%s:%s - %s. Does the repository exist?" %
(registry, repo_name, tag, str(exc)))
self.fail("Error pushing image %s: %s" % (repository, str(exc)))
self.results['image'] = self.client.find_image(name=repository, tag=tag)
if not self.results['image']:
self.results['image'] = dict()
self.results['image']['push_status'] = status
def tag_image(self, name, tag, repository, push=False):
'''
Tag an image into a repository.
:param name: name of the image. required.
:param tag: image tag.
:param repository: path to the repository. required.
:param push: bool. push the image once it's tagged.
:return: None
'''
repo, repo_tag = parse_repository_tag(repository)
if not repo_tag:
repo_tag = "latest"
if tag:
repo_tag = tag
image = self.client.find_image(name=repo, tag=repo_tag)
found = 'found' if image else 'not found'
self.log("image %s was %s" % (repo, found))
if not image or self.force_tag:
self.log("tagging %s:%s to %s:%s" % (name, tag, repo, repo_tag))
self.results['changed'] = True
self.results['actions'].append("Tagged image %s:%s to %s:%s" % (name, tag, repo, repo_tag))
if not self.check_mode:
try:
# Finding the image does not always work, especially running a localhost registry. In those
# cases, if we don't set force=True, it errors.
image_name = name
if tag and not re.search(tag, name):
image_name = "%s:%s" % (name, tag)
tag_status = self.client.tag(image_name, repo, tag=repo_tag, force=True)
if not tag_status:
raise Exception("Tag operation failed.")
except Exception as exc:
self.fail("Error: failed to tag image - %s" % str(exc))
self.results['image'] = self.client.find_image(name=repo, tag=repo_tag)
if image and image['Id'] == self.results['image']['Id']:
self.results['changed'] = False
if push:
self.push_image(repo, repo_tag)
def build_image(self):
'''
Build an image
:return: image dict
'''
params = dict(
path=self.build_path,
tag=self.name,
rm=self.rm,
nocache=self.nocache,
timeout=self.http_timeout,
pull=self.pull,
forcerm=self.rm,
dockerfile=self.dockerfile,
decode=True,
)
if self.client.docker_py_version < LooseVersion('3.0.0'):
params['stream'] = True
build_output = []
if self.tag:
params['tag'] = "%s:%s" % (self.name, self.tag)
if self.container_limits:
params['container_limits'] = self.container_limits
if self.buildargs:
for key, value in self.buildargs.items():
self.buildargs[key] = to_native(value)
params['buildargs'] = self.buildargs
if self.cache_from:
params['cache_from'] = self.cache_from
if self.network:
params['network_mode'] = self.network
if self.use_config_proxy:
params['use_config_proxy'] = self.use_config_proxy
# Due to a bug in docker-py, it will crash if
# use_config_proxy is True and buildargs is None
if 'buildargs' not in params:
params['buildargs'] = {}
for line in self.client.build(**params):
# line = json.loads(line)
self.log(line, pretty_print=True)
if "stream" in line:
build_output.append(line["stream"])
if line.get('error'):
if line.get('errorDetail'):
errorDetail = line.get('errorDetail')
self.fail(
"Error building %s - code: %s, message: %s, logs: %s" % (
self.name,
errorDetail.get('code'),
errorDetail.get('message'),
build_output))
else:
self.fail("Error building %s - message: %s, logs: %s" % (
self.name, line.get('error'), build_output))
return self.client.find_image(name=self.name, tag=self.tag)
def load_image(self):
'''
Load an image from a .tar archive
:return: image dict
'''
try:
self.log("Opening image %s" % self.load_path)
image_tar = open(self.load_path, 'rb')
except Exception as exc:
self.fail("Error opening image %s - %s" % (self.load_path, str(exc)))
try:
self.log("Loading image from %s" % self.load_path)
self.client.load_image(image_tar)
except Exception as exc:
self.fail("Error loading image %s - %s" % (self.name, str(exc)))
try:
image_tar.close()
except Exception as exc:
self.fail("Error closing image %s - %s" % (self.name, str(exc)))
return self.client.find_image(self.name, self.tag)
def main():
argument_spec = dict(
source=dict(type='str', choices=['build', 'load', 'pull', 'local']),
build=dict(type='dict', suboptions=dict(
cache_from=dict(type='list', elements='str'),
container_limits=dict(type='dict', options=dict(
memory=dict(type='int'),
memswap=dict(type='int'),
cpushares=dict(type='int'),
cpusetcpus=dict(type='str'),
)),
dockerfile=dict(type='str'),
http_timeout=dict(type='int'),
network=dict(type='str'),
nocache=dict(type='bool', default=False),
path=dict(type='path', required=True),
pull=dict(type='bool'),
rm=dict(type='bool', default=True),
args=dict(type='dict'),
use_config_proxy=dict(type='bool'),
)),
archive_path=dict(type='path'),
container_limits=dict(type='dict', options=dict(
memory=dict(type='int'),
memswap=dict(type='int'),
cpushares=dict(type='int'),
cpusetcpus=dict(type='str'),
), removed_in_version='2.12'),
dockerfile=dict(type='str', removed_in_version='2.12'),
force=dict(type='bool', removed_in_version='2.12'),
force_source=dict(type='bool', default=False),
force_absent=dict(type='bool', default=False),
force_tag=dict(type='bool', default=False),
http_timeout=dict(type='int', removed_in_version='2.12'),
load_path=dict(type='path'),
name=dict(type='str', required=True),
nocache=dict(type='bool', default=False, removed_in_version='2.12'),
path=dict(type='path', aliases=['build_path'], removed_in_version='2.12'),
pull=dict(type='bool', removed_in_version='2.12'),
push=dict(type='bool', default=False),
repository=dict(type='str'),
rm=dict(type='bool', default=True, removed_in_version='2.12'),
state=dict(type='str', default='present', choices=['absent', 'present', 'build']),
tag=dict(type='str', default='latest'),
use_tls=dict(type='str', choices=['no', 'encrypt', 'verify'], removed_in_version='2.11'),
buildargs=dict(type='dict', removed_in_version='2.12'),
)
required_if = [
# ('state', 'present', ['source']), -- enable in Ansible 2.12.
# ('source', 'build', ['build']), -- enable in Ansible 2.12.
('source', 'load', ['load_path']),
]
def detect_build_cache_from(client):
return client.params['build'] and client.params['build']['cache_from'] is not None
def detect_build_network(client):
return client.params['build'] and client.params['build']['network'] is not None
def detect_use_config_proxy(client):
return client.params['build'] and client.params['build']['use_config_proxy'] is not None
option_minimal_versions = dict()
option_minimal_versions["build.cache_from"] = dict(docker_py_version='2.1.0', docker_api_version='1.25', detect_usage=detect_build_cache_from)
option_minimal_versions["build.network"] = dict(docker_py_version='2.4.0', docker_api_version='1.25', detect_usage=detect_build_network)
option_minimal_versions["build.use_config_proxy"] = dict(docker_py_version='3.7.0', detect_usage=detect_use_config_proxy)
client = AnsibleDockerClient(
argument_spec=argument_spec,
required_if=required_if,
supports_check_mode=True,
min_docker_version='1.8.0',
min_docker_api_version='1.20',
option_minimal_versions=option_minimal_versions,
)
if client.module.params['state'] == 'build':
client.module.warn('The "build" state has been deprecated for a long time '
'and will be removed in Ansible 2.11. Please use '
'"present", which has the same meaning as "build".')
client.module.params['state'] = 'present'
if client.module.params['use_tls']:
client.module.warn('The "use_tls" option has been deprecated for a long time '
'and will be removed in Ansible 2.11. Please use the '
'"tls" and "tls_verify" options instead.')
build_options = dict(
container_limits='container_limits',
dockerfile='dockerfile',
http_timeout='http_timeout',
nocache='nocache',
path='path',
pull='pull',
rm='rm',
buildargs='args',
)
for option, build_option in build_options.items():
default_value = None
if option in ('rm', ):
default_value = True
elif option in ('nocache', ):
default_value = False
if client.module.params[option] != default_value:
if client.module.params['build'] is None:
client.module.params['build'] = dict()
if client.module.params['build'].get(build_option) != default_value:
client.fail('Cannot specify both %s and build.%s!' % (option, build_option))
client.module.params['build'][build_option] = client.module.params[option]
client.module.warn('Please specify build.%s instead of %s. The %s option '
'has been renamed and will be removed in Ansible 2.12.' % (build_option, option, option))
if client.module.params['source'] == 'build':
if (not client.module.params['build'] or not client.module.params['build'].get('path')):
client.module.fail('If "source" is set to "build", the "build.path" option must be specified.')
if client.module.params['build']['pull'] is None:
client.module.warn("The default for build.pull is currently 'yes', but will be changed to 'no' in Ansible 2.12. "
"Please set build.pull explicitly to the value you need.")
client.module.params['build']['pull'] = True # TODO: change to False in Ansible 2.12
if client.module.params['state'] == 'present' and client.module.params['source'] is None:
# Autodetection. To be removed in Ansible 2.12.
if (client.module.params['build'] or dict()).get('path'):
client.module.params['source'] = 'build'
elif client.module.params['load_path']:
client.module.params['source'] = 'load'
else:
client.module.params['source'] = 'pull'
client.module.warn('The value of the "source" option was determined to be "%s". '
'Please set the "source" option explicitly. Autodetection will '
'be removed in Ansible 2.12.' % client.module.params['source'])
if client.module.params['force']:
client.module.params['force_source'] = True
client.module.params['force_absent'] = True
client.module.params['force_tag'] = True
client.module.warn('The "force" option will be removed in Ansible 2.12. Please '
'use the "force_source", "force_absent" or "force_tag" option '
'instead, depending on what you want to force.')
results = dict(
changed=False,
actions=[],
image={}
)
ImageManager(client, results)
client.module.exit_json(**results)
if __name__ == '__main__':
main()
| 0.003272 |
"""
Django settings for webvirtcloud project.
"""
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_KEY = '4y(f4rfqc6f2!i8_vfuu)kav6tdv5#sc=n%o451dm+th0&3uci'
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'computes',
'console',
'networks',
'storages',
'interfaces',
'instances',
'secrets',
'logs',
'accounts',
'create',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'webvirtcloud.urls'
WSGI_APPLICATION = 'webvirtcloud.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
## WebVirtCloud settings
# Websocket port
WS_PORT = 6080
# Websocket host
WS_HOST = '0.0.0.0'
# Websocket public host
WS_PUBLIC_HOST = None
# Websocket SSL certificate
WS_CERT = None
# list of console types
QEMU_CONSOLE_TYPES = ['vnc', 'spice']
# default console type
QEMU_CONSOLE_DEFAULT_TYPE = 'vnc'
# list taken from http://qemu.weilnetz.de/qemu-doc.html#sec_005finvocation
QEMU_KEYMAPS = ['ar', 'da', 'de', 'de-ch', 'en-gb', 'en-us', 'es', 'et', 'fi',
'fo', 'fr', 'fr-be', 'fr-ca', 'fr-ch', 'hr', 'hu', 'is', 'it',
'ja', 'lt', 'lv', 'mk', 'nl', 'nl-be', 'no', 'pl', 'pt',
'pt-br', 'ru', 'sl', 'sv', 'th', 'tr']
# keepalive interval and count for libvirt connections
LIBVIRT_KEEPALIVE_INTERVAL = 5
LIBVIRT_KEEPALIVE_COUNT = 5
| 0.000436 |
"""Image resampling methods"""
from __future__ import absolute_import, division, print_function
import numpy as np
import scipy.interpolate
import scipy.ndimage
from sunpy.extern.six.moves import range
__all__ = ['resample', 'reshape_image_to_4d_superpixel']
def resample(orig, dimensions, method='linear', center=False, minusone=False):
"""Returns a new `numpy.ndarray` that has been resampled up or down.
Arbitrary resampling of source array to new dimension sizes.
Currently only supports maintaining the same number of dimensions.
To use 1-D arrays, first promote them to shape (x,1).
Uses the same parameters and creates the same co-ordinate lookup points
as IDL's congrid routine (which apparently originally came from a
VAX/VMS routine of the same name).
Parameters
----------
orig : `~numpy.ndarray`
Original input array.
dimensions : tuple
Dimensions that new `~numpy.ndarray` should have.
method : {'neighbor' | 'nearest' | 'linear' | 'spline'}
Method to use for resampling interpolation.
* neighbor - Closest value from original data
* nearest and linear - Uses n x 1-D interpolations calculated by
`scipy.interpolate.interp1d`.
* spline - Uses ndimage.map_coordinates
center : bool
If True, interpolation points are at the centers of the bins,
otherwise points are at the front edge of the bin.
minusone : bool
For orig.shape = (i,j) & new dimensions = (x,y), if set to False
orig is resampled by factors of (i/x) * (j/y), otherwise orig
is resampled by (i-1)/(x-1) * (j-1)/(y-1).
This prevents extrapolation one element beyond the bounds of the
input array.
Returns
-------
out : `~numpy.ndarray`
A new `~numpy.ndarray` which has been resampled to the desired
dimensions.
References
----------
| http://www.scipy.org/Cookbook/Rebinning (Original source, 2011/11/19)
"""
# Verify that number dimensions requested matches original shape
if len(dimensions) != orig.ndim:
raise UnequalNumDimensions("Number of dimensions must remain the same "
"when calling resample.")
#@note: will this be okay for integer (e.g. JPEG 2000) data?
if orig.dtype not in [np.float64, np.float32]:
orig = orig.astype(np.float64)
dimensions = np.asarray(dimensions, dtype=np.float64)
m1 = np.array(minusone, dtype=np.int64) # array(0) or array(1)
offset = np.float64(center * 0.5) # float64(0.) or float64(0.5)
# Resample data
if method == 'neighbor':
data = _resample_neighbor(orig, dimensions, offset, m1)
elif method in ['nearest','linear']:
data = _resample_nearest_linear(orig, dimensions, method,
offset, m1)
elif method == 'spline':
data = _resample_spline(orig, dimensions, offset, m1)
else:
raise UnrecognizedInterpolationMethod("Unrecognized interpolation "
"method requested.")
return data
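# A minimal usage sketch for resample(), illustrating the docstring above
# (the array values and target shape are illustrative, not from the source):
#
#     data = np.arange(16, dtype=np.float64).reshape(4, 4)
#     upsampled = resample(data, (8, 8), method='linear', center=True)
#     # upsampled.shape == (8, 8)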
def _resample_nearest_linear(orig, dimensions, method, offset, m1):
"""Resample Map using either linear or nearest interpolation."""
dimlist = []
# calculate new dims
for i in range(orig.ndim):
base = np.arange(dimensions[i])
dimlist.append((orig.shape[i] - m1) / (dimensions[i] - m1) *
(base + offset) - offset)
# specify old coordinates
old_coords = [np.arange(i, dtype=np.float) for i in orig.shape]
# first interpolation - for ndims = any
mint = scipy.interpolate.interp1d(old_coords[-1], orig, bounds_error=False,
fill_value=min(old_coords[-1]), kind=method)
new_data = mint(dimlist[-1])
trorder = [orig.ndim - 1] + list(range(orig.ndim - 1))
for i in range(orig.ndim - 2, -1, -1):
new_data = new_data.transpose(trorder)
mint = scipy.interpolate.interp1d(old_coords[i], new_data,
bounds_error=False, fill_value=min(old_coords[i]), kind=method)
new_data = mint(dimlist[i])
if orig.ndim > 1:
# need one more transpose to return to original dimensions
new_data = new_data.transpose(trorder)
return new_data
def _resample_neighbor(orig, dimensions, offset, m1):
"""Resample Map using closest-value interpolation."""
dimlist = []
for i in range(orig.ndim):
base = np.indices(dimensions)[i]
dimlist.append((orig.shape[i] - m1) / (dimensions[i] - m1) *
(base + offset) - offset)
cd = np.array(dimlist).round().astype(int)
return orig[list(cd)]
def _resample_spline(orig, dimensions, offset, m1):
"""Resample Map using spline-based interpolation."""
oslices = [slice(0, j) for j in orig.shape]
# FIXME: not used?!
old_coords = np.ogrid[oslices] #pylint: disable=W0612
nslices = [slice(0, j) for j in list(dimensions)]
newcoords = np.mgrid[nslices]
newcoords_dims = list(range(np.rank(newcoords)))
#make first index last
newcoords_dims.append(newcoords_dims.pop(0))
newcoords_tr = newcoords.transpose(newcoords_dims) #pylint: disable=W0612
# makes a view that affects newcoords
newcoords_tr += offset
deltas = (np.asarray(orig.shape) - m1) / (dimensions - m1)
newcoords_tr *= deltas
newcoords_tr -= offset
return scipy.ndimage.map_coordinates(orig, newcoords)
def reshape_image_to_4d_superpixel(img, dimensions, offset):
"""Re-shape the two dimension input image into a a four dimensional
array whose first and third dimensions express the number of original
pixels in the x and y directions that form one superpixel. The reshaping
makes it very easy to perform operations on superpixels.
An application of this reshaping is the following. Let's say you have an
array
x = np.array([[0, 0, 0, 1, 1, 1],
[0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1],
[0, 0, 0, 0, 1, 1],
[1, 0, 1, 0, 1, 1],
[0, 0, 1, 0, 0, 0]])
and you want to sum over 2x2 non-overlapping sub-arrays. For example, you
could have a noisy image and you want to increase the signal-to-noise ratio.
Summing over all the non-overlapping 2x2 sub-arrays will create a
superpixel array of the original data. Every pixel in the superpixel array
is the sum of the values in a 2x2 sub-array of the original array.
This summing can be done by reshaping the array
y = x.reshape(3,2,3,2)
and then summing over axis 1 and axis 3
y2 = y.sum(axis=3).sum(axis=1)
which gives the expected array.
array([[0, 3, 2],
[2, 0, 4],
[1, 2, 2]])
Parameters
----------
img : `numpy.ndarray`
A two-dimensional `~numpy.ndarray` of the form (y, x).
dimensions : array-like
A two element array-like object containing integers that describe the
superpixel summation in the (y, x) directions.
offset : array-like
A two element array-like object containing integers that describe
where in the input image the array reshaping begins in the (y, x)
directions.
Returns
-------
A four dimensional `~numpy.ndarray` that can be used to easily create
two-dimensional arrays of superpixels of the input image.
References
----------
Taken from
http://mail.scipy.org/pipermail/numpy-discussion/2010-July/051760.html
"""
# make sure the input dimensions are integers
dimensions = [int(dim) for dim in dimensions]
# New dimensions of the final image
na = int(np.floor((img.shape[0] - offset[0]) / dimensions[0]))
nb = int(np.floor((img.shape[1] - offset[1]) / dimensions[1]))
# Reshape up to a higher dimensional array which is useful for higher
# level operations
return (img[offset[0]:offset[0] + na*dimensions[0],
offset[1]:offset[1] + nb*dimensions[1]]).reshape(na, dimensions[0], nb, dimensions[1])
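# A minimal usage sketch mirroring the 2x2 superpixel-summing example in the
# docstring above (values are illustrative):
#
#     img = np.arange(36).reshape(6, 6)
#     y = reshape_image_to_4d_superpixel(img, (2, 2), (0, 0))
#     superpixels = y.sum(axis=3).sum(axis=1)   # shape (3, 3)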
class UnrecognizedInterpolationMethod(ValueError):
"""Unrecognized interpolation method specified."""
pass
class UnequalNumDimensions(ValueError):
"""Number of dimensions does not match input array"""
pass
| 0.001669 |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, print_function,
unicode_literals, division)
from inspect import isgenerator
from collections import Mapping, Iterable
from six import text_type, binary_type
from .auxiliary import OrderedDict
from .escaping import escape_arbitrary_bytes
from .filterbase import FilterBase
from .testableiter import TestableIter
def dump_dict(d, keys=None):
if keys is None:
keys = (d.keys() if OrderedDict and isinstance(d, OrderedDict)
else sorted(d.keys()))
return dump_key_values((k, d[k]) for k in keys)
def dump_list(l):
if not hasattr(l, '__getitem__'):
# assume objects without __getitem__ don't enforce an ordering
l = sorted(l)
return dump_key_values((text_type(i), v) for i, v in enumerate(l))
def dump_generator(k, g):
g = TestableIter(g)
if g:
for k_, v_ in g:
yield '{0!s}__{1}'.format(k, k_), v_
else:
yield k, 'EMPTY'
def dump_key_values(kv_iterable):
for k, v in kv_iterable:
k = escape_arbitrary_bytes(k)
if isinstance(v, FilterBase):
v = v.to_value()
if isgenerator(v):
for k_, v_ in dump_generator(k, v):
yield k_, v_
else:
yield k, v
elif isinstance(v, (text_type, binary_type)):
yield k, escape_arbitrary_bytes(v)
elif isinstance(v, Iterable):
g = dump_generator(k, (dump_dict(v) if isinstance(v, Mapping)
else dump_list(v)))
for k_, v_ in g:
yield k_, v_
else:
yield k, escape_arbitrary_bytes(text_type(v))
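# A minimal usage sketch (illustrative; assumes escape_arbitrary_bytes returns
# already-safe text unchanged):
#
#     list(dump_key_values([('answer', 42), ('tags', ['a', 'b'])]))
#     # -> [('answer', '42'), ('tags__0', 'a'), ('tags__1', 'b')]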
| 0.001151 |
# Copyright (c) 2014-2016 Cedric Bellegarde <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk, Gdk, GdkPixbuf, GLib, Gio
from threading import Thread
from gettext import gettext as _
try:
from lollypop.wikipedia import Wikipedia
except:
pass
from lollypop.define import Lp
from lollypop.utils import get_network_available
from lollypop.cache import InfoCache
from lollypop.lio import Lio
class InfoContent(Gtk.Stack):
"""
Widget showing artist image and bio
"""
def __init__(self):
"""
Init artists content
"""
Gtk.Stack.__init__(self)
InfoCache.init()
self._stop = False
self.__cancel = Gio.Cancellable.new()
self._artist = ""
self.set_transition_duration(500)
self.set_transition_type(Gtk.StackTransitionType.CROSSFADE)
builder = Gtk.Builder()
builder.add_from_resource('/org/gnome/Lollypop/InfoContent.ui')
self.__content = builder.get_object('content')
self.__image = builder.get_object('image')
self._menu_found = builder.get_object('menu-found')
self._menu_not_found = builder.get_object('menu-not-found')
self.__error = builder.get_object('error')
self.add_named(builder.get_object('widget'), 'widget')
self.add_named(builder.get_object('notfound'), 'notfound')
self._spinner = builder.get_object('spinner')
self.add_named(self._spinner, 'spinner')
def clear(self):
"""
Clear content
"""
self.__content.set_text('')
self.__image.hide()
self.__image.clear()
def stop(self):
"""
Stop loading
"""
self._stop = True
self.__cancel.cancel()
@property
def artist(self):
"""
Current artist on screen as str
"""
return self._artist
def set_content(self, prefix, content, image_url, suffix):
"""
populate widget with content
@param prefix as str
@param content as str
@param image url as str
@param suffix as str
@thread safe
"""
try:
data = None
stream = None
if content is not None:
if image_url is not None:
f = Lio.File.new_for_uri(image_url)
(status, data, tag) = f.load_contents(self.__cancel)
if status:
stream = Gio.MemoryInputStream.new_from_data(data,
None)
else:
data = None
InfoCache.add(prefix, content, data, suffix)
GLib.idle_add(self.__set_content, content, stream)
except Exception as e:
print("InfoContent::set_content: %s" % e)
#######################
# PROTECTED #
#######################
def _load_cache_content(self, prefix, suffix):
"""
Load from cache
@param prefix as str
@param suffix as str
@return True if loaded
"""
(content, data) = InfoCache.get(prefix, suffix)
if content is not None:
stream = None
if data is not None:
stream = Gio.MemoryInputStream.new_from_data(data, None)
GLib.idle_add(self.__set_content, content, stream)
return True
return False
def _on_not_found(self):  # single leading underscore so subclasses can override (double underscore would be name-mangled)
"""
Show not found child
"""
if get_network_available():
error = _("No information for this artist")
else:
error = _("Network access disabled")
self.__error.set_markup(
"<span font_weight='bold' size='xx-large'>" +
error +
"</span>")
self.set_visible_child_name('notfound')
#######################
# PRIVATE #
#######################
def __set_content(self, content, stream):
"""
Set content
@param content as string
@param data as Gio.MemoryInputStream
"""
if content is not None:
self.__content.set_markup(
GLib.markup_escape_text(content.decode('utf-8')))
if stream is not None:
scale = self.__image.get_scale_factor()
# Will happen if cache is broken or when reading empty files
try:
pixbuf = GdkPixbuf.Pixbuf.new_from_stream_at_scale(
stream,
Lp().settings.get_value(
'cover-size').get_int32() + 50 * scale,
-1,
True,
None)
stream.close()
surface = Gdk.cairo_surface_create_from_pixbuf(pixbuf,
scale,
None)
del pixbuf
self.__image.set_from_surface(surface)
del surface
self.__image.show()
except:
pass
self.set_visible_child_name('widget')
else:
self._on_not_found()
self._spinner.stop()
class WikipediaContent(InfoContent):
"""
Show wikipedia content
"""
def __init__(self):
"""
Init widget
"""
InfoContent.__init__(self)
self.__album = ""
self.__menu_model = Gio.Menu()
self._menu_not_found.set_menu_model(self.__menu_model)
self._menu_found.set_menu_model(self.__menu_model)
self.__app = Gio.Application.get_default()
def populate(self, artist, album):
"""
Populate content
@param artist as str
@param album as str
@thread safe
"""
self._artist = artist
self.__album = album
if not self._load_cache_content(artist, 'wikipedia'):
GLib.idle_add(self.set_visible_child_name, 'spinner')
self._spinner.start()
self.__load_page_content(artist)
elif get_network_available():
t = Thread(target=self.__setup_menu,
args=(self._artist, self.__album))
t.daemon = True
t.start()
def clear(self):
"""
Clear model and then content
"""
self.__menu_model.remove_all()
InfoContent.clear(self)
#######################
# PROTECTED #
#######################
def _on_not_found(self):
"""
Show not found child
"""
InfoContent._on_not_found(self)
if get_network_available():
t = Thread(target=self.__setup_menu,
args=(self._artist, self.__album))
t.daemon = True
t.start()
#######################
# PRIVATE #
#######################
def __load_page_content(self, artist):
"""
Load artist page content
@param artist as str
"""
GLib.idle_add(self.__menu_model.remove_all)
wp = Wikipedia()
try:
(url, content) = wp.get_page_infos(artist)
except:
url = content = None
if not self._stop:
InfoContent.set_content(self, self._artist, content,
url, 'wikipedia')
if get_network_available():
t = Thread(target=self.__setup_menu,
args=(self._artist, self.__album))
t.daemon = True
t.start()
def __setup_menu(self, artist, album):
"""
Setup menu for artist
@param artist as str
@param album as str
"""
wp = Wikipedia()
result = wp.search(artist)
result += wp.search(artist + ' ' + album)
cleaned = list(set(result))
if artist in cleaned:
cleaned.remove(artist)
GLib.idle_add(self.__setup_menu_strings, cleaned)
def __setup_menu_strings(self, strings):
"""
Setup a menu with strings
@param strings as [str]
"""
if strings:
self._menu_not_found.show()
self._menu_found.show()
else:
return
i = 0
for string in strings:
action = Gio.SimpleAction(name="wikipedia_%s" % i)
self.__app.add_action(action)
action.connect('activate',
self.__on_search_activated,
string)
self.__menu_model.append(string, "app.wikipedia_%s" % i)
i += 1
def __on_search_activated(self, action, variant, artist):
"""
Switch to page
@param action as SimpleAction
@param variant as GVariant
@param artist as str
"""
InfoCache.remove(artist, 'wikipedia')
InfoContent.clear(self)
self.set_visible_child_name('spinner')
self._spinner.start()
t = Thread(target=self.__load_page_content, args=(artist,))
t.daemon = True
t.start()
class LastfmContent(InfoContent):
"""
Show lastfm content
"""
def __init__(self):
"""
Init widget
"""
InfoContent.__init__(self)
def populate(self, artist):
"""
Populate content
@param artist as str
@thread safe
"""
self._artist = artist
if not self._load_cache_content(artist, 'lastfm'):
GLib.idle_add(self.set_visible_child_name, 'spinner')
self._spinner.start()
self.__load_page_content(artist)
#######################
# PRIVATE #
#######################
def __load_page_content(self, artist):
"""
Load artists page content
@param artist as str
"""
try:
(url, content) = Lp().lastfm.get_artist_info(artist)
except:
url = content = None
if not self._stop:
InfoContent.set_content(self, artist, content, url, 'lastfm')
| 0.00036 |
# -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import os.path
import socket
from urllib3.poolmanager import PoolManager, proxy_from_url
from urllib3.response import HTTPResponse
from urllib3.util import Timeout as TimeoutSauce
from urllib3.util.retry import Retry
from urllib3.exceptions import ClosedPoolError
from urllib3.exceptions import ConnectTimeoutError
from urllib3.exceptions import HTTPError as _HTTPError
from urllib3.exceptions import MaxRetryError
from urllib3.exceptions import NewConnectionError
from urllib3.exceptions import ProxyError as _ProxyError
from urllib3.exceptions import ProtocolError
from urllib3.exceptions import ReadTimeoutError
from urllib3.exceptions import SSLError as _SSLError
from urllib3.exceptions import ResponseError
from .models import Response
from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
prepend_scheme_if_needed, get_auth_from_url, urldefragauth,
select_proxy)
from .structures import CaseInsensitiveDict
from .cookies import extract_cookies_to_jar
from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
ProxyError, RetryError, InvalidSchema)
from .auth import _basic_auth_str
try:
from urllib3.contrib.socks import SOCKSProxyManager
except ImportError:
def SOCKSProxyManager(*args, **kwargs):
raise InvalidSchema("Missing dependencies for SOCKS support.")
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
DEFAULT_POOL_TIMEOUT = None
class BaseAdapter(object):
"""The Base Transport Adapter"""
def __init__(self):
super(BaseAdapter, self).__init__()
def send(self, request, stream=False, timeout=None, verify=True,
cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param verify: (optional) Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
"""
raise NotImplementedError
def close(self):
"""Cleans up adapter specific items."""
raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param max_retries: The maximum number of retries each connection
should attempt. Note, this applies only to failed DNS lookups, socket
connections and connection timeouts, never to requests where data has
made it to the server. By default, Requests does not retry failed
connections. If you need granular control over the conditions under
which we retry a request, import urllib3's ``Retry`` class and pass
that instead.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
>>> s.mount('http://', a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
def __init__(self, pool_connections=DEFAULT_POOLSIZE,
pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
pool_block=DEFAULT_POOLBLOCK):
if max_retries == DEFAULT_RETRIES:
self.max_retries = Retry(0, read=False)
else:
self.max_retries = Retry.from_int(max_retries)
self.config = {}
self.proxy_manager = {}
super(HTTPAdapter, self).__init__()
self._pool_connections = pool_connections
self._pool_maxsize = pool_maxsize
self._pool_block = pool_block
self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return dict((attr, getattr(self, attr, None)) for attr in
self.__attrs__)
def __setstate__(self, state):
# Can't handle by adding 'proxy_manager' to self.__attrs__ because
# self.poolmanager uses a lambda function, which isn't pickleable.
self.proxy_manager = {}
self.config = {}
for attr, value in state.items():
setattr(self, attr, value)
self.init_poolmanager(self._pool_connections, self._pool_maxsize,
block=self._pool_block)
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
"""Initializes a urllib3 PoolManager.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param connections: The number of urllib3 connection pools to cache.
:param maxsize: The maximum number of connections to save in the pool.
:param block: Block when no free connections are available.
:param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
"""
# save these values for pickling
self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
block=block, strict=True, **pool_kwargs)
def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Return urllib3 ProxyManager for the given proxy.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The proxy to return a urllib3 ProxyManager for.
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
:returns: ProxyManager
:rtype: urllib3.ProxyManager
"""
if proxy in self.proxy_manager:
manager = self.proxy_manager[proxy]
elif proxy.lower().startswith('socks'):
username, password = get_auth_from_url(proxy)
manager = self.proxy_manager[proxy] = SOCKSProxyManager(
proxy,
username=username,
password=password,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs
)
else:
proxy_headers = self.proxy_headers(proxy)
manager = self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs)
return manager
def cert_verify(self, conn, url, verify, cert):
"""Verify a SSL certificate. This method should not be called from user
code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param conn: The urllib3 connection object associated with the cert.
:param url: The requested URL.
:param verify: Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use
:param cert: The SSL certificate to verify.
"""
if url.lower().startswith('https') and verify:
cert_loc = None
# Allow self-specified cert location.
if verify is not True:
cert_loc = verify
if not cert_loc:
cert_loc = DEFAULT_CA_BUNDLE_PATH
if not cert_loc or not os.path.exists(cert_loc):
raise IOError("Could not find a suitable TLS CA certificate bundle, "
"invalid path: {0}".format(cert_loc))
conn.cert_reqs = 'CERT_REQUIRED'
if not os.path.isdir(cert_loc):
conn.ca_certs = cert_loc
else:
conn.ca_cert_dir = cert_loc
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
conn.ca_cert_dir = None
if cert:
if not isinstance(cert, basestring):
conn.cert_file = cert[0]
conn.key_file = cert[1]
else:
conn.cert_file = cert
conn.key_file = None
if conn.cert_file and not os.path.exists(conn.cert_file):
raise IOError("Could not find the TLS certificate file, "
"invalid path: {0}".format(conn.cert_file))
if conn.key_file and not os.path.exists(conn.key_file):
raise IOError("Could not find the TLS key file, "
"invalid path: {0}".format(conn.key_file))
def build_response(self, req, resp):
"""Builds a :class:`Response <requests.Response>` object from a urllib3
response. This should not be called from user code, and is only exposed
for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
:param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
:param resp: The urllib3 response object.
:rtype: requests.Response
"""
response = Response()
# Fallback to None if there's no status_code, for whatever reason.
response.status_code = getattr(resp, 'status', None)
# Make headers case-insensitive.
response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
response.raw = resp
response.reason = response.raw.reason
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
# Add new cookies from the server.
extract_cookies_to_jar(response.cookies, req, resp)
# Give the Response some context.
response.request = req
response.connection = self
return response
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
:rtype: urllib3.ConnectionPool
"""
proxy = select_proxy(url, proxies)
if proxy:
proxy = prepend_scheme_if_needed(proxy, 'http')
proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url)
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn
def close(self):
"""Disposes of any internal state.
Currently, this closes the PoolManager and any active ProxyManager,
which closes any pooled connections.
"""
self.poolmanager.clear()
for proxy in self.proxy_manager.values():
proxy.clear()
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
If the message is being sent through an HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.
:rtype: str
"""
proxy = select_proxy(request.url, proxies)
scheme = urlparse(request.url).scheme
is_proxied_http_request = (proxy and scheme != 'https')
using_socks_proxy = False
if proxy:
proxy_scheme = urlparse(proxy).scheme.lower()
using_socks_proxy = proxy_scheme.startswith('socks')
url = request.path_url
if is_proxied_http_request and not using_socks_proxy:
url = urldefragauth(request.url)
return url
def add_headers(self, request, **kwargs):
"""Add any headers needed by the connection. As of v2.0 this does
nothing by default, but is left for overriding by users that subclass
the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
:param kwargs: The keyword arguments from the call to send().
"""
pass
def proxy_headers(self, proxy):
"""Returns a dictionary of the headers to add to any request sent
through a proxy. This works with urllib3 magic to ensure that they are
correctly sent to the proxy, rather than in a tunnelled request if
CONNECT is being used.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxies: The url of the proxy being used for this request.
:rtype: dict
"""
headers = {}
username, password = get_auth_from_url(proxy)
if username:
headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
return headers
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple or urllib3 Timeout object
:param verify: (optional) Either a boolean, in which case it controls whether
we verify the server's TLS certificate, or a string, in which case it
must be a path to a CA bundle to use
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
:rtype: requests.Response
"""
conn = self.get_connection(request.url, proxies)
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
self.add_headers(request)
chunked = not (request.body is None or 'Content-Length' in request.headers)
if isinstance(timeout, tuple):
try:
connect, read = timeout
timeout = TimeoutSauce(connect=connect, read=read)
except ValueError as e:
# this may raise a string formatting error.
err = ("Invalid timeout {0}. Pass a (connect, read) "
"timeout tuple, or a single float to set "
"both timeouts to the same value".format(timeout))
raise ValueError(err)
elif isinstance(timeout, TimeoutSauce):
pass
else:
timeout = TimeoutSauce(connect=timeout, read=timeout)
try:
if not chunked:
resp = conn.urlopen(
method=request.method,
url=url,
body=request.body,
headers=request.headers,
redirect=False,
assert_same_host=False,
preload_content=False,
decode_content=False,
retries=self.max_retries,
timeout=timeout
)
# Send the request.
else:
if hasattr(conn, 'proxy_pool'):
conn = conn.proxy_pool
low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT)
try:
low_conn.putrequest(request.method,
url,
skip_accept_encoding=True)
for header, value in request.headers.items():
low_conn.putheader(header, value)
low_conn.endheaders()
for i in request.body:
low_conn.send(hex(len(i))[2:].encode('utf-8'))
low_conn.send(b'\r\n')
low_conn.send(i)
low_conn.send(b'\r\n')
low_conn.send(b'0\r\n\r\n')
# Receive the response from the server
try:
# For Python 2.7+ versions, use buffering of HTTP
# responses
r = low_conn.getresponse(buffering=True)
except TypeError:
# For compatibility with Python 2.6 versions and back
r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(
r,
pool=conn,
connection=low_conn,
preload_content=False,
decode_content=False
)
except:
# If we hit any problems here, clean up the connection.
# Then, reraise so that we can handle the actual exception.
low_conn.close()
raise
except (ProtocolError, socket.error) as err:
raise ConnectionError(err, request=request)
except MaxRetryError as e:
if isinstance(e.reason, ConnectTimeoutError):
# TODO: Remove this in 3.0.0: see #2811
if not isinstance(e.reason, NewConnectionError):
raise ConnectTimeout(e, request=request)
if isinstance(e.reason, ResponseError):
raise RetryError(e, request=request)
if isinstance(e.reason, _ProxyError):
raise ProxyError(e, request=request)
raise ConnectionError(e, request=request)
except ClosedPoolError as e:
raise ConnectionError(e, request=request)
except _ProxyError as e:
raise ProxyError(e)
except (_SSLError, _HTTPError) as e:
if isinstance(e, _SSLError):
raise SSLError(e, request=request)
elif isinstance(e, ReadTimeoutError):
raise ReadTimeout(e, request=request)
else:
raise
return self.build_response(request, resp)
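# A minimal usage sketch (illustrative; mirrors the HTTPAdapter docstring):
# mount an adapter with retries, then pass a (connect, read) timeout tuple,
# which send() converts into a urllib3 Timeout above.
#
#     import requests
#     s = requests.Session()
#     s.mount('https://', HTTPAdapter(max_retries=3))
#     r = s.get('https://example.org', timeout=(3.05, 27))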
| 0.001262 |
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((11940.8, 9337.41, -158.497), (0.7, 0.7, 0.7), 507.685)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((12195.5, 9106.45, -1101.02), (0.7, 0.7, 0.7), 479.978)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((10973.7, 8381.8, 247.435), (0.7, 0.7, 0.7), 681.834)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((9509.51, 7502.26, 1857.76), (0.7, 0.7, 0.7), 522.532)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((9049.4, 7235.22, 2374.42), (0, 1, 0), 751.925)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((9271.81, 5480.11, 1126.19), (0.7, 0.7, 0.7), 437.001)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((8891.71, 4722.9, 2841.27), (0.7, 0.7, 0.7), 710.767)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((9264.8, 3068.69, 3090.96), (0.7, 0.7, 0.7), 762.077)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((9587.39, 2345.5, 4456.43), (0.7, 0.7, 0.7), 726.799)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((9372.65, 1421.29, 6023.79), (0.7, 0.7, 0.7), 885.508)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((10384.3, 2018.85, 7266.34), (0.7, 0.7, 0.7), 778.489)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((12030.1, 740.68, 7673.3), (0.7, 0.7, 0.7), 790.333)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((13581.2, -581.047, 7931.29), (0.7, 0.7, 0.7), 707.721)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((13475.7, 146.89, 6484.97), (0.7, 0.7, 0.7), 651.166)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((12253.4, -548.309, 7403.08), (0.7, 0.7, 0.7), 708.61)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((10716, -250.452, 7660.92), (0.7, 0.7, 0.7), 490.595)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((9688.92, 505.105, 6940.12), (0.7, 0.7, 0.7), 591.565)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((8489.39, 1369.57, 6361.24), (0.7, 0.7, 0.7), 581.287)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((8300.87, 988.724, 4612.07), (0.7, 0.7, 0.7), 789.529)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((6821.07, 1179.69, 4341.4), (0.7, 0.7, 0.7), 623.587)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((5037.12, 1018.56, 4308.68), (0.7, 0.7, 0.7), 1083.56)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((3860.26, -168.363, 4360.48), (0.7, 0.7, 0.7), 504.258)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((4417.72, 1037.95, 4913.69), (0.7, 0.7, 0.7), 805.519)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((5257.27, 2248.03, 6444.98), (0.7, 0.7, 0.7), 631.708)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((5966.74, 2786.32, 8402.05), (0.7, 0.7, 0.7), 805.942)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((6289.63, 2979.25, 9397.68), (1, 0.7, 0), 672.697)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((5529.08, 5570.16, 9035.92), (0.7, 0.7, 0.7), 797.863)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((4760.85, 7131.28, 9708.64), (1, 0.7, 0), 735.682)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((3786.6, 7360.01, 8945.25), (0.7, 0.7, 0.7), 602.14)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((1654.98, 7410.22, 8021.9), (0.7, 0.7, 0.7), 954.796)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((2487.12, 7674.97, 7874.06), (0.7, 0.7, 0.7), 1021.88)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((2296.04, 8839.24, 8689.6), (0.7, 0.7, 0.7), 909.323)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((1332.12, 10891.2, 8555.1), (0.7, 0.7, 0.7), 621.049)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((2253.76, 11876.7, 7994.34), (0.7, 0.7, 0.7), 525.154)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((2973.08, 12304, 6725.2), (0.7, 0.7, 0.7), 890.246)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((2849.51, 13287.1, 5241.82), (0.7, 0.7, 0.7), 671.216)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((2150.15, 13246.5, 3662.35), (0.7, 0.7, 0.7), 662.672)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((1533.01, 11876.1, 4094.41), (0.7, 0.7, 0.7), 646.682)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((930.542, 12322.5, 5434.01), (0.7, 0.7, 0.7), 769.945)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((2140.21, 11729.7, 6912.62), (0.7, 0.7, 0.7), 606.92)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((2388.73, 12815.5, 7471.97), (0.7, 0.7, 0.7), 622.571)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((2840.28, 11558.1, 7238.14), (0.7, 0.7, 0.7), 466.865)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((3207.73, 11615.9, 6493.22), (0.7, 0.7, 0.7), 682.933)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((2851.89, 11765.5, 7132.13), (0.7, 0.7, 0.7), 809.326)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((2411.48, 10531.3, 8401.7), (0.7, 0.7, 0.7), 796.72)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((4521.95, 8917.17, 9506.66), (0.7, 0.7, 0.7), 870.026)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((6159.18, 9125.57, 10404.4), (0.7, 0.7, 0.7), 909.577)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((7182.6, 9642.03, 10404.9), (0, 1, 0), 500.536)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((7812.64, 11430.5, 11020.9), (0.7, 0.7, 0.7), 725.276)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((8034.26, 13635.6, 12511.2), (0.7, 0.7, 0.7), 570.331)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((6557.43, 14208, 11872), (0.7, 0.7, 0.7), 492.203)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((5027.99, 11762.6, 12234.2), (0, 1, 0), 547.7)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((5139.78, 11881.3, 11476.4), (0.7, 0.7, 0.7), 581.921)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((4503.82, 13181.9, 10230.1), (0.7, 0.7, 0.7), 555.314)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((4542.31, 13826.8, 8823.6), (0.7, 0.7, 0.7), 404.219)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((5277.87, 12715, 7563.24), (0.7, 0.7, 0.7), 764.234)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| 0.025214 |
#!/usr/bin/env python
# Install.py tool to download, unpack, build, and link to the Voro++ library
# used to automate the steps described in the README file in this dir
from __future__ import print_function
import sys,os,re,subprocess
# help message
help = """
Syntax from src dir: make lib-voronoi args="-b"
or: make lib-voronoi args="-p /usr/local/voro++-0.4.6"
or: make lib-voronoi args="-b -v voro++-0.4.6"
Syntax from lib dir: python Install.py -b -v voro++-0.4.6
or: python Install.py -b
or: python Install.py -p /usr/local/voro++-0.4.6
specify one or more options, order does not matter
-b = download and build the Voro++ library
-p = specify folder of existing Voro++ installation
-v = set version of Voro++ to download and build (default voro++-0.4.6)
Example:
make lib-voronoi args="-b" # download/build in lib/voronoi/voro++-0.4.6
make lib-voronoi args="-p $HOME/voro++-0.4.6" # use existing Voro++ installation in $HOME/voro++-0.4.6
"""
# settings
version = "voro++-0.4.6"
url = "http://math.lbl.gov/voro++/download/dir/%s.tar.gz" % version
# print error message or help
def error(str=None):
if not str: print(help)
else: print("ERROR",str)
sys.exit()
# expand to full path name
# process leading '~' or relative path
def fullpath(path):
return os.path.abspath(os.path.expanduser(path))
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
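# Illustrative: which('curl') returns something like '/usr/bin/curl' when curl is on the
# PATH, or None otherwise (the exact path depends on the system).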
def geturl(url,fname):
success = False
if which('curl') != None:
cmd = 'curl -L -o "%s" %s' % (fname,url)
try:
subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
success = True
except subprocess.CalledProcessError as e:
print("Calling curl failed with: %s" % e.output.decode('UTF-8'))
if not success and which('wget') != None:
cmd = 'wget -O "%s" %s' % (fname,url)
try:
subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
success = True
except subprocess.CalledProcessError as e:
print("Calling wget failed with: %s" % e.output.decode('UTF-8'))
if not success:
error("Failed to download source code with 'curl' or 'wget'")
return
# parse args
args = sys.argv[1:]
nargs = len(args)
if nargs == 0: error()
homepath = "."
homedir = version
buildflag = False
pathflag = False
linkflag = True
iarg = 0
while iarg < nargs:
if args[iarg] == "-v":
if iarg+2 > nargs: error()
version = args[iarg+1]
iarg += 2
elif args[iarg] == "-p":
if iarg+2 > nargs: error()
voropath = fullpath(args[iarg+1])
pathflag = True
iarg += 2
elif args[iarg] == "-b":
buildflag = True
iarg += 1
else: error()
homepath = fullpath(homepath)
homedir = "%s/%s" % (homepath,version)
if (pathflag):
if not os.path.isdir(voropath): error("Voro++ path does not exist")
homedir = voropath
if (buildflag and pathflag):
error("Cannot use -b and -p flag at the same time")
if (not buildflag and not pathflag):
error("Have to use either -b or -p flag")
# download and unpack Voro++ tarball
if buildflag:
print("Downloading Voro++ ...")
geturl(url,"%s/%s.tar.gz" % (homepath,version))
print("Unpacking Voro++ tarball ...")
if os.path.exists("%s/%s" % (homepath,version)):
cmd = 'rm -rf "%s/%s"' % (homepath,version)
subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
cmd = 'cd "%s"; tar -xzvf %s.tar.gz' % (homepath,version)
subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
os.remove("%s/%s.tar.gz" % (homepath,version))
if os.path.basename(homedir) != version:
if os.path.exists(homedir):
cmd = 'rm -rf "%s"' % homedir
subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
os.rename("%s/%s" % (homepath,version),homedir)
# build Voro++
if buildflag:
print("Building Voro++ ...")
cmd = 'cd "%s"; make CXX=g++ CFLAGS="-fPIC -O3"' % homedir
txt = subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
print(txt.decode('UTF-8'))
# create 2 links in lib/voronoi to Voro++ src dir
if linkflag:
print("Creating links to Voro++ include and lib files")
if os.path.isfile("includelink") or os.path.islink("includelink"):
os.remove("includelink")
if os.path.isfile("liblink") or os.path.islink("liblink"):
os.remove("liblink")
cmd = 'ln -s "%s/src" includelink' % homedir
subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
cmd = 'ln -s "%s/src" liblink' % homedir
subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
| 0.019808 |
#!/usr/bin/python3
# Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import Adafruit_DHT
# Parse command line parameters.
sensor_args = { '11': Adafruit_DHT.DHT11,
'22': Adafruit_DHT.DHT22,
'2302': Adafruit_DHT.AM2302 }
if len(sys.argv) == 3 and sys.argv[1] in sensor_args:
sensor = sensor_args[sys.argv[1]]
pin = sys.argv[2]
else:
print('usage: sudo ./Adafruit_DHT.py [11|22|2302] GPIOpin#')
print('example: sudo ./Adafruit_DHT.py 2302 4 - Read from an AM2302 connected to GPIO #4')
sys.exit(1)
# Try to grab a sensor reading. Use the read_retry method which will retry up
# to 15 times to get a sensor reading (waiting 2 seconds between each retry).
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
# Note that sometimes you won't get a reading and
# the results will be null (because Linux can't
# guarantee the timing of calls to read the sensor).
# If this happens try again!
if humidity is not None and temperature is not None:
print('Temp={0:0.1f}*C Humidity={1:0.1f}%'.format(temperature, humidity))
else:
print('Failed to get reading. Try again!')
| 0.00737 |
#! /usr/bin/env python
# tokenize.py
#
# Parses a C/C++/C#/D/Java/Pawn/whatever file into an array of
# tuples (string, type)
#
# punctuator lookup table
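# Table layout, as consumed by tokenizer.parse_punctuator() below: each row is
#   [ match_char, alternatives_remaining_at_this_level, follow_up_row_index, token_or_None ]
# where a follow_up_row_index of 0 means the punctuator cannot be extended further
# (a reading of the traversal code; the table contents are unchanged).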
punc_table = [
[ '!', 25, 26, '!' ], # 0: '!'
[ '#', 24, 35, '#' ], # 1: '#'
[ '$', 23, 0, '$' ], # 2: '$'
[ '%', 22, 36, '%' ], # 3: '%'
[ '&', 21, 41, '&' ], # 4: '&'
[ '(', 20, 0, '(' ], # 5: '('
[ ')', 19, 0, ')' ], # 6: ')'
[ '*', 18, 43, '*' ], # 7: '*'
[ '+', 17, 44, '+' ], # 8: '+'
[ ',', 16, 0, ',' ], # 9: ','
[ '-', 15, 46, '-' ], # 10: '-'
[ '.', 14, 50, '.' ], # 11: '.'
[ '/', 13, 53, '/' ], # 12: '/'
[ ':', 12, 54, ':' ], # 13: ':'
[ ';', 11, 0, ';' ], # 14: ';'
[ '<', 10, 56, '<' ], # 15: '<'
[ '=', 9, 63, '=' ], # 16: '='
[ '>', 8, 65, '>' ], # 17: '>'
[ '?', 7, 0, '?' ], # 18: '?'
[ '[', 6, 70, '[' ], # 19: '['
[ ']', 5, 0, ']' ], # 20: ']'
[ '^', 4, 71, '^' ], # 21: '^'
[ '{', 3, 0, '{' ], # 22: '{'
[ '|', 2, 72, '|' ], # 23: '|'
[ '}', 1, 0, '}' ], # 24: '}'
[ '~', 0, 74, '~' ], # 25: '~'
[ '<', 3, 30, '!<' ], # 26: '!<'
[ '=', 2, 33, '!=' ], # 27: '!='
[ '>', 1, 34, '!>' ], # 28: '!>'
[ '~', 0, 0, '!~' ], # 29: '!~'
[ '=', 1, 0, '!<=' ], # 30: '!<='
[ '>', 0, 32, '!<>' ], # 31: '!<>'
[ '=', 0, 0, '!<>='], # 32: '!<>='
[ '=', 0, 0, '!==' ], # 33: '!=='
[ '=', 0, 0, '!>=' ], # 34: '!>='
[ '#', 0, 0, '##' ], # 35: '##'
[ ':', 2, 39, '%:' ], # 36: '%:'
[ '=', 1, 0, '%=' ], # 37: '%='
[ '>', 0, 0, '%>' ], # 38: '%>'
[ '%', 0, 40, None ], # 39: '%:%'
[ ':', 0, 0, '%:%:'], # 40: '%:%:'
[ '&', 1, 0, '&&' ], # 41: '&&'
[ '=', 0, 0, '&=' ], # 42: '&='
[ '=', 0, 0, '*=' ], # 43: '*='
[ '+', 1, 0, '++' ], # 44: '++'
[ '=', 0, 0, '+=' ], # 45: '+='
[ '-', 2, 0, '--' ], # 46: '--'
[ '=', 1, 0, '-=' ], # 47: '-='
[ '>', 0, 49, '->' ], # 48: '->'
[ '*', 0, 0, '->*' ], # 49: '->*'
[ '*', 1, 0, '.*' ], # 50: '.*'
[ '.', 0, 52, '..' ], # 51: '..'
[ '.', 0, 0, '...' ], # 52: '...'
[ '=', 0, 0, '/=' ], # 53: '/='
[ ':', 1, 0, '::' ], # 54: '::'
[ '>', 0, 0, ':>' ], # 55: ':>'
[ '%', 4, 0, '<%' ], # 56: '<%'
[ ':', 3, 0, '<:' ], # 57: '<:'
[ '<', 2, 61, '<<' ], # 58: '<<'
[ '=', 1, 0, '<=' ], # 59: '<='
[ '>', 0, 62, '<>' ], # 60: '<>'
[ '=', 0, 0, '<<=' ], # 61: '<<='
[ '=', 0, 0, '<>=' ], # 62: '<>='
[ '=', 0, 64, '==' ], # 63: '=='
[ '=', 0, 0, '===' ], # 64: '==='
[ '=', 1, 0, '>=' ], # 65: '>='
[ '>', 0, 67, '>>' ], # 66: '>>'
[ '=', 1, 0, '>>=' ], # 67: '>>='
[ '>', 0, 69, '>>>' ], # 68: '>>>'
[ '=', 0, 0, '>>>='], # 69: '>>>='
[ ']', 0, 0, '[]' ], # 70: '[]'
[ '=', 0, 0, '^=' ], # 71: '^='
[ '=', 1, 0, '|=' ], # 72: '|='
[ '|', 0, 0, '||' ], # 73: '||'
[ '=', 1, 0, '~=' ], # 74: '~='
[ '~', 0, 0, '~~' ], # 75: '~~'
]
#
# Token types:
# 0 = newline
# 1 = punctuator
# 2 = integer
# 3 = float
# 4 = string
# 5 = identifier
#
class tokenizer:
def __init__(self):
self.tokens = []
self.text = ''
self.text_idx = 0
def tokenize_text (self, in_text):
self.tokens = []
self.text = in_text
self.text_idx = 0
print in_text
try:
while self.text_idx < len(self.text):
if self.parse_whitespace():
continue
elif self.text[self.text_idx] == '\\' and self.text[self.text_idx + 1] == '\n':
self.text_idx += 2
continue
elif self.parse_comment():
continue
elif self.parse_number():
continue
elif self.parse_identifier():
continue
elif self.parse_string():
continue
elif self.parse_punctuator():
continue
else:
print 'confused:', self.text[self.text_idx:]
break
except:
print 'bombed'
raise
def parse_whitespace(self):
start_idx = self.text_idx
hit_newline = False
while self.text_idx < len(self.text):
if self.text[self.text_idx] in '\n\r':
hit_newline = True
elif not self.text[self.text_idx] in ' \t':
break
self.text_idx += 1
if hit_newline:
self.tokens.append(('\n', 0))
return start_idx != self.text_idx
def parse_comment(self):
if not self.text[self.text_idx] == '/' or not self.text[self.text_idx + 1] in '/*':
return False
if self.text[self.text_idx + 1] == '/':
while self.text_idx < len(self.text):
if self.text[self.text_idx] in '\n\r':
break;
self.text_idx += 1
else:
while self.text_idx < len(self.text) - 1:
if self.text[self.text_idx] == '*' and self.text[self.text_idx + 1] == '/':
self.text_idx += 2
break;
self.text_idx += 1
return True
def parse_identifier(self):
if not self.text[self.text_idx].upper() in '@_ABCDEFGHIJKLMNOPQRSTUVWXYZ':
return False
start_idx = self.text_idx
while self.text_idx < len(self.text) and self.text[self.text_idx].upper() in '@_ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890':
self.text_idx += 1
self.tokens.append((self.text[start_idx : self.text_idx], 5))
return True
def parse_string(self):
starter = 0
start_ch = self.text[self.text_idx]
if start_ch == 'L':
starter = 1
start_ch = self.text[self.text_idx + 1]
if not start_ch in '"\'':
return False
start_idx = self.text_idx
self.text_idx += starter + 1
escaped = False
while self.text_idx < len(self.text):
if escaped:
escaped = False
else:
if self.text[self.text_idx] == '\\':
escaped = True
elif self.text[self.text_idx] == start_ch:
self.text_idx += 1
break;
self.text_idx += 1
self.tokens.append((self.text[start_idx : self.text_idx], 4))
return True
# Checks for punctuators
# Returns whether a punctuator was consumed (True or False)
def parse_punctuator(self):
tab_idx = 0
punc_len = 0
saved_punc = None
while 1:
pte = punc_table[tab_idx]
if pte[0] == self.text[self.text_idx]:
if pte[3] != None:
saved_punc = pte[3]
self.text_idx += 1
tab_idx = pte[2]
if tab_idx == 0:
break
elif pte[1] == 0:
break
else:
tab_idx += 1
if saved_punc != None:
self.tokens.append((saved_punc, 1))
return True
return False
def parse_number(self):
# A number must start with a digit or a dot followed by a digit
ch = self.text[self.text_idx]
if not ch.isdigit() and (ch != '.' or not self.text[self.text_idx + 1].isdigit()):
return False;
token_type = 2 # integer
if (ch == '.'):
token_type = 3 # float
did_hex = False;
start_idx = self.text_idx
# Check for Hex, Octal, or Binary
# Note that only D and Pawn support binary, but who cares?
#
if ch == '0':
self.text_idx += 1
ch = self.text[self.text_idx].upper()
if ch == 'X': # hex
did_hex = True
self.text_idx += 1
while self.text[self.text_idx] in '_0123456789abcdefABCDEF':
self.text_idx += 1
elif ch == 'B': # binary
self.text_idx += 1
while self.text[self.text_idx] in '_01':
self.text_idx += 1
            elif ch >= '0' and ch <= '7': # octal (but allow decimal)
self.text_idx += 1
while self.text[self.text_idx] in '_0123456789':
self.text_idx += 1
else:
# either just 0 or 0.1 or 0UL, etc
pass
else:
# Regular int or float
while self.text[self.text_idx] in '_0123456789':
self.text_idx += 1
# Check if we stopped on a decimal point
if self.text[self.text_idx] == '.':
self.text_idx += 1
token_type = 3 # float
if did_hex:
while self.text[self.text_idx] in '_0123456789abcdefABCDEF':
self.text_idx += 1
else:
while self.text[self.text_idx] in '_0123456789':
self.text_idx += 1
# Check exponent
# Valid exponents per language (not that it matters):
# C/C++/D/Java: eEpP
# C#/Pawn: eE
if self.text[self.text_idx] in 'eEpP':
token_type = 3 # float
self.text_idx += 1
if self.text[self.text_idx] in '+-':
self.text_idx += 1
while self.text[self.text_idx] in '_0123456789':
self.text_idx += 1
# Check the suffixes
# Valid suffixes per language (not that it matters):
# Integer Float
# C/C++: uUlL lLfF
# C#: uUlL fFdDMm
# D: uUL ifFL
# Java: lL fFdD
# Pawn: (none) (none)
#
# Note that i, f, d, and m only appear in floats.
while 1:
            if self.text[self.text_idx] in 'iIfFdDmM':
token_type = 3 # float
elif not self.text[self.text_idx] in 'lLuU':
break;
self.text_idx += 1
self.tokens.append((self.text[start_idx : self.text_idx], token_type))
return True
text = """
1.23+4-3*16%2 *sin(1.e-3 + .5p32) "hello" and "hello\\"there"
123 // some comment
a = b + c;
#define abc \\
5
d = 5 /* hello */ + 3;
"""
t=tokenizer()
t.tokenize_text(text)
print t.tokens
| 0.048204 |
#!/usr/bin/env python
"""xml2json.py Convert XML to JSON
Relies on ElementTree for the XML parsing. This is based on
pesterfish.py but uses a different XML->JSON mapping.
The XML->JSON mapping is described at
http://www.xml.com/pub/a/2006/05/31/converting-between-xml-and-json.html
Rewritten to a command line utility by Hay Kranen < github.com/hay > with
contributions from George Hamilton (gmh04) and Dan Brown (jdanbrown)
XML JSON
<e/> "e": null
<e>text</e> "e": "text"
<e name="value" /> "e": { "@name": "value" }
<e name="value">text</e> "e": { "@name": "value", "#text": "text" }
<e> <a>text</a ><b>text</b> </e> "e": { "a": "text", "b": "text" }
<e> <a>text</a> <a>text</a> </e> "e": { "a": ["text", "text"] }
<e> text <a>text</a> </e> "e": { "#text": "text", "a": "text" }
This is very similar to the mapping used for Yahoo Web Services
(http://developer.yahoo.com/common/json.html#xml).
This is a mess in that it is so unpredictable -- it requires lots of testing
(e.g. to see if values are lists or strings or dictionaries). For use
in Python this could be vastly cleaner. Think about whether the internal
form can be more self-consistent while maintaining good external
characteristics for the JSON.
Look at the Yahoo version closely to see how it works. Maybe can adopt
that completely if it makes more sense...
R. White, 2006 November 6
"""
import json
import optparse
import sys
import os
import xml.etree.cElementTree as ET
def strip_tag(tag):
strip_ns_tag = tag
split_array = tag.split('}')
if len(split_array) > 1:
strip_ns_tag = split_array[1]
tag = strip_ns_tag
return tag
def elem_to_internal(elem, strip_ns=1, strip=1):
"""Convert an Element into an internal dictionary (not JSON!)."""
d = {}
elem_tag = elem.tag
if strip_ns:
elem_tag = strip_tag(elem.tag)
else:
for key, value in list(elem.attrib.items()):
d['@' + key] = value
# loop over subelements to merge them
for subelem in elem:
v = elem_to_internal(subelem, strip_ns=strip_ns, strip=strip)
tag = subelem.tag
if strip_ns:
tag = strip_tag(subelem.tag)
value = v[tag]
try:
# add to existing list for this tag
d[tag].append(value)
except AttributeError:
# turn existing entry into a list
d[tag] = [d[tag], value]
except KeyError:
# add a new non-list entry
d[tag] = value
text = elem.text
tail = elem.tail
if strip:
# ignore leading and trailing whitespace
if text:
text = text.strip()
if tail:
tail = tail.strip()
if tail:
d['#tail'] = tail
if d:
# use #text element if other attributes exist
if text:
d["#text"] = text
else:
# text is the value if no attributes
d = text or None
return {elem_tag: d}
def internal_to_elem(pfsh, factory=ET.Element):
"""Convert an internal dictionary (not JSON!) into an Element.
Whatever Element implementation we could import will be
used by default; if you want to use something else, pass the
Element class as the factory parameter.
"""
attribs = {}
text = None
tail = None
sublist = []
tag = list(pfsh.keys())
if len(tag) != 1:
raise ValueError("Illegal structure with multiple tags: %s" % tag)
tag = tag[0]
value = pfsh[tag]
if isinstance(value, dict):
for k, v in list(value.items()):
if k[:1] == "@":
attribs[k[1:]] = v
elif k == "#text":
text = v
elif k == "#tail":
tail = v
elif isinstance(v, list):
for v2 in v:
sublist.append(internal_to_elem({k: v2}, factory=factory))
else:
sublist.append(internal_to_elem({k: v}, factory=factory))
else:
text = value
e = factory(tag, attribs)
for sub in sublist:
e.append(sub)
e.text = text
e.tail = tail
return e
def elem2json(elem, options, strip_ns=1, strip=1):
"""Convert an ElementTree or Element into a JSON string."""
if hasattr(elem, 'getroot'):
elem = elem.getroot()
if options.pretty:
return json.dumps(elem_to_internal(elem, strip_ns=strip_ns, strip=strip), sort_keys=True, indent=4, separators=(',', ': '))
else:
return json.dumps(elem_to_internal(elem, strip_ns=strip_ns, strip=strip))
def json2elem(json_data, factory=ET.Element):
"""Convert a JSON string into an Element.
Whatever Element implementation we could import will be used by
default; if you want to use something else, pass the Element class
as the factory parameter.
"""
return internal_to_elem(json.loads(json_data), factory)
def xml2json(xmlstring, options, strip_ns=1, strip=1):
"""Convert an XML string into a JSON string."""
elem = ET.fromstring(xmlstring)
return elem2json(elem, options, strip_ns=strip_ns, strip=strip)
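# Illustrative usage (not part of the original module; `options` only needs a boolean
# `pretty` attribute, e.g. the optparse options built in main()):
#   xml2json('<e name="value">text</e>', options, strip_ns=0, strip=0)
#   -> '{"e": {"@name": "value", "#text": "text"}}'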
def json2xml(json_data, factory=ET.Element):
"""Convert a JSON string into an XML string.
Whatever Element implementation we could import will be used by
default; if you want to use something else, pass the Element class
as the factory parameter.
"""
if not isinstance(json_data, dict):
json_data = json.loads(json_data)
elem = internal_to_elem(json_data, factory)
return ET.tostring(elem)
def main():
p = optparse.OptionParser(
description='Converts XML to JSON or the other way around. Reads from standard input by default, or from file if given.',
prog='xml2json',
usage='%prog -t xml2json -o file.json [file]'
)
p.add_option('--type', '-t', help="'xml2json' or 'json2xml'", default="xml2json")
p.add_option('--out', '-o', help="Write to OUT instead of stdout")
p.add_option(
'--strip_text', action="store_true",
dest="strip_text", help="Strip text for xml2json")
p.add_option(
'--pretty', action="store_true",
dest="pretty", help="Format JSON output so it is easier to read")
p.add_option(
'--strip_namespace', action="store_true",
dest="strip_ns", help="Strip namespace for xml2json")
p.add_option(
'--strip_newlines', action="store_true",
dest="strip_nl", help="Strip newlines for xml2json")
options, arguments = p.parse_args()
inputstream = sys.stdin
if len(arguments) == 1:
try:
inputstream = open(arguments[0])
except:
sys.stderr.write("Problem reading '{0}'\n".format(arguments[0]))
p.print_help()
sys.exit(-1)
input = inputstream.read()
strip = 0
strip_ns = 0
if options.strip_text:
strip = 1
if options.strip_ns:
strip_ns = 1
if options.strip_nl:
input = input.replace('\n', '').replace('\r','')
if (options.type == "xml2json"):
out = xml2json(input, options, strip_ns, strip)
else:
out = json2xml(input)
if (options.out):
file = open(options.out, 'w')
file.write(out)
file.close()
else:
print(out)
if __name__ == "__main__":
main()
| 0.001077 |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# usage: action_maketokenizer.py OUTPUTS -- INPUTS
#
# Multiple INPUTS may be listed. The sections are separated by -- arguments.
#
# OUTPUTS must contain a single item: a path to tokenizer.cpp.
#
# INPUTS must contain exactly two items. The first item must be the path to
# maketokenizer. The second item must be the path to tokenizer.flex.
import os
import subprocess
import sys
def SplitArgsIntoSections(args):
sections = []
while len(args) > 0:
if not '--' in args:
# If there is no '--' left, everything remaining is an entire section.
dashes = len(args)
else:
dashes = args.index('--')
sections.append(args[:dashes])
# Next time through the loop, look at everything after this '--'.
if dashes + 1 == len(args):
# If the '--' is at the end of the list, we won't come back through the
# loop again. Add an empty section now corresponding to the nothingness
# following the final '--'.
args = []
sections.append(args)
else:
args = args[dashes + 1:]
return sections
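# Illustrative: SplitArgsIntoSections(['out.cpp', '--', 'maketokenizer', 'tokenizer.flex'])
# returns [['out.cpp'], ['maketokenizer', 'tokenizer.flex']] (file names are made up).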
def main(args):
sections = SplitArgsIntoSections(args[1:])
assert len(sections) == 2
(outputs, inputs) = sections
assert len(outputs) == 1
output = outputs[0]
assert len(inputs) == 2
maketokenizer = inputs[0]
flexInput = inputs[1]
# Do it. check_call is new in 2.5, so simulate its behavior with call and
# assert.
outfile = open(output, 'wb')
p1 = subprocess.Popen(['flex', '-t', flexInput], stdout=subprocess.PIPE)
p2 = subprocess.Popen(['perl', maketokenizer], stdin=p1.stdout, stdout=outfile)
r1 = p1.wait()
r2 = p2.wait()
assert r1 == 0
assert r2 == 0
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 0.002243 |
from pkgutil import extend_path
from i3pystatus.core import Status
from i3pystatus.core.modules import Module, IntervalModule
from i3pystatus.core.settings import SettingsBase
from i3pystatus.core.util import formatp, get_module
import argparse
import imp
import logging
import os
__path__ = extend_path(__path__, __name__)
__all__ = [
"Status",
"Module", "IntervalModule",
"SettingsBase",
"formatp",
"get_module",
]
if not os.path.exists(os.path.join(os.path.expanduser("~"), ".i3pystatus-log")):
os.mkdir(os.path.join(os.path.expanduser("~"), ".i3pystatus-log"), 0o755)
logpath = os.path.join(os.path.expanduser("~"), ".i3pystatus-log", ".i3pystatus-%s" % os.getpid())
handler = logging.FileHandler(logpath, delay=True)
logger = logging.getLogger("i3pystatus")
logger.addHandler(handler)
logger.setLevel(logging.CRITICAL)
def clock_example():
from i3pystatus.clock import Clock
status = Status()
status.register(Clock())
status.run()
def main():
parser = argparse.ArgumentParser(description='''
    Run an i3pystatus configuration file. Starts the i3pystatus clock example if no arguments are provided.
''')
parser.add_argument('-c', '--config', help='path to configuration file', default=None, required=False)
args = parser.parse_args()
if args.config:
module_name = "i3pystatus-config"
imp.load_source(module_name, args.config)
else:
clock_example()
| 0.003453 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Simulacrum documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 22 16:20:50 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'Simulacrum'
# General information about the project.
project = 'Simulacrum'
copyright = '2014, M.A. Hicks'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
rst_epilog = """
.. raw:: html
<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','//www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-54603999-1', 'auto');
ga('send', 'pageview');
</script>
"""
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# Agogo:
# "headerbg" : "#CCAD00",
# "headercolor1" : "#000000",
# "headerlinkcolor" : "#000000"
# Haiku:
# "textcolor" : "#000000",
# "headingcolor" : "#000000",
# "linkcolor" : "",
# "visitedlinkcolor": "",
# "hoverlinkcolor" : ""
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Simulacrum Project"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "Simulacrum"
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "Images/slogo_small.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "Images/simulacrum.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Simulacrumdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('Simulacrum', 'Simulacrum.tex', 'Simulacrum Documentation',
'M.A. Hicks', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "Images/slogo_small.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('Simulacrum', 'simulacrum', 'Simulacrum Documentation',
['M.A. Hicks'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('Simulacrum', 'Simulacrum', 'Simulacrum Documentation',
'M.A. Hicks', 'Simulacrum', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = 'Simulacrum'
epub_author = 'M.A. Hicks'
epub_publisher = 'M.A. Hicks'
epub_copyright = '2013 --, M.A. Hicks'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| 0.005848 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import mxnet as mx
from mxnet import nd
from .ndarray_utils import get_mx_ndarray, nd_forward_and_profile, nd_forward_backward_and_profile
from .common_utils import merge_map_list
from .op_registry_utils import prepare_op_inputs
from benchmark.opperf.rules.default_params import PARAMS_OF_TYPE_NDARRAY
from .profiler_utils import cpp_profile, python_profile
no_backward = {'gather_nd', 'softmax_cross_entropy', 'linalg_gelqf', 'linalg_slogdet', 'moments', 'SequenceLast', 'Embedding'}
def _prepare_op_inputs(inputs, run_backward, dtype, ctx):
mx.random.seed(41)
kwargs_list = []
for inp in inputs:
kwargs = {}
for key, value in inp.items():
if key in PARAMS_OF_TYPE_NDARRAY:
kwargs[key] = get_mx_ndarray(ctx=ctx, in_tensor=value,
dtype=dtype,
initializer=nd.normal,
attach_grad=run_backward)
else:
kwargs[key] = value
kwargs_list.append(kwargs)
return kwargs_list
def parse_input_ndarray(input_dict):
"""Parse input for ndarray and extract array shape for better readability
Parameters
----------
input_dict : dict
Dictionary of input
Input Dictionary
'inputs': {'weight':
[[ 2.2122064 0.7740038 1.0434405 1.1839255 1.8917114 ]
[-1.2347414 -1.771029 -0.45138445 0.57938355 -1.856082 ]
[-1.9768796 -0.20801921 0.2444218 -0.03716067 -0.48774993]
[-0.02261727 0.57461417 1.4661262 0.6862904 0.35496104]
[ 1.0731696 0.12017461 -0.9711102 -0.77569664 -0.7882176 ]]
<NDArray 5x5 @cpu(0)>, 'grad':
[[ 0.7417728 -1.4734439 -1.0730928 -1.0424827 -1.3278849 ]
[-1.4749662 -0.52414197 1.2662556 0.8950642 -0.6015945 ]
[ 1.2040559 -0.9712193 -0.58256227 0.3717077 0.9300072 ]
[-1.4225755 -0.5176199 2.0088325 0.2863085 0.5604595 ]
[ 0.96975976 -0.52853745 -1.88909 0.65479124 -0.45481315]]
<NDArray 5x5 @cpu(0)>, 'mean':
[[ 0.32510808 -1.3002341 0.3679345 1.4534262 0.24154152]
[ 0.47898006 0.96885103 -1.0218245 -0.06812762 -0.31868345]
[-0.17634277 0.35655284 0.74419165 0.7787424 0.6087823 ]
[ 1.0741756 0.06642842 0.8486986 -0.8003802 -0.16882208]
[ 0.93632793 0.357444 0.77932847 -1.0103073 -0.39157307]]
<NDArray 5x5 @cpu(0)>, 'var':
[[ 1.3166187 -0.43292624 0.71535987 0.9254156 -0.90495086]
[-0.074684 0.82254 -1.8785107 0.8858836 1.9118724 ]
[ 0.33342266 0.11883813 -1.9198899 -0.67558455 1.007749 ]
[-0.35391203 1.6323917 -0.33354783 -1.7378405 0.7737382 ]
[ 0.89126545 3.2904532 -1.1976235 1.8938874 -0.5669272 ]]
<NDArray 5x5 @cpu(0)>, 't': 1, 'wd': 0.1}
Output
    {'inputs': {'weight': '<NDArray 5x5 @cpu(0)>', 'grad': '<NDArray 5x5 @cpu(0)>', 'mean': '<NDArray 5x5 @cpu(0)>', 'var': '<NDArray 5x5 @cpu(0)>', 't': 1, 'wd': 0.1}}
"""
    no_new_line_input_dict = dict()
    for key, value in input_dict.items():
        if isinstance(value, nd.NDArray):
            # if value in input is NDArray then extract last line only
            val = str(value).split('\n')[-1]
            no_new_line_input_dict[key] = val
        else:
            no_new_line_input_dict[key] = value
    return no_new_line_input_dict
def _run_nd_operator_performance_test(op, inputs, run_backward, warmup, runs, kwargs_list, profiler):
if profiler == 'native':
if run_backward:
benchmark_helper_func = cpp_profile(nd_forward_backward_and_profile)
else:
benchmark_helper_func = cpp_profile(nd_forward_and_profile)
elif profiler == 'python':
if run_backward:
benchmark_helper_func = python_profile(nd_forward_backward_and_profile)
else:
benchmark_helper_func = python_profile(nd_forward_and_profile)
else:
raise ValueError("Incorrect input for profiler. Valid input - 'python' or 'native'")
# Warm up, ignore the profiler output
_, _ = benchmark_helper_func(op, warmup, **kwargs_list[0])
# Run Benchmarks
op_benchmark_result = {op.__name__: []}
logging.info("Begin Benchmark - {name}".format(name=op.__name__))
for idx, kwargs in enumerate(kwargs_list):
_, profiler_output = benchmark_helper_func(op, runs, **kwargs)
# Add inputs used for profiling this operator into result
# parse input if it contains ndarray, replace with shape info for better markdown readability
new_inp = parse_input_ndarray(inputs[idx])
profiler_output = merge_map_list([{"inputs": new_inp}] + [profiler_output])
op_benchmark_result[op.__name__].append(profiler_output)
logging.info("Complete Benchmark - {name}".format(name=op.__name__))
return op_benchmark_result
def run_performance_test(ops, inputs, run_backward=True,
dtype='float32', ctx=mx.cpu(), profiler='native',
warmup=10, runs=50):
"""Run operator benchmark for given operator or list of operators, ops, with the given inputs.
Returns benchmark results as a list of dictionary where each dictionary represents benchmarks result per operator.
key -> name of the operator and value -> map of results (forward time, backward time, time spent in memory
operations.
Parameters
----------
ops: [Str]
One or list of operators to benchmark. Should be an NDArray operator.
inputs: map
Inputs for operator. Key should be name of parameter for operator.
Example: inputs = {"lhs": (1024, 1024), "rhs": (1024, 1024)} for mx.nd.add
run_backward: Boolean, Default is True
Should we have backward operator benchmarks.
dtype: Str, default 'float32'
Precision to use for input tensors. Defaults to float32. Example: 'float32', 'int64'
ctx: mx.ctx, default mx.cpu()
Context to use for benchmarks. Default to mx.cpu()
profiler: Str, default 'native'
Type of profiler to run benchmarks. Default to 'native'
Option - ['python', 'native']
warmup: int, default 10
Number of warmup runs
runs: int, default 50
Number of runs for capturing benchmark results
Returns
-------
List of dictionary of benchmark results. key -> name of the operator, Value is benchmark results.
Note: when run_performance_test is called on the nd.Embedding operator with run_backward=True, an error will
be thrown. Track issue here: https://github.com/apache/incubator-mxnet/issues/11314
"""
kwargs_list = _prepare_op_inputs(inputs, run_backward, dtype, ctx)
if not isinstance(ops, list):
ops = [ops]
op_benchmark_result = []
for op in ops:
if hasattr(mx.nd, op.__name__):
benchmark_result = _run_nd_operator_performance_test(op, inputs, run_backward, warmup, runs, kwargs_list, profiler)
else:
raise ValueError("Unknown NDArray operator provided to benchmark. - ", op.__name__)
op_benchmark_result.append(benchmark_result)
return op_benchmark_result
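# Illustrative usage (a sketch, not part of the original module; assumes MXNet with the
# default CPU context):
#   results = run_performance_test(mx.nd.add,
#                                  inputs=[{"lhs": (1024, 1024), "rhs": (1024, 1024)}],
#                                  run_backward=True, warmup=5, runs=25)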
def run_op_benchmarks(ops, dtype, ctx, profiler, int64_tensor, warmup, runs):
# Running im2col either forwards or backwards on GPU results in errors
# track issue here: https://github.com/apache/incubator-mxnet/issues/17493
gpu_disabled_ops = ['im2col']
# For each operator, run benchmarks
mx_op_benchmark_results = []
for op, op_params in ops.items():
if ctx == mx.cpu() or op not in gpu_disabled_ops:
# Prepare inputs for the operator
inputs = prepare_op_inputs(op, op_params, int64_tensor)
# setting backward false for ops with known issue
if op in no_backward:
op_params["has_backward"] = False
# Run benchmarks
cur_op_res = run_performance_test(op_params["nd_op_handle"],
run_backward=op_params["has_backward"],
dtype=dtype, ctx=ctx,
profiler=profiler,
inputs=inputs,
warmup=warmup, runs=runs)
mx_op_benchmark_results += cur_op_res
# Prepare combined results for all operators
mx_op_benchmark_results = merge_map_list(mx_op_benchmark_results)
return mx_op_benchmark_results
| 0.002783 |
# testing/schema.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import exclusions
from .. import schema, event
from . import config
__all__ = 'Table', 'Column',
table_options = {}
def Table(*args, **kw):
"""A schema.Table wrapper/hook for dialect-specific tweaks."""
test_opts = dict([(k, kw.pop(k)) for k in list(kw)
if k.startswith('test_')])
kw.update(table_options)
if exclusions.against(config._current, 'mysql'):
if 'mysql_engine' not in kw and 'mysql_type' not in kw:
if 'test_needs_fk' in test_opts or 'test_needs_acid' in test_opts:
kw['mysql_engine'] = 'InnoDB'
else:
kw['mysql_engine'] = 'MyISAM'
# Apply some default cascading rules for self-referential foreign keys.
    # MySQL InnoDB has some issues around selecting self-refs too.
if exclusions.against(config._current, 'firebird'):
table_name = args[0]
unpack = (config.db.dialect.
identifier_preparer.unformat_identifiers)
# Only going after ForeignKeys in Columns. May need to
# expand to ForeignKeyConstraint too.
fks = [fk
for col in args if isinstance(col, schema.Column)
for fk in col.foreign_keys]
for fk in fks:
# root around in raw spec
ref = fk._colspec
if isinstance(ref, schema.Column):
name = ref.table.name
else:
# take just the table name: on FB there cannot be
# a schema, so the first element is always the
# table name, possibly followed by the field name
name = unpack(ref)[0]
if name == table_name:
if fk.ondelete is None:
fk.ondelete = 'CASCADE'
if fk.onupdate is None:
fk.onupdate = 'CASCADE'
return schema.Table(*args, **kw)
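# Illustrative usage (a sketch, not from the original module; assumes a MetaData() named
# `metadata` and the usual sqlalchemy column types are imported):
#   users = Table('users', metadata,
#                 Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
#                 test_needs_fk=True)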
def Column(*args, **kw):
"""A schema.Column wrapper/hook for dialect-specific tweaks."""
test_opts = dict([(k, kw.pop(k)) for k in list(kw)
if k.startswith('test_')])
if not config.requirements.foreign_key_ddl.enabled_for_config(config):
args = [arg for arg in args if not isinstance(arg, schema.ForeignKey)]
col = schema.Column(*args, **kw)
if 'test_needs_autoincrement' in test_opts and \
kw.get('primary_key', False):
# allow any test suite to pick up on this
col.info['test_needs_autoincrement'] = True
# hardcoded rule for firebird, oracle; this should
# be moved out
if exclusions.against(config._current, 'firebird', 'oracle'):
def add_seq(c, tbl):
c._init_items(
schema.Sequence(_truncate_name(
config.db.dialect, tbl.name + '_' + c.name + '_seq'),
optional=True)
)
event.listen(col, 'after_parent_attach', add_seq, propagate=True)
return col
def _truncate_name(dialect, name):
if len(name) > dialect.max_identifier_length:
return name[0:max(dialect.max_identifier_length - 6, 0)] + \
"_" + hex(hash(name) % 64)[2:]
else:
return name
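# A hedged usage sketch, not part of this module: in a dialect-agnostic test
# these wrappers are used exactly like the plain schema constructs, plus the
# extra ``test_*`` hints that are popped above before reaching SQLAlchemy.
# The table and column names are illustrative.
#
#     from sqlalchemy import Integer, String, MetaData
#     metadata = MetaData()
#     parent = Table(
#         'parent', metadata,
#         Column('id', Integer, primary_key=True,
#                test_needs_autoincrement=True),
#         Column('data', String(50)),
#         test_needs_fk=True)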
| 0 |
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ecs_service_facts
short_description: list or describe services in ecs
description:
- Lists or describes services in ecs.
version_added: "2.1"
author:
- "Mark Chance (@Java1Guy)"
- "Darek Kaczynski (@kaczynskid)"
requirements: [ json, botocore, boto3 ]
options:
details:
description:
- Set this to true if you want detailed information about the services.
required: false
default: 'false'
type: bool
events:
description:
- Whether to return ECS service events. Only has an effect if C(details) is true.
required: false
default: 'true'
type: bool
version_added: "2.6"
cluster:
description:
      - The cluster ARN in which to list the services.
required: false
default: 'default'
service:
description:
- One or more services to get details for
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Basic listing example
- ecs_service_facts:
cluster: test-cluster
service: console-test-service
details: true
# Basic listing example
- ecs_service_facts:
cluster: test-cluster
'''
RETURN = '''
services:
description: When details is false, returns an array of service ARNs, otherwise an array of complex objects as described below.
returned: success
type: complex
contains:
clusterArn:
      description: The Amazon Resource Name (ARN) of the cluster that hosts the service.
returned: always
type: str
desiredCount:
description: The desired number of instantiations of the task definition to keep running on the service.
returned: always
type: int
loadBalancers:
description: A list of load balancer objects
returned: always
type: complex
contains:
loadBalancerName:
        description: The name of the load balancer.
returned: always
type: str
containerName:
description: The name of the container to associate with the load balancer.
returned: always
type: str
containerPort:
description: The port on the container to associate with the load balancer.
returned: always
type: int
pendingCount:
description: The number of tasks in the cluster that are in the PENDING state.
returned: always
type: int
runningCount:
description: The number of tasks in the cluster that are in the RUNNING state.
returned: always
type: int
serviceArn:
      description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, arn:aws:ecs:region:012345678910:service/my-service.
returned: always
type: str
serviceName:
description: A user-generated string used to identify the service
returned: always
type: str
status:
description: The valid values are ACTIVE, DRAINING, or INACTIVE.
returned: always
type: str
taskDefinition:
description: The ARN of a task definition to use for tasks in the service.
returned: always
type: str
deployments:
description: list of service deployments
returned: always
type: list of complex
events:
description: list of service events
returned: when events is true
type: list of complex
''' # NOQA
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import ec2_argument_spec, AWSRetry
class EcsServiceManager:
"""Handles ECS Services"""
def __init__(self, module):
self.module = module
self.ecs = module.client('ecs')
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def list_services_with_backoff(self, **kwargs):
paginator = self.ecs.get_paginator('list_services')
try:
return paginator.paginate(**kwargs).build_full_result()
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'ClusterNotFoundException':
self.module.fail_json_aws(e, "Could not find cluster to list services")
else:
raise
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def describe_services_with_backoff(self, **kwargs):
return self.ecs.describe_services(**kwargs)
def list_services(self, cluster):
fn_args = dict()
if cluster and cluster is not None:
fn_args['cluster'] = cluster
try:
response = self.list_services_with_backoff(**fn_args)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't list ECS services")
relevant_response = dict(services=response['serviceArns'])
return relevant_response
def describe_services(self, cluster, services):
fn_args = dict()
if cluster and cluster is not None:
fn_args['cluster'] = cluster
fn_args['services'] = services
try:
response = self.describe_services_with_backoff(**fn_args)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't describe ECS services")
running_services = [self.extract_service_from(service) for service in response.get('services', [])]
services_not_running = response.get('failures', [])
return running_services, services_not_running
def extract_service_from(self, service):
# some fields are datetime which is not JSON serializable
# make them strings
if 'deployments' in service:
for d in service['deployments']:
if 'createdAt' in d:
d['createdAt'] = str(d['createdAt'])
if 'updatedAt' in d:
d['updatedAt'] = str(d['updatedAt'])
if 'events' in service:
if not self.module.params['events']:
del service['events']
else:
for e in service['events']:
if 'createdAt' in e:
e['createdAt'] = str(e['createdAt'])
return service
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
""" https://stackoverflow.com/a/312464 """
for i in range(0, len(l), n):
yield l[i:i + n]
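# Illustrative example, not part of the original module:
#   list(chunks(['a', 'b', 'c', 'd', 'e'], 2)) -> [['a', 'b'], ['c', 'd'], ['e']]
# main() relies on this below to describe services in batches of 10.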
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
details=dict(type='bool', default=False),
events=dict(type='bool', default=True),
cluster=dict(),
service=dict(type='list')
))
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
show_details = module.params.get('details')
task_mgr = EcsServiceManager(module)
if show_details:
if module.params['service']:
services = module.params['service']
else:
services = task_mgr.list_services(module.params['cluster'])['services']
ecs_facts = dict(services=[], services_not_running=[])
for chunk in chunks(services, 10):
running_services, services_not_running = task_mgr.describe_services(module.params['cluster'], chunk)
ecs_facts['services'].extend(running_services)
ecs_facts['services_not_running'].extend(services_not_running)
else:
ecs_facts = task_mgr.list_services(module.params['cluster'])
module.exit_json(changed=False, ansible_facts=ecs_facts, **ecs_facts)
if __name__ == '__main__':
main()
| 0.001608 |
from django.conf import settings
from django import forms
from crits.core.widgets import CalWidget
from crits.vocabulary.relationships import RelationshipTypes
class ForgeRelationshipForm(forms.Form):
"""
Django form for forging relationships between two top-level objects.
"""
error_css_class = 'error'
required_css_class = 'required'
forward_type = forms.CharField(required=True,
label="Source Type",
widget = forms.TextInput(attrs={'readonly':'readonly'}))
forward_value = forms.CharField(required=True,
label="Source ID",
widget = forms.TextInput(attrs={'readonly':'readonly'}))
forward_relationship = forms.ChoiceField(required=True,
widget=forms.Select(attrs={'class':'relationship-types'}),
label="Relationship")
reverse_type = forms.ChoiceField(required=True,
widget=forms.Select,
label="Dest Type")
dest_id = forms.CharField(required=True,
label="Dest ID")
relationship_date = forms.DateTimeField(widget=CalWidget(format=settings.PY_DATETIME_FORMAT,
attrs={'class':'datetimeclass',
'size':'25'}),
input_formats=settings.PY_FORM_DATETIME_FORMATS,
required=False,
label="Relationship Date")
rel_confidence = forms.ChoiceField(required=True, label='Confidence', widget=forms.Select)
rel_reason = forms.CharField(label="Reason", required=False, widget=forms.Textarea(attrs={'cols':38, 'rows': 2}))
def __init__(self, *args, **kwargs):
super(ForgeRelationshipForm, self).__init__(*args, **kwargs)
self.fields['forward_type'].choices = self.fields['reverse_type'].choices = [
(c, c) for c in sorted(settings.CRITS_TYPES.iterkeys())
]
self.fields['forward_relationship'].choices = [
(c, c) for c in RelationshipTypes.values(sort=True)
]
self.fields['rel_confidence'].choices = [('unknown', 'unknown'),
('low', 'low'),
('medium', 'medium'),
('high', 'high')]
self.fields['rel_confidence'].initial = 'medium'
def clean(self):
cleaned_data = super(ForgeRelationshipForm, self).clean()
if 'forward_value' in cleaned_data:
try:
cleaned_data['forward_value'] = cleaned_data['forward_value'].strip()
except:
pass
if 'dest_id' in cleaned_data:
try:
cleaned_data['dest_id'] = cleaned_data['dest_id'].strip()
except:
pass
return cleaned_data
| 0.007571 |
# -*- coding: utf-8 -*-
# (c) 2015 Esther Martin - AvanzOSC
# (c) 2015 Oihane Crucelaegui - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, fields, api
class ResPartner(models.Model):
_inherit = 'res.partner'
@api.multi
@api.depends('commercial_partner_id',
'commercial_partner_id.sale_order_ids',
'commercial_partner_id.sale_order_ids.state',
'commercial_partner_id.child_ids',
'commercial_partner_id.child_ids.sale_order_ids',
'commercial_partner_id.child_ids.sale_order_ids.state',
'commercial_partner_id.invoice_ids',
'commercial_partner_id.child_ids.invoice_ids')
def _compute_prospect(self):
for record in self:
sale_ids = (
record.commercial_partner_id.sale_order_ids +
record.commercial_partner_id.mapped(
'child_ids.sale_order_ids'))
invoice_ids = (
record.commercial_partner_id.invoice_ids +
record.commercial_partner_id.mapped(
'child_ids.invoice_ids'))
record.prospect = (
not sale_ids.filtered(
lambda r: r.state not in
('draft', 'sent', 'cancel')) and
not invoice_ids.filtered(
lambda r: r.type in ('out_invoice', 'out_refund')))
prospect = fields.Boolean(
string='Prospect', compute='_compute_prospect', default=False,
store=True)
| 0 |
import sys
import os
import numpy as np
from pprint import pprint
from datetime import datetime
from datetime import timedelta
import mysql.connector
import math
import matplotlib.pyplot as plt
import matplotlib.colors
import matplotlib.cm as cm
from matplotlib import dates
import calendar
#database connection
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
save_file = False
start_date = datetime(2013,10,1)
end_date = datetime(2013,11,1)
hour_step = 168#336#168
min_BC_VED = 80
max_BC_VED = 220
interval = 5
min_rBC_mass = ((min_BC_VED/(10.**7))**3)*(math.pi/6.)*1.8*(10.**15)
max_rBC_mass = ((max_BC_VED/(10.**7))**3)*(math.pi/6.)*1.8*(10.**15)
##############initialize binning variables
def createBinDict(min_VED,max_VED,interval_length):
bins = []
start_size = min_VED #VED in nm
end_size = max_VED #VED in nm
#in nm
#create list of size bins
while start_size < end_size:
bins.append(start_size)
start_size += interval_length
#create dictionary with size bins as keys
bin_data = {}
for bin in bins:
bin_data[bin] = [0,0]
return bin_data
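#illustrative example, not in the original script:
#createBinDict(80,90,5) -> {80: [0, 0], 85: [0, 0]} (the upper bin edge is excluded)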
binned_data_min = createBinDict(min_BC_VED,max_BC_VED,interval)
binned_data_max = createBinDict(min_BC_VED,max_BC_VED,interval)
fraction_successful = createBinDict(min_BC_VED,max_BC_VED,interval)
for key in binned_data_min:
binned_data_min[key] = []
binned_data_max[key] = []
os.chdir('C:/Users/Sarah Hanna/Documents/Data/Alert Data/coating data/')
while start_date < end_date:
print start_date
period_end = start_date + timedelta(hours = hour_step)
UNIX_start_time = calendar.timegm(start_date.utctimetuple())
UNIX_end_time = calendar.timegm(period_end.utctimetuple())
cursor.execute(('''SELECT rBC_mass_fg,coat_thickness_nm_min,coat_thickness_nm_max,LF_scat_amp,UNIX_UTC_ts
FROM alert_leo_coating_data
WHERE UNIX_UTC_ts >= %s and UNIX_UTC_ts < %s and HK_flag = 0 and coat_thickness_nm_min >= %s and rBC_mass_fg IS NOT NULL'''),
(UNIX_start_time,UNIX_end_time,0))
coat_data = cursor.fetchall()
#hexbin plot
new_data = []
file_data = []
for row in coat_data:
mass = row[0]
min_coat = row[1]
max_coat = row[2]
LEO_amp = row[3]
UNIX_UTC_ts = row[4]
date_time = datetime.utcfromtimestamp(UNIX_UTC_ts)
VED = (((mass/(10**15*1.8))*6/math.pi)**(1/3.0))*10**7
for key in fraction_successful:
key_value = float(key)
interval_end = key_value + interval
if VED >= key_value and VED < interval_end:
fraction_successful[key][0] = fraction_successful[key][0] + 1
if LEO_amp >= 0:
fraction_successful[key][1] = fraction_successful[key][1] + 1
if min_coat != None:
binned_data_min[key].append(min_coat)
if max_coat != None:
binned_data_max[key].append(max_coat)
#fraction detectable
fractions_detectable = []
for bin, counts in fraction_successful.iteritems():
bin_midpoint = bin + interval/2.0
total_particles = counts[0]
detectable_notches = counts[1]
try:
fraction_detectable = detectable_notches*1.0/total_particles
except:
fraction_detectable=np.nan
fractions_detectable.append([bin_midpoint,fraction_detectable])
fractions_detectable.sort()
#coats for cores
min_coats = []
max_coats = []
for bin, counts in binned_data_min.iteritems():
bin_midpoint = bin + interval/2.0
min_avg_coat = np.mean(binned_data_min[bin])
min_coats.append([bin_midpoint,min_avg_coat])
min_coats.sort()
for bin, counts in binned_data_max.iteritems():
bin_midpoint = bin + interval/2.0
max_avg_coat = np.mean(binned_data_max[bin])
max_coats.append([bin_midpoint,max_avg_coat])
max_coats.sort()
#make lists
bins = [row[0] for row in fractions_detectable]
fractions = [row[1] for row in fractions_detectable]
core_size_min = [row[0] for row in min_coats]
coat_min_size = [row[1] for row in min_coats]
core_size_max = [row[0] for row in max_coats]
coat_max_size = [row[1] for row in max_coats]
#plotting
fig = plt.figure(figsize=(8,6))
ax1 = fig.add_subplot(111)
ax1.fill_between(core_size_min, coat_min_size, coat_max_size,label = 'coating range', alpha = 0.5)#, norm= norm) #bins='log', norm=norm
ax1.scatter(core_size_min, coat_min_size, label = 'coating min',color='k')#, norm= norm) #bins='log', norm=norm
ax1.scatter(core_size_max, coat_max_size, label = 'coating max',color='r')#, norm= norm) #bins='log', norm=norm
ax1.set_xlabel('rBC core diameter')
ax1.set_ylabel('range of coating thickness')
ax1.set_ylim(0,220)
ax1.set_xlim(min_BC_VED,max_BC_VED)
fig.subplots_adjust(right=0.8)
ax2 = ax1.twinx()
ax2.scatter(bins, fractions, color = 'g', marker = 's')
ax2.set_ylabel('fraction of detectable notch positions',color='g')
ax2.set_ylim(0,1)
plt.xlim(min_BC_VED,max_BC_VED)
#ax3 = fig.add_subplot(212)
#ax3.scatter(core_size_max, coat_max_size)#, norm= norm) #bins='log', norm=norm
#ax3.set_xlabel('rBC core diameter')
#ax3.set_ylabel('Maximum coating thickness')
#ax3.set_ylim(-30,220)
#ax3.set_xlim(min_BC_VED,max_BC_VED)
#
#ax4 = ax3.twinx()
#ax4.scatter(bins, fractions, color = 'r')
#ax4.set_ylabel('fraction of detectable notch positions',color='r')
#ax4.set_ylim(0,1)
#plt.xlim(min_BC_VED,max_BC_VED)
#plt.savefig('C:/Users/Sarah Hanna/Documents/Data/Alert Data/coating data/' + file_name + '.png', bbox_inches='tight')
plt.legend()
plt.show()
#start_date = start_date + timedelta(hours = hour_step)
#continue
#
start_date = start_date + timedelta(hours = hour_step)
cnx.close()
 | 0.047148 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A framework for developing sources for new file types.
To create a source for a new file type a sub-class of :class:`FileBasedSource`
should be created. Sub-classes of :class:`FileBasedSource` must implement the
method :meth:`FileBasedSource.read_records()`. Please read the documentation of
that method for more details.
For an example implementation of :class:`FileBasedSource` see
:class:`~apache_beam.io._AvroSource`.
"""
# pytype: skip-file
from typing import Callable
from apache_beam.internal import pickler
from apache_beam.io import concat_source
from apache_beam.io import iobase
from apache_beam.io import range_trackers
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.filesystems import FileSystems
from apache_beam.io.restriction_trackers import OffsetRange
from apache_beam.options.value_provider import StaticValueProvider
from apache_beam.options.value_provider import ValueProvider
from apache_beam.options.value_provider import check_accessible
from apache_beam.transforms.core import DoFn
from apache_beam.transforms.core import ParDo
from apache_beam.transforms.core import PTransform
from apache_beam.transforms.display import DisplayDataItem
from apache_beam.transforms.util import Reshuffle
MAX_NUM_THREADS_FOR_SIZE_ESTIMATION = 25
__all__ = ['FileBasedSource']
class FileBasedSource(iobase.BoundedSource):
"""A :class:`~apache_beam.io.iobase.BoundedSource` for reading a file glob of
a given type."""
MIN_NUMBER_OF_FILES_TO_STAT = 100
MIN_FRACTION_OF_FILES_TO_STAT = 0.01
def __init__(
self,
file_pattern,
min_bundle_size=0,
compression_type=CompressionTypes.AUTO,
splittable=True,
validate=True):
"""Initializes :class:`FileBasedSource`.
Args:
      file_pattern (str): the file glob to read; a string or a
:class:`~apache_beam.options.value_provider.ValueProvider`
(placeholder to inject a runtime value).
min_bundle_size (int): minimum size of bundles that should be generated
when performing initial splitting on this source.
compression_type (str): Used to handle compressed output files.
Typical value is :attr:`CompressionTypes.AUTO
<apache_beam.io.filesystem.CompressionTypes.AUTO>`,
in which case the final file path's extension will be used to detect
the compression.
splittable (bool): whether :class:`FileBasedSource` should try to
logically split a single file into data ranges so that different parts
of the same file can be read in parallel. If set to :data:`False`,
:class:`FileBasedSource` will prevent both initial and dynamic splitting
of sources for single files. File patterns that represent multiple files
may still get split into sources for individual files. Even if set to
:data:`True` by the user, :class:`FileBasedSource` may choose to not
split the file, for example, for compressed files where currently it is
not possible to efficiently read a data range without decompressing the
whole file.
validate (bool): Boolean flag to verify that the files exist during the
pipeline creation time.
Raises:
TypeError: when **compression_type** is not valid or if
**file_pattern** is not a :class:`str` or a
:class:`~apache_beam.options.value_provider.ValueProvider`.
ValueError: when compression and splittable files are
specified.
IOError: when the file pattern specified yields an empty
result.
"""
if not isinstance(file_pattern, (str, ValueProvider)):
raise TypeError(
'%s: file_pattern must be of type string'
' or ValueProvider; got %r instead' %
(self.__class__.__name__, file_pattern))
if isinstance(file_pattern, str):
file_pattern = StaticValueProvider(str, file_pattern)
self._pattern = file_pattern
self._concat_source = None
self._min_bundle_size = min_bundle_size
if not CompressionTypes.is_valid_compression_type(compression_type):
raise TypeError(
'compression_type must be CompressionType object but '
'was %s' % type(compression_type))
self._compression_type = compression_type
self._splittable = splittable
if validate and file_pattern.is_accessible():
self._validate()
def display_data(self):
return {
'file_pattern': DisplayDataItem(
str(self._pattern), label="File Pattern"),
'compression': DisplayDataItem(
str(self._compression_type), label='Compression Type')
}
@check_accessible(['_pattern'])
def _get_concat_source(self):
# type: () -> concat_source.ConcatSource
if self._concat_source is None:
pattern = self._pattern.get()
single_file_sources = []
match_result = FileSystems.match([pattern])[0]
files_metadata = match_result.metadata_list
# We create a reference for FileBasedSource that will be serialized along
# with each _SingleFileSource. To prevent this FileBasedSource from having
# a reference to ConcatSource (resulting in quadratic space complexity)
# we clone it here.
file_based_source_ref = pickler.loads(pickler.dumps(self))
for file_metadata in files_metadata:
file_name = file_metadata.path
file_size = file_metadata.size_in_bytes
if file_size == 0:
continue # Ignoring empty file.
# We determine splittability of this specific file.
splittable = (
self.splittable and _determine_splittability_from_compression_type(
file_name, self._compression_type))
single_file_source = _SingleFileSource(
file_based_source_ref,
file_name,
0,
file_size,
min_bundle_size=self._min_bundle_size,
splittable=splittable)
single_file_sources.append(single_file_source)
self._concat_source = concat_source.ConcatSource(single_file_sources)
return self._concat_source
def open_file(self, file_name):
return FileSystems.open(
file_name,
'application/octet-stream',
compression_type=self._compression_type)
@check_accessible(['_pattern'])
def _validate(self):
"""Validate if there are actual files in the specified glob pattern
"""
pattern = self._pattern.get()
# Limit the responses as we only want to check if something exists
match_result = FileSystems.match([pattern], limits=[1])[0]
if len(match_result.metadata_list) <= 0:
raise IOError('No files found based on the file pattern %s' % pattern)
def split(
self, desired_bundle_size=None, start_position=None, stop_position=None):
return self._get_concat_source().split(
desired_bundle_size=desired_bundle_size,
start_position=start_position,
stop_position=stop_position)
@check_accessible(['_pattern'])
def estimate_size(self):
pattern = self._pattern.get()
match_result = FileSystems.match([pattern])[0]
return sum([f.size_in_bytes for f in match_result.metadata_list])
def read(self, range_tracker):
return self._get_concat_source().read(range_tracker)
def get_range_tracker(self, start_position, stop_position):
return self._get_concat_source().get_range_tracker(
start_position, stop_position)
def read_records(self, file_name, offset_range_tracker):
"""Returns a generator of records created by reading file 'file_name'.
Args:
file_name: a ``string`` that gives the name of the file to be read. Method
``FileBasedSource.open_file()`` must be used to open the file
and create a seekable file object.
      offset_range_tracker: an object of type ``OffsetRangeTracker``. This
defines the byte range of the file that should be
read. See documentation in
``iobase.BoundedSource.read()`` for more information
on reading records while complying to the range
defined by a given ``RangeTracker``.
Returns:
an iterator that gives the records read from the given file.
"""
raise NotImplementedError
@property
def splittable(self):
return self._splittable
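# A minimal illustrative subclass, not part of the Beam module, sketching how
# read_records() can be implemented for a newline-delimited format. The class
# name and the line-splitting logic are assumptions for demonstration only.
class _ExampleLineSource(FileBasedSource):
  """Emits one record per newline-terminated line of the given file."""
  def read_records(self, file_name, offset_range_tracker):
    current = offset_range_tracker.start_position()
    with self.open_file(file_name) as file_handle:
      file_handle.seek(current)
      if current > 0:
        # We may have landed mid-line; skip ahead and let the reader of the
        # previous range emit the line that straddles the boundary.
        current += len(file_handle.readline())
      while offset_range_tracker.try_claim(current):
        line = file_handle.readline()
        if not line:
          break
        current += len(line)
        yield line.rstrip(b'\n')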
def _determine_splittability_from_compression_type(file_path, compression_type):
if compression_type == CompressionTypes.AUTO:
compression_type = CompressionTypes.detect_compression_type(file_path)
return compression_type == CompressionTypes.UNCOMPRESSED
class _SingleFileSource(iobase.BoundedSource):
"""Denotes a source for a specific file type."""
def __init__(
self,
file_based_source,
file_name,
start_offset,
stop_offset,
min_bundle_size=0,
splittable=True):
if not isinstance(start_offset, int):
raise TypeError(
'start_offset must be a number. Received: %r' % start_offset)
if stop_offset != range_trackers.OffsetRangeTracker.OFFSET_INFINITY:
if not isinstance(stop_offset, int):
raise TypeError(
'stop_offset must be a number. Received: %r' % stop_offset)
if start_offset >= stop_offset:
raise ValueError(
'start_offset must be smaller than stop_offset. Received %d and %d '
'for start and stop offsets respectively' %
(start_offset, stop_offset))
self._file_name = file_name
self._is_gcs_file = file_name.startswith('gs://') if file_name else False
self._start_offset = start_offset
self._stop_offset = stop_offset
self._min_bundle_size = min_bundle_size
self._file_based_source = file_based_source
self._splittable = splittable
def split(self, desired_bundle_size, start_offset=None, stop_offset=None):
if start_offset is None:
start_offset = self._start_offset
if stop_offset is None:
stop_offset = self._stop_offset
if self._splittable:
splits = OffsetRange(start_offset, stop_offset).split(
desired_bundle_size, self._min_bundle_size)
for split in splits:
yield iobase.SourceBundle(
split.stop - split.start,
_SingleFileSource(
# Copying this so that each sub-source gets a fresh instance.
pickler.loads(pickler.dumps(self._file_based_source)),
self._file_name,
split.start,
split.stop,
min_bundle_size=self._min_bundle_size,
splittable=self._splittable),
split.start,
split.stop)
else:
# Returning a single sub-source with end offset set to OFFSET_INFINITY (so
# that all data of the source gets read) since this source is
# unsplittable. Choosing size of the file as end offset will be wrong for
# certain unsplittable source, e.g., compressed sources.
yield iobase.SourceBundle(
stop_offset - start_offset,
_SingleFileSource(
self._file_based_source,
self._file_name,
start_offset,
range_trackers.OffsetRangeTracker.OFFSET_INFINITY,
min_bundle_size=self._min_bundle_size,
splittable=self._splittable),
start_offset,
range_trackers.OffsetRangeTracker.OFFSET_INFINITY)
def estimate_size(self):
return self._stop_offset - self._start_offset
def get_range_tracker(self, start_position, stop_position):
if start_position is None:
start_position = self._start_offset
if stop_position is None:
# If file is unsplittable we choose OFFSET_INFINITY as the default end
# offset so that all data of the source gets read. Choosing size of the
# file as end offset will be wrong for certain unsplittable source, for
# e.g., compressed sources.
stop_position = (
self._stop_offset if self._splittable else
range_trackers.OffsetRangeTracker.OFFSET_INFINITY)
range_tracker = range_trackers.OffsetRangeTracker(
start_position, stop_position)
if not self._splittable:
range_tracker = range_trackers.UnsplittableRangeTracker(range_tracker)
return range_tracker
def read(self, range_tracker):
return self._file_based_source.read_records(self._file_name, range_tracker)
def default_output_coder(self):
return self._file_based_source.default_output_coder()
class _ExpandIntoRanges(DoFn):
def __init__(
self, splittable, compression_type, desired_bundle_size, min_bundle_size):
self._desired_bundle_size = desired_bundle_size
self._min_bundle_size = min_bundle_size
self._splittable = splittable
self._compression_type = compression_type
def process(self, element, *args, **kwargs):
match_results = FileSystems.match([element])
for metadata in match_results[0].metadata_list:
splittable = (
self._splittable and _determine_splittability_from_compression_type(
metadata.path, self._compression_type))
if splittable:
for split in OffsetRange(0, metadata.size_in_bytes).split(
self._desired_bundle_size, self._min_bundle_size):
yield (metadata, split)
else:
yield (
metadata,
OffsetRange(0, range_trackers.OffsetRangeTracker.OFFSET_INFINITY))
class _ReadRange(DoFn):
def __init__(self, source_from_file):
# type: (Callable[[str], iobase.BoundedSource]) -> None
self._source_from_file = source_from_file
def process(self, element, *args, **kwargs):
metadata, range = element
source = self._source_from_file(metadata.path)
# Following split() operation has to be performed to create a proper
# _SingleFileSource. Otherwise what we have is a ConcatSource that contains
# a single _SingleFileSource. ConcatSource.read() expects a RangeTracker for
# sub-source range and reads full sub-sources (not byte ranges).
source_list = list(source.split(float('inf')))
# Handle the case of an empty source.
if not source_list:
return
source = source_list[0].source
for record in source.read(range.new_tracker()):
yield record
class ReadAllFiles(PTransform):
"""A Read transform that reads a PCollection of files.
Pipeline authors should not use this directly. This is to be used by Read
  PTransform authors who wish to implement file-based Read transforms that
read a PCollection of files.
"""
def __init__(self,
splittable, # type: bool
compression_type,
desired_bundle_size, # type: int
min_bundle_size, # type: int
source_from_file, # type: Callable[[str], iobase.BoundedSource]
):
"""
Args:
splittable: If False, files won't be split into sub-ranges. If True,
files may or may not be split into data ranges.
compression_type: A ``CompressionType`` object that specifies the
compression type of the files that will be processed. If
``CompressionType.AUTO``, system will try to automatically
determine the compression type based on the extension of
files.
desired_bundle_size: the desired size of data ranges that should be
generated when splitting a file into data ranges.
min_bundle_size: minimum size of data ranges that should be generated when
splitting a file into data ranges.
source_from_file: a function that produces a ``BoundedSource`` given a
file name. System will use this function to generate
``BoundedSource`` objects for file paths. Note that file
paths passed to this will be for individual files, not
for file patterns even if the ``PCollection`` of files
processed by the transform consist of file patterns.
"""
self._splittable = splittable
self._compression_type = compression_type
self._desired_bundle_size = desired_bundle_size
self._min_bundle_size = min_bundle_size
self._source_from_file = source_from_file
def expand(self, pvalue):
return (
pvalue
| 'ExpandIntoRanges' >> ParDo(
_ExpandIntoRanges(
self._splittable,
self._compression_type,
self._desired_bundle_size,
self._min_bundle_size))
| 'Reshard' >> Reshuffle()
| 'ReadRange' >> ParDo(_ReadRange(self._source_from_file)))
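# A hedged illustration, not part of the Beam module, of how a Read-transform
# author might wrap ReadAllFiles as its docstring describes. It reuses the
# assumed _ExampleLineSource sketch above; the transform name and bundle size
# are arbitrary illustrative choices.
class _ReadAllExampleLines(PTransform):
  """Expands a PCollection of file patterns into a PCollection of lines."""
  def expand(self, pvalue):
    return pvalue | ReadAllFiles(
        splittable=True,
        compression_type=CompressionTypes.AUTO,
        desired_bundle_size=64 * 1024 * 1024,
        min_bundle_size=0,
        source_from_file=lambda path: _ExampleLineSource(
            path, validate=False))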
| 0.005378 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2017 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <[email protected]>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import models, api, fields
class GenerateCommunicationWizard(models.TransientModel):
_inherit = 'partner.communication.generate.wizard'
success_story_id = fields.Many2one(
'success.story', 'Success Story', domain=[('type', '=', 'story')])
print_subject = fields.Boolean(default=True)
print_header = fields.Boolean()
@api.multi
def generate(self):
return super(GenerateCommunicationWizard, self.with_context(
default_print_subject=self.print_subject,
default_print_header=self.print_header,
default_success_story_id=self.success_story_id.id,
)).generate()
| 0 |