text
stringlengths 681
1.05M
| score
float64 0
0.27
|
---|---|
"""
Test the session-flushing middleware
"""
import unittest
from django.conf import settings
from django.test import Client
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class TestSessionFlushMiddleware(unittest.TestCase):
    """
    Ensure that if the pipeline is exited when it's been quarantined,
    the entire session is flushed.
    """

    @staticmethod
    def _client_with_session(**entries):
        # Build a test client whose session is pre-populated with ``entries``.
        client = Client()
        session = client.session
        for key, value in entries.items():
            session[key] = value
        session.save()
        return client

    def test_session_flush(self):
        """
        Test that a quarantined session is flushed when navigating elsewhere
        """
        client = self._client_with_session(
            fancy_variable=13025,
            partial_pipeline='pipeline_running',
            third_party_auth_quarantined_modules=('fake_quarantined_module',),
        )
        client.get('/')
        self.assertEqual(client.session.get('fancy_variable', None), None)

    def test_session_no_running_pipeline(self):
        """
        Test that a quarantined session without a running pipeline is not flushed
        """
        client = self._client_with_session(
            fancy_variable=13025,
            third_party_auth_quarantined_modules=('fake_quarantined_module',),
        )
        client.get('/')
        self.assertEqual(client.session.get('fancy_variable', None), 13025)

    def test_session_no_quarantine(self):
        """
        Test that a session with a running pipeline but no quarantine is not flushed
        """
        client = self._client_with_session(
            fancy_variable=13025,
            partial_pipeline='pipeline_running',
        )
        client.get('/')
        self.assertEqual(client.session.get('fancy_variable', None), 13025)
| 0.002798 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Add ip_allocation to port """
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
# revision identifiers, used by Alembic.
# Alembic revision identifier for this migration.
revision = '5cd92597d11d'
# The revision this migration upgrades from.
down_revision = '6b461a21bcfc'
# milestone identifier, used by neutron-db-manage
neutron_milestone = [migration.NEWTON]
def upgrade():
    """Add the nullable ``ip_allocation`` string column to the ports table."""
    ip_allocation_column = sa.Column('ip_allocation',
                                     sa.String(length=16),
                                     nullable=True)
    op.add_column('ports', ip_allocation_column)
| 0 |
# Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved
# Django
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from django.contrib.auth.models import User
class UpdatePassword(object):
    """Update a Django user's password, reporting whether a change occurred."""

    def update_password(self, username, password):
        """Set ``password`` for ``username``.

        Returns True if the password was changed, False if the stored password
        already matched. Raises RuntimeError if no such user exists.
        """
        # User.objects.get() raises User.DoesNotExist rather than returning
        # None, so the original `if not u:` check was unreachable; catch the
        # exception explicitly to actually report a missing user.
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            raise RuntimeError("User not found")
        if user.check_password(password):
            # Password already current; nothing to do.
            return False
        user.set_password(password)
        user.save()
        return True
class Command(BaseCommand):
    """Management command: change an existing user's password."""

    def add_arguments(self, parser):
        # Both options are required in practice; absence is checked in handle().
        parser.add_argument('--username', dest='username', action='store', type=str, default=None,
                            help='username to change the password for')
        parser.add_argument('--password', dest='password', action='store', type=str, default=None,
                            help='new password for user')

    def handle(self, *args, **options):
        username = options['username']
        password = options['password']
        if not username:
            raise CommandError('username required')
        if not password:
            raise CommandError('password required')
        updater = UpdatePassword()
        if updater.update_password(username, password):
            return "Password updated"
        return "Password not updated"
| 0.001407 |
import numpy as np
from petsc4py import PETSc
from src.geo import *
from src import stokes_flow as sf
from src.support_class import *
from src.StokesFlowMethod import *
# Public API: factory helpers that build geometries/objects for ecoli-like
# swimmers, helicoids, rods, spheres and related force-free composites.
__all__ = ['createEcoli_ellipse', 'createEcoliComp_ellipse', 'createEcoli_2tails',
           'createEcoliComp_tunnel', 'createEcoli_tunnel', 'create_ecoli_dualTail',
           'create_ecoli_2part', 'create_ecoli_tail', 'create_ecoli_tail_at',
           'create_rotlets_tail_2part', 'create_selfRepeat_tail',
           'create_ecoli_2part_at', 'create_ecoli_dualTail_at',
           'get_tail_nodes_split_at', 'get_ecoli_nodes_split_at',
           'create_diskVane_tail',
           'create_capsule',
           'create_rod',
           'create_infHelix',
           'create_helicoid_list', 'create_helicoid_comp',
           'creat_helicoid_dumb', 'creat_helicoid_dumb_selfRotate',
           'obj2helicoid_list', 'obj2helicoid_list_v2', 'obj2helicoid_list_v3',
           'obj2helicoid_comp', 'obj2helicoid_list_selfRotate',
           'create_sphere', 'create_move_single_sphere', 'create_one_ellipse']
def create_capsule(rs1, rs2, ls, ds, node_dof=3):
    """Create the velocity-node geometry of a capsule.

    The capsule is assembled from two half-ellipsoid end caps (semi axes
    ``rs1``/``rs2``) joined, when the total length allows it, by a straight
    tube of radius ``rs2``.

    :param rs1: semi-axis of the end caps along the capsule axis.
    :param rs2: transverse semi-axis of the caps; also the tube radius.
    :param ls: total capsule length along z.
    :param ds: target node spacing.
    :param node_dof: degrees of freedom per node.
    :return: combined geometry of caps (and tube, if any).
    """
    lvs3 = ls - 2 * rs2  # length of the straight tube section
    dth = ds / rs2  # angular node spacing matching ds on the tube surface
    err_msg = 'geo parameter of create_capsule head is wrong. '
    assert lvs3 >= 0, err_msg
    vsgeo = base_geo()
    vsgeo.set_dof(node_dof)
    vsgeo1 = ellipse_base_geo()  # velocity node geo of head
    vsgeo1.create_half_delta(ds, rs1, rs2)
    vsgeo2 = vsgeo1.copy()
    # orient the first cap and shift it to the +z end of the tube
    vsgeo1.node_rotation(norm=np.array((0, 1, 0)), theta=-np.pi / 2)
    vsgeo1.node_rotation(norm=np.array((0, 0, 1)), theta=-np.pi / 2)
    vsgeo1.move((0, 0, +lvs3 / 2))
    # mirror-orient the second cap and shift it to the -z end
    vsgeo2.node_rotation(norm=np.array((0, 1, 0)), theta=+np.pi / 2)
    vsgeo2.node_rotation(norm=np.array((0, 0, 1)), theta=+np.pi / 2 - dth)
    vsgeo2.move((0, 0, -lvs3 / 2))
    # reverse node order so the combined node sequence stays consistent
    vsgeo2.set_nodes(np.flipud(vsgeo2.get_nodes()), deltalength=vsgeo2.get_deltaLength())
    if lvs3 > ds:
        # tube section is long enough to hold at least one ring of nodes
        vsgeo3 = tunnel_geo()
        vsgeo3.create_deltatheta(dth=dth, radius=rs2, length=lvs3)
        vsgeo.combine([vsgeo1, vsgeo3, vsgeo2])
    else:
        vsgeo.combine([vsgeo1, vsgeo2])
    return vsgeo
def create_ecoli_tail(moveh, **kwargs):
    """Create the helical tail object(s) of an ecoli model.

    Builds one fat-helix object, optionally applies a debug rotation
    (``with_T_geo``), moves it by ``moveh``, then replicates it ``n_tail``
    times evenly rotated about the z axis through ``center``.
    """
    n_theta = kwargs['nth']
    helix_factor = kwargs['hfct']
    epsilon_h = kwargs['eh']
    n_cycle = kwargs['ch']
    r_axis1 = kwargs['rh11']
    r_axis2 = kwargs['rh12']
    r_tube = kwargs['rh2']
    pitch = kwargs['ph']
    n_tail = kwargs['n_tail']
    with_cover = kwargs['with_cover']
    with_T_geo = kwargs.get('with_T_geo', 0)
    left_hand = kwargs['left_hand']
    rT2 = kwargs['rT2']
    center = kwargs['center']
    matrix_method = kwargs['matrix_method']
    zoom_factor = kwargs['zoom_factor']
    # build the template helix object
    tail_obj0 = sf.obj_dic[matrix_method]()
    helix_ugeo = FatHelix()  # velocity node geo of helix
    if 'dualPotential' in matrix_method:
        helix_ugeo.set_check_epsilon(False)
    helix_ugeo.set_dof(tail_obj0.get_n_unknown())
    helix_fgeo = helix_ugeo.create_deltatheta(
            dth=2 * np.pi / n_theta, radius=r_tube, R1=r_axis1, R2=r_axis2,
            B=pitch / (2 * np.pi), n_c=n_cycle, epsilon=epsilon_h,
            with_cover=with_cover, factor=helix_factor, left_hand=left_hand)
    tail_obj0.set_data(helix_fgeo, helix_ugeo, name='helix_0')
    tail_obj0.zoom(zoom_factor)
    if with_T_geo:
        # dbg
        OptDB = PETSc.Options()
        dbg_factor = OptDB.getReal('dbg_theta_factor', 1.5)
        PETSc.Sys.Print('--------------------> DBG: dbg_theta_factor = %f' % dbg_factor)
        rot_theta = np.pi * n_cycle + (rT2 + r_tube * dbg_factor) / (r_axis1 + r_tube)
        tail_obj0.node_rotation(norm=np.array((0, 0, 1)), theta=rot_theta)
    tail_obj0.move(moveh * zoom_factor)
    # replicate the template around the z axis
    tail_list = uniqueList()
    for i0 in range(n_tail):
        tail_i = tail_obj0.copy()
        tail_i.node_rotation(norm=(0, 0, 1), theta=2 * np.pi / n_tail * i0,
                             rotation_origin=center.copy())
        tail_i.set_name('helix_%d' % i0)
        tail_list.append(tail_i)
    return tail_list
def create_ecoli_tail_bck(moveh, **kwargs):
    """Backup/legacy variant of :func:`create_ecoli_tail`.

    Identical construction except the debug rotation block always runs
    (there is no ``with_T_geo`` switch here).
    """
    nth = kwargs['nth']
    hfct = kwargs['hfct']
    eh = kwargs['eh']
    ch = kwargs['ch']
    rh11 = kwargs['rh11']
    rh12 = kwargs['rh12']
    rh2 = kwargs['rh2']
    ph = kwargs['ph']
    n_tail = kwargs['n_tail']
    with_cover = kwargs['with_cover']
    left_hand = kwargs['left_hand']
    rT2 = kwargs['rT2']
    center = kwargs['center']
    matrix_method = kwargs['matrix_method']
    zoom_factor = kwargs['zoom_factor']
    obj_type = sf.obj_dic[matrix_method]
    # create helix
    vhobj0 = obj_type()
    node_dof = vhobj0.get_n_unknown()
    B = ph / (2 * np.pi)  # helix rise per radian
    vhgeo0 = FatHelix()  # velocity node geo of helix
    if 'dualPotential' in matrix_method:
        vhgeo0.set_check_epsilon(False)
    vhgeo0.set_dof(node_dof)
    dth = 2 * np.pi / nth
    fhgeo0 = vhgeo0.create_deltatheta(dth=dth, radius=rh2, R1=rh11, R2=rh12, B=B, n_c=ch,
                                      epsilon=eh, with_cover=with_cover, factor=hfct,
                                      left_hand=left_hand)
    vhobj0.set_data(fhgeo0, vhgeo0, name='helix_0')
    vhobj0.zoom(zoom_factor)
    # dbg
    OptDB = PETSc.Options()
    factor = OptDB.getReal('dbg_theta_factor', 1.5)
    PETSc.Sys.Print('--------------------> DBG: dbg_theta_factor = %f' % factor)
    theta = np.pi * ch + (rT2 + rh2 * factor) / (rh11 + rh2)
    vhobj0.node_rotation(norm=np.array((0, 0, 1)), theta=theta)
    vhobj0.move(moveh * zoom_factor)
    # replicate the template helix around the z axis
    tail_list = uniqueList()
    for i0 in range(n_tail):
        theta = 2 * np.pi / n_tail * i0
        vhobj1 = vhobj0.copy()
        vhobj1.node_rotation(norm=(0, 0, 1), theta=theta, rotation_origin=center.copy())
        vhobj1.set_name('helix_%d' % i0)
        tail_list.append(vhobj1)
    return tail_list
def create_diskVane_tail(moveh, **kwargs):
    """Create a tail built from regularized disks arranged like vanes.

    ``nr`` disks form one ring at height ``moveh``; if ``nz`` > 1 the ring is
    stacked ``nz`` times along z over a span ``rz``, each layer rotated by pi.
    """
    r1 = kwargs['diskVane_r1']
    r2 = kwargs['diskVane_r2']
    rz = kwargs['diskVane_rz']
    th_loc = kwargs['diskVane_th_loc']
    # ph_loc = kwargs['diskVane_ph_loc']
    ds = kwargs['diskVane_ds']
    nr = kwargs['diskVane_nr']
    nz = kwargs['diskVane_nz']
    # template disk, tilted upright then rotated/offset into place
    base_disk = regularizeDisk()
    base_disk.create_ds(ds, r2)
    base_disk.node_rotation(norm=np.array([1, 0, 0]), theta=np.pi / 2, rotation_origin=np.zeros(3))
    base_disk.node_rotation(norm=np.array([0, 0, 1]), theta=th_loc, rotation_origin=np.zeros(3))
    base_disk.move(np.array((r1, 0, moveh)))
    # one ring of nr copies around the z axis
    ring = []
    for i0 in range(nr):
        disk_i = base_disk.copy()
        disk_i.node_rotation(norm=np.array((0, 0, 1)), theta=2 * np.pi / nr * i0,
                             rotation_origin=np.zeros(3))
        ring.append(disk_i)
    if np.isclose(nz, 1):
        return ring
    # stack the ring nz times along z, alternating each layer by pi
    tgeo_list = []
    tz = rz / (nz - 1)
    for i1 in range(nz):
        for disk_j in ring:
            disk_k = disk_j.copy()
            disk_k.move(np.array((0, 0, tz * i1)))
            disk_k.node_rotation(norm=np.array((0, 0, 1)), theta=np.pi * i1,
                                 rotation_origin=np.zeros(3))
            tgeo_list.append(disk_k)
    return tgeo_list
def create_selfRepeat_tail(moveh, **kwargs):
    """Create self-repeating helical tails split into start/body/end parts.

    Like :func:`create_ecoli_tail` but built on ``SelfRepeat_FatHelix``;
    in addition to the full tail objects it returns, for each tail, separate
    objects for the start cap, the repeating mid-body and the end cap.

    :return: (tail_list, tail_start_list, tail_body0_list, tail_end_list)
    """
    nth = kwargs['nth']
    hfct = kwargs['hfct']
    eh = kwargs['eh']
    ch = kwargs['ch']
    rh11 = kwargs['rh11']
    rh12 = kwargs['rh12']
    rh2 = kwargs['rh2']
    ph = kwargs['ph']
    n_tail = kwargs['n_tail']
    with_cover = kwargs['with_cover']
    with_T_geo = kwargs['with_T_geo'] if 'with_T_geo' in kwargs.keys() else 0
    left_hand = kwargs['left_hand']
    rT2 = kwargs['rT2']
    repeat_n = kwargs['repeat_n']
    center = kwargs['center']
    matrix_method = kwargs['matrix_method']
    zoom_factor = kwargs['zoom_factor']
    obj_type = sf.obj_dic[matrix_method]
    # create helix
    vhobj0 = obj_type()  # type: sf.StokesFlowObj
    node_dof = vhobj0.get_n_unknown()
    B = ph / (2 * np.pi)  # helix rise per radian
    vhgeo0 = SelfRepeat_FatHelix(repeat_n)  # velocity node geo of helix
    if 'dualPotential' in matrix_method:
        vhgeo0.set_check_epsilon(False)
    vhgeo0.set_dof(node_dof)
    dth = 2 * np.pi / nth
    fhgeo0 = vhgeo0.create_deltatheta(dth=dth, radius=rh2, R1=rh11, R2=rh12, B=B, n_c=ch,
                                      epsilon=eh, with_cover=with_cover, factor=hfct,
                                      left_hand=left_hand)  # type: SelfRepeat_FatHelix
    vhobj0.set_data(fhgeo0, vhgeo0, name='helix_0')
    vhobj0.zoom(zoom_factor)
    if with_T_geo:
        # dbg
        OptDB = PETSc.Options()
        factor = OptDB.getReal('dbg_theta_factor', 1.5)
        PETSc.Sys.Print('--------------------> DBG: dbg_theta_factor = %f' % factor)
        theta = np.pi * ch + (rT2 + rh2 * factor) / (rh11 + rh2)
        vhobj0.node_rotation(norm=np.array((0, 0, 1)), theta=theta)
    vhobj0.move(moveh * zoom_factor)
    # replicate the template helix around the z axis
    tail_list = uniqueList()
    for i0 in range(n_tail):
        theta = 2 * np.pi / n_tail * i0
        vhobj1 = vhobj0.copy()
        vhobj1.node_rotation(norm=(0, 0, 1), theta=theta, rotation_origin=center.copy())
        vhobj1.set_name('helix_%d' % i0)
        tail_list.append(vhobj1)
    # split each tail into its start / repeating-body / end geometries
    tail_start_list = []
    tail_body0_list = []
    tail_end_list = []
    for tobj in tail_list:
        vhgeo0 = tobj.get_u_geo()
        fhgeo0 = tobj.get_f_geo()
        #
        part_obj = obj_type()
        part_ugeo = vhgeo0.get_start_geo()
        part_fgeo = fhgeo0.get_start_geo()
        part_obj.set_data(part_fgeo, part_ugeo, name='helix_0_start')
        tail_start_list.append(part_obj)
        #
        part_obj = sf.SelfRepeatObj()
        part_ugeo = vhgeo0.get_body_mid_geo()
        part_fgeo = fhgeo0.get_body_mid_geo()
        part_obj.set_data(part_fgeo, part_ugeo, name='helix_0_body0')
        tail_body0_list.append(part_obj)
        #
        part_obj = obj_type()
        part_ugeo = vhgeo0.get_end_geo()
        part_fgeo = fhgeo0.get_end_geo()
        part_obj.set_data(part_fgeo, part_ugeo, name='helix_0_end')
        tail_end_list.append(part_obj)
    return tail_list, tail_start_list, tail_body0_list, tail_end_list
def create_ecoli_tail_at(theta, phi, psi_tail, now_center=np.zeros(3), **problem_kwargs):
    """Build the combined tail object at a given orientation and position.

    ``theta``/``phi`` tilt the tail axis (rotations about y then z),
    ``psi_tail`` spins the tail about its own axis, and ``now_center`` is the
    final translation.
    """
    combined_tail = sf.StokesFlowObj()
    combined_tail.set_name('tail_obj')
    combined_tail.combine(create_ecoli_tail(np.zeros(3), **problem_kwargs))
    combined_tail.node_rotation(np.array((0, 1, 0)), theta)
    combined_tail.node_rotation(np.array((0, 0, 1)), phi)
    combined_tail.node_rotation(combined_tail.get_u_geo().get_geo_norm(), psi_tail)
    combined_tail.move(now_center)
    return combined_tail
def get_tail_nodes_split_at(theta, phi, psi_tail, now_center=np.zeros(3), **problem_kwargs):
    """Return the oriented tail's velocity nodes split into two interleaved halves."""
    tail_obj = sf.StokesFlowObj()
    tail_obj.set_name('tail_obj')
    tail_obj.combine(create_ecoli_tail(np.zeros(3), **problem_kwargs))
    # orient: tilt about y, spin about z, then phase-rotate about the tail axis
    tail_obj.node_rotation(np.array((0, 1, 0)), theta)
    tail_obj.node_rotation(np.array((0, 0, 1)), phi)
    tail_obj.node_rotation(tail_obj.get_u_geo().get_geo_norm(), psi_tail)
    tail_obj.move(now_center)
    # split node array into 2*n_tail chunks and regroup odd/even chunks
    node_chunks = np.split(tail_obj.get_u_nodes(), 2 * problem_kwargs['n_tail'])
    odd_nodes = np.vstack(node_chunks[1::2])
    even_nodes = np.vstack(node_chunks[0::2])
    return odd_nodes, even_nodes
def createEcoli_ellipse(name='...', **kwargs):
    """Create an ecoli model: ellipsoidal head plus helical tail(s).

    Head and tails are placed symmetrically about ``center`` along z, a gap
    ``dist_hs`` apart, and scaled by ``zoom_factor``.

    :return: (head object, list of tail objects)
    """
    ch = kwargs['ch']
    ph = kwargs['ph']
    ds = kwargs['ds']
    rs1 = kwargs['rs1']
    rs2 = kwargs['rs2']
    es = kwargs['es']
    # sphere_rotation = kwargs['sphere_rotation'] if 'sphere_rotation' in kwargs.keys() else 0
    zoom_factor = kwargs['zoom_factor'] if 'zoom_factor' in kwargs.keys() else 1
    dist_hs = kwargs['dist_hs']
    center = kwargs['center']
    matrix_method = kwargs['matrix_method']
    lh = ph * ch  # length of helix
    movesz = 0.5 * (dist_hs - 2 * rs1 + lh) + rs1
    movehz = 0.5 * (dist_hs + 2 * rs1 - lh) + lh / 2
    moves = np.array((0, 0, movesz)) + center  # move distance of sphere
    moveh = np.array((0, 0, -movehz)) + center  # move distance of helix
    objtype = sf.obj_dic[matrix_method]
    # create tail
    tail_list = create_ecoli_tail(moveh, **kwargs)
    # create head
    vsgeo = ellipse_base_geo()  # velocity node geo of sphere
    vsgeo.create_delta(ds, rs1, rs2)
    vsgeo.node_rotation(norm=np.array((0, 1, 0)), theta=-np.pi / 2)
    fsgeo = vsgeo.copy()  # force node geo of sphere
    # force nodes sit slightly outside the velocity surface (regularization)
    fsgeo.node_zoom(1 + ds / (0.5 * (rs1 + rs2)) * es)
    vsobj = objtype()
    vsobj.set_data(fsgeo, vsgeo, name='sphere_0')
    vsobj.zoom(zoom_factor)
    vsobj.move(moves * zoom_factor)
    return vsobj, tail_list
def createEcoli_2tails(name='...', **kwargs):
    """Create an ecoli with an ellipsoidal head and two counter-handed tails.

    The tails are placed at opposite z ends (right-handed below, left-handed
    above); the head is created at the origin and only zoomed, not moved.

    :return: (head object, tail list 1, tail list 2)
    """
    ch = kwargs['ch']
    ph = kwargs['ph']
    ds = kwargs['ds']
    rs1 = kwargs['rs1']
    rs2 = kwargs['rs2']
    es = kwargs['es']
    # sphere_rotation = kwargs['sphere_rotation'] if 'sphere_rotation' in kwargs.keys() else 0
    zoom_factor = kwargs['zoom_factor'] if 'zoom_factor' in kwargs.keys() else 1
    dist_hs = kwargs['dist_hs']
    center = kwargs['center']
    matrix_method = kwargs['matrix_method']
    lh = ph * ch  # length of helix
    objtype = sf.obj_dic[matrix_method]
    # create tail
    movez = np.array((0, 0, rs1 + dist_hs + lh / 2))
    tkwargs = kwargs.copy()
    tkwargs['left_hand'] = False
    tail_list1 = create_ecoli_tail(-movez, **tkwargs)
    tkwargs['left_hand'] = True
    tail_list2 = create_ecoli_tail(movez, **tkwargs)
    # create head
    vsgeo = ellipse_base_geo()  # velocity node geo of sphere
    vsgeo.create_delta(ds, rs1, rs2)
    vsgeo.node_rotation(norm=np.array((0, 1, 0)), theta=-np.pi / 2)
    fsgeo = vsgeo.copy()  # force node geo of sphere
    fsgeo.node_zoom(1 + ds / (0.5 * (rs1 + rs2)) * es)
    vsobj = objtype()
    vsobj.set_data(fsgeo, vsgeo, name='sphere_0')
    vsobj.zoom(zoom_factor)
    return vsobj, tail_list1, tail_list2
def createEcoliComp_ellipse(name='...', **kwargs):
    """Assemble the ellipsoid-head ecoli into a force-free composite and rotate it."""
    head_obj, tail_list = createEcoli_ellipse(name=name, **kwargs)
    head_ugeo = head_obj.get_u_geo()
    center = kwargs['center']
    rel_Us = kwargs['rel_Us']
    rel_Uh = kwargs['rel_Uh']
    ecoli_comp = sf.ForceFreeComposite(center=center.copy(),
                                       norm=head_ugeo.get_geo_norm().copy(),
                                       name=name)
    # head and tails move with different relative velocities
    ecoli_comp.add_obj(head_obj, rel_U=rel_Us)
    for tail in tail_list:
        ecoli_comp.add_obj(tail, rel_U=rel_Uh)
    # final rigid rotation of the whole composite about its center
    ecoli_comp.node_rotation(norm=kwargs['rot_norm'].copy(),
                             theta=kwargs['rot_theta'] * np.pi,
                             rotation_origin=center.copy())
    return ecoli_comp
def createEcoli_tunnel(**kwargs):
    """Create an ecoli model with a capsule head, helical tail(s) and a T-shaped tunnel.

    :return: (head object, tail list, T-shape object)
    """
    ch = kwargs['ch']
    rh1 = kwargs['rh1']
    rh2 = kwargs['rh2']
    ph = kwargs['ph']
    ds = kwargs['ds']
    rs1 = kwargs['rs1']
    rs2 = kwargs['rs2']
    ls = kwargs['ls']
    es = kwargs['es']
    # sphere_rotation = kwargs['sphere_rotation'] if 'sphere_rotation' in kwargs.keys() else 0
    zoom_factor = kwargs['zoom_factor']
    dist_hs = kwargs['dist_hs']
    center = kwargs['center']
    rT1 = kwargs['rT1']
    rT2 = kwargs['rT2']
    ntT = kwargs['ntT']
    eT = kwargs['eT']
    Tfct = kwargs['Tfct']
    matrix_method = kwargs['matrix_method']
    lh = ph * ch  # length of helix
    movesz = 0.5 * (dist_hs - ls + lh) + ls / 2
    movehz = -1 * (0.5 * (dist_hs + ls - lh) + lh / 2)
    # movesz = (ls + dist_hs) / 2
    # movehz = (lh + dist_hs) / 2
    moves = np.array((0, 0, movesz)) + center  # move distance of sphere
    moveh = np.array((rT1 - rh1, 0, movehz)) + center  # move distance of helix
    lT = (rT1 + rh2) * 2  # length of the T-shape tunnel
    objtype = sf.obj_dic[matrix_method]
    # create helix
    tail_list = create_ecoli_tail(moveh, **kwargs)
    # create head
    vsobj = objtype()
    node_dof = vsobj.get_n_unknown()
    vsgeo = create_capsule(rs1, rs2, ls, ds, node_dof)
    fsgeo = vsgeo.copy()  # force node geo of sphere
    # force nodes shifted outward radially and inward axially
    fsgeo.node_zoom(1 + ds / (0.5 * (rs1 + rs2)) * es)
    fsgeo.node_zoom_z(1 - ds / (0.5 * (rs1 + rs2)) * es)
    vsobj.set_data(fsgeo, vsgeo, name='sphere_0')
    vsobj.zoom(zoom_factor)
    vsobj.move(moves * zoom_factor)
    # create T shape
    dtT = 2 * np.pi / ntT
    vTobj = objtype()
    node_dof = vTobj.get_n_unknown()
    # # dbg
    # OptDB = PETSc.Options( )
    # factor = OptDB.getReal('dbg_move_factor', 1)
    # PETSc.Sys.Print('--------------------> DBG: dbg_move_factor = %f' % factor)
    # moveT = np.array((0, 0, moveh[-1] + lh / 2 + rh2 * factor))
    moveT = np.array((0, 0, movehz + lh / 2)) + center
    vTgeo = tunnel_geo()
    if 'dualPotential' in matrix_method:
        vTgeo.set_check_epsilon(False)
    vTgeo.set_dof(node_dof)
    fTgeo = vTgeo.create_deltatheta(dth=dtT, radius=rT2, factor=Tfct, length=lT, epsilon=eT,
                                    with_cover=1)
    vTobj.set_data(fTgeo, vTgeo, name='T_shape_0')
    theta = -np.pi / 2
    vTobj.node_rotation(norm=np.array((0, 1, 0)), theta=theta)
    vTobj.zoom(zoom_factor)
    vTobj.move(moveT * zoom_factor)
    # rotate every part together about the z axis through center
    theta = np.pi / 4 - ch * np.pi
    vsobj.node_rotation(norm=np.array((0, 0, 1)), theta=theta, rotation_origin=center)
    for ti in tail_list:
        ti.node_rotation(norm=np.array((0, 0, 1)), theta=theta, rotation_origin=center)
    vTobj.node_rotation(norm=np.array((0, 0, 1)), theta=theta, rotation_origin=center)
    return vsobj, tail_list, vTobj
def createEcoliComp_tunnel(name='...', **kwargs):
    """Assemble the tunnel-head ecoli into a force-free composite.

    The T-shape object is only included when ``with_T_geo`` is truthy;
    otherwise ``rT1`` is forced equal to ``rh1`` before construction.
    """
    with_T_geo = kwargs.get('with_T_geo', 0)
    center = kwargs['center']
    rel_Us = kwargs['rel_Us']
    rel_Uh = kwargs['rel_Uh']
    if not with_T_geo:
        kwargs['rT1'] = kwargs['rh1']
    head_obj, tail_list, T_obj = createEcoli_tunnel(**kwargs)
    ecoli_comp = sf.ForceFreeComposite(center, norm=head_obj.get_u_geo().get_geo_norm(), name=name)
    ecoli_comp.add_obj(head_obj, rel_U=rel_Us)
    for tail in tail_list:
        ecoli_comp.add_obj(tail, rel_U=rel_Uh)
    if with_T_geo:
        # the tunnel rotates with the tails
        ecoli_comp.add_obj(T_obj, rel_U=rel_Uh)
    return ecoli_comp
def create_ecoli_2part(**problem_kwargs):
    """Create an ecoli composed of two parts: an ellipsoidal head and one combined tail."""
    rel_Us = problem_kwargs['rel_Us']
    rel_Uh = problem_kwargs['rel_Uh']
    update_order = problem_kwargs.get('update_order', 1)
    update_fun = problem_kwargs.get('update_fun', Adams_Bashforth_Methods)
    err_msg = 'currently, do not support with_T_geo for this kind of ecoli. '
    assert not problem_kwargs['with_T_geo'], err_msg
    head_obj, tail_obj_list = createEcoli_ellipse(name='ecoli0', **problem_kwargs)
    head_obj.set_name('head_obj')
    # merge the individual helices into one tail object
    tail_obj = sf.StokesFlowObj()
    tail_obj.set_name('tail_obj')
    tail_obj.combine(tail_obj_list)
    head_geo = head_obj.get_u_geo()
    # ecoli_comp = sf.ForceFreeComposite(center=head_geo.get_center(), norm=head_geo.get_geo_norm(), name='ecoli_0')
    ecoli_comp = sf.ForceFreeComposite(center=np.zeros(3), norm=head_geo.get_geo_norm(),
                                       name='ecoli_0')
    ecoli_comp.add_obj(obj=head_obj, rel_U=rel_Us)
    ecoli_comp.add_obj(obj=tail_obj, rel_U=rel_Uh)
    ecoli_comp.set_update_para(fix_x=False, fix_y=False, fix_z=False,
                               update_fun=update_fun, update_order=update_order)
    return ecoli_comp
def create_rotlets_tail_2part(rotlet_strength=0, **problem_kwargs):
    """Create a swimmer whose head is the limit of a point torque (rotlet) plus tail(s).

    A point torque of magnitude ``rotlet_strength`` along the tail axis is
    placed where the head would be; the composite is given the opposite
    torque so the whole swimmer is torque-free.
    """
    # create a swimmer with a infinite small head (the limit is a rotlet) and tail(s).
    ch = problem_kwargs['ch']
    ph = problem_kwargs['ph']
    dist_hs = problem_kwargs['dist_hs']
    lh = ph * ch  # length of helix
    with_T_geo = problem_kwargs['with_T_geo']
    err_msg = 'currently, do not support with_T_geo for this kind of ecoli. '
    assert not with_T_geo, err_msg
    tail_list = create_ecoli_tail(np.zeros(3), **problem_kwargs)
    tail_obj0 = sf.StokesFlowObj()
    tail_obj0.combine(tail_list)
    tail_obj = sf.FundSoltObj()
    tail_obj.set_data(tail_obj0.get_u_geo(), tail_obj0.get_f_geo(), name='rotlets_tail_obj')
    # place the rotlet at the would-be head position above the tail
    location = np.array((0, 0, lh / 2 + dist_hs))
    tnorm = tail_obj0.get_u_geo().get_geo_norm()
    torque = tnorm * rotlet_strength
    tail_obj.add_point_force(location=location, force=torque,
                             StokesletsHandle=light_rotlets_matrix_3d)
    # balance the point torque on the composite (zero net force)
    givenT = np.hstack((np.zeros(3), -1 * torque))
    ecoli_comp = sf.GivenForceComposite(center=np.zeros(3), norm=tnorm,
                                        name='rotlets_tail_comp', givenF=givenT)
    ecoli_comp.add_obj(obj=tail_obj, rel_U=np.zeros(6))
    update_order = problem_kwargs['update_order'] \
        if 'update_order' in problem_kwargs.keys() \
        else 1
    update_fun = problem_kwargs['update_fun'] \
        if 'update_fun' in problem_kwargs.keys() \
        else Adams_Bashforth_Methods
    ecoli_comp.set_update_para(fix_x=False, fix_y=False, fix_z=False,
                               update_fun=update_fun, update_order=update_order)
    return ecoli_comp
def create_ecoli_2part_at(theta, phi, psi_tail, now_center=np.zeros(3), **problem_kwargs):
    """Create a two-part ecoli, orient it by (theta, phi), spin the tail, and place it."""
    comp = create_ecoli_2part(**problem_kwargs)
    # tilt about y, then spin about z
    comp.node_rotation(np.array((0, 1, 0)), theta)
    comp.node_rotation(np.array((0, 0, 1)), phi)
    # phase-rotate only the tail about its own axis
    the_tail = comp.get_obj_list()[1]
    the_tail.node_rotation(the_tail.get_u_geo().get_geo_norm(), psi_tail)
    comp.move(now_center)
    return comp
def get_ecoli_nodes_split_at(theta, phi, psi_tail, now_center=np.zeros(3), **problem_kwargs):
    """Return ecoli velocity nodes as (tail half 1, tail half 2, head nodes)."""
    comp = create_ecoli_2part(**problem_kwargs)
    comp.node_rotation(np.array((0, 1, 0)), theta)
    comp.node_rotation(np.array((0, 0, 1)), phi)
    obj_list = comp.get_obj_list()
    the_tail = obj_list[1]
    the_tail.node_rotation(the_tail.get_u_geo().get_geo_norm(), psi_tail)
    comp.move(now_center)
    # split tail nodes into 2*n_tail chunks; regroup odd and even chunks
    node_chunks = np.split(the_tail.get_u_nodes(), 2 * problem_kwargs['n_tail'])
    odd_nodes = np.vstack(node_chunks[1::2])
    even_nodes = np.vstack(node_chunks[0::2])
    head_nodes = obj_list[0].get_u_nodes()
    return odd_nodes, even_nodes, head_nodes
def create_ecoli_dualTail(**problem_kwargs):
    """Create a three-part swimmer: a head plus two counter-handed tails at both ends."""
    # create a swimmer with two tails in the ends. one is left hand and one is right hand.
    # the swimmer contain three parts, i.e. head, upper tail and down tail.
    rel_Us = problem_kwargs['rel_Us']
    rel_Uh = problem_kwargs['rel_Uh']
    update_order = problem_kwargs.get('update_order', 1)
    update_fun = problem_kwargs.get('update_fun', Adams_Bashforth_Methods)
    err_msg = 'currently, do not support with_T_geo for this kind of ecoli. '
    assert not problem_kwargs['with_T_geo'], err_msg
    head_obj, tail_geos1, tail_geos2 = createEcoli_2tails(name='ecoli0', **problem_kwargs)
    head_obj.set_name('head_obj')
    tail_obj1 = sf.StokesFlowObj()
    tail_obj1.set_name('tail_obj1')
    tail_obj1.combine(tail_geos1)
    tail_obj2 = sf.StokesFlowObj()
    tail_obj2.set_name('tail_obj2')
    tail_obj2.combine(tail_geos2)
    axis_norm = head_obj.get_u_geo().get_geo_norm()
    ecoli_comp = sf.ForceFreeComposite(center=np.zeros(3), norm=axis_norm, name='ecoli_0')
    ecoli_comp.add_obj(obj=head_obj, rel_U=rel_Us)
    ecoli_comp.add_obj(obj=tail_obj1, rel_U=rel_Uh)
    # the opposite tail rotates the other way
    ecoli_comp.add_obj(obj=tail_obj2, rel_U=-rel_Uh)
    ecoli_comp.set_update_para(fix_x=False, fix_y=False, fix_z=False,
                               update_fun=update_fun, update_order=update_order)
    return ecoli_comp
def create_ecoli_dualTail_at(theta, phi, psi_tail1, psi_tail2, center=np.zeros(3),
                             **problem_kwargs):
    """Place a dual-tail ecoli at a given orientation — NOT IMPLEMENTED.

    The placement steps are sketched below but disabled; calling this raises.
    """
    # The original guard was `assert 1 == 2`, which is stripped under
    # `python -O`; raise explicitly so the guard always fires.
    raise NotImplementedError('create_ecoli_dualTail_at is not implemented yet. ')
    # Intended implementation, kept for reference:
    # ecoli_comp = create_ecoli_dualTail(**problem_kwargs)
    # ecoli_comp.node_rotation(np.array((0, 1, 0)), theta)
    # ecoli_comp.node_rotation(np.array((0, 0, 1)), phi)
    # tail_obj1 = ecoli_comp.get_obj_list()[1]
    # tail_obj1.node_rotation(tail_obj1.get_u_geo().get_geo_norm(), psi_tail1)
    # tail_obj2 = ecoli_comp.get_obj_list()[2]
    # tail_obj2.node_rotation(tail_obj2.get_u_geo().get_geo_norm(), psi_tail2)
    # return ecoli_comp
def create_sphere(namehandle='sphereObj', **kwargs):
    """Create one rigid-sphere object per (coordinate, velocity) pair.

    A single template sphere is built once and then copied, moved and given
    its rigid velocity for every entry of ``sphere_coord``/``sphere_velocity``.
    """
    matrix_method = kwargs['matrix_method']
    rs = kwargs['rs']
    sphere_velocity = kwargs['sphere_velocity']
    ds = kwargs['ds']
    es = kwargs['es']
    sphere_coord = kwargs['sphere_coord']
    template_obj = sf.obj_dic[matrix_method]()
    base_sgeo = sphere_geo()  # force geo
    base_sgeo.set_dof(template_obj.get_n_unknown())
    base_sgeo.create_delta(ds, rs)
    base_sgeo.set_rigid_velocity([0, 0, 0, 0, 0, 0])
    zoomed_sgeo = base_sgeo.copy()
    if 'pf' in matrix_method:
        zoomed_sgeo.node_zoom((rs + ds * es) / rs)
    template_obj.set_data(zoomed_sgeo, base_sgeo)
    obj_list = []
    for i0, (t_coord, t_velocity) in enumerate(zip(sphere_coord, sphere_velocity)):
        new_obj = template_obj.copy()
        new_obj.set_name('%s_%d' % (namehandle, i0))
        new_obj.move(t_coord)
        new_obj.get_u_geo().set_rigid_velocity(t_velocity)
        obj_list.append(new_obj)
    return obj_list
def create_one_ellipse(namehandle='sphereObj', **kwargs):
    """Create a single rigid ellipsoid object placed at ``sphere_coord``."""
    matrix_method = kwargs['matrix_method']
    rs1 = kwargs['rs1']
    rs2 = kwargs['rs2']
    sphere_velocity = kwargs['sphere_velocity']
    ds = kwargs['ds']
    es = kwargs['es']
    sphere_coord = kwargs['sphere_coord']
    ellipse_obj = sf.obj_dic[matrix_method]()  # type: sf.StokesFlowObj
    base_egeo = ellipse_base_geo()  # force geo
    base_egeo.set_dof(ellipse_obj.get_n_unknown())
    base_egeo.create_delta(ds, rs1, rs2)
    base_egeo.set_rigid_velocity(sphere_velocity)
    zoomed_egeo = base_egeo.copy()
    if 'pf' in matrix_method:
        # point-force methods use a slightly inflated force surface
        zoomed_egeo.node_zoom(1 + ds / (0.5 * (rs1 + rs2)) * es)
    ellipse_obj.set_data(zoomed_egeo, base_egeo, name=namehandle)
    ellipse_obj.move(sphere_coord)
    return ellipse_obj
def create_move_single_sphere(namehandle='sphereObj', **kwargs):
    """Create a single sphere object shifted along z by ``movez``; return a 1-tuple."""
    movez = kwargs['movez']
    sphere_obj = create_sphere(namehandle, **kwargs)[0]
    sphere_obj.move(np.array((0, 0, movez)))
    return (sphere_obj,)
def create_rod(namehandle='rod_obj', **problem_kwargs):
    """Create a rod (tunnel geometry) wrapped in a given-force composite.

    The rod is placed at ``RodCenter``, zoomed, tilted to the horizontal, then
    rotated by ``RodThe`` within a plane set by ``RodPhi``.

    :return: 1-tuple holding the rod composite.
    """
    rRod = problem_kwargs['rRod']
    lRod = problem_kwargs['lRod']
    ntRod = problem_kwargs['ntRod']
    eRod = problem_kwargs['eRod']
    Rodfct = problem_kwargs['Rodfct']
    RodThe = problem_kwargs['RodThe']
    RodPhi = problem_kwargs['RodPhi']
    rel_URod = problem_kwargs['rel_URod']
    RodCenter = problem_kwargs['RodCenter']
    zoom_factor = problem_kwargs['zoom_factor']
    givenF = problem_kwargs['givenF']
    matrix_method = problem_kwargs['matrix_method']
    dth = 2 * np.pi / ntRod  # angular node spacing around the rod
    rod_geo = tunnel_geo()
    rod_geo.create_deltatheta(dth=dth, radius=rRod, length=lRod, epsilon=eRod,
                              with_cover=1, factor=Rodfct, left_hand=False)
    # first displace the rod above the surface, rotate to horizon.
    rod_geo.move(displacement=RodCenter)
    rod_geo.node_zoom(factor=zoom_factor, zoom_origin=RodCenter)
    norm = np.array((0, 1, 0))
    theta = -np.pi / 2
    rod_geo.node_rotation(norm=norm, theta=theta, rotation_origin=RodCenter)
    # then, the rod is rotate in a specified plane, which is parabled to XY plane (the wall) first, then
    # rotated angle theta, of an angle phi.
    norm = np.array((0, np.sin(RodPhi), np.cos(RodPhi)))
    rod_geo.node_rotation(norm=norm, theta=-RodThe, rotation_origin=RodCenter)
    rod_obj = sf.obj_dic[matrix_method]()
    name = namehandle + '_obj_0'
    # the same geometry serves as both force and velocity geometry here
    rod_obj.set_data(f_geo=rod_geo, u_geo=rod_geo, name=name)
    name = namehandle + '_0'
    rod_comp = sf.GivenForceComposite(center=RodCenter, name=name, givenF=givenF.copy())
    rod_comp.add_obj(obj=rod_obj, rel_U=rel_URod)
    rod_list = (rod_comp,)
    return rod_list
def create_infHelix(namehandle='infhelix', normalize=False, **problem_kwargs):
    """Create ``n_tail`` infinite-helix objects evenly spaced in phase angle.

    When ``normalize`` is set, the geometric parameters are pre-scaled by
    ``zoom_factor`` instead of zooming the finished geometry.
    """
    n_tail = problem_kwargs['n_tail']
    eh = problem_kwargs['eh']
    ch = problem_kwargs['ch']
    rh1 = problem_kwargs['rh1']
    rh2 = problem_kwargs['rh2']
    ph = problem_kwargs['ph']
    nth = problem_kwargs['nth']
    zoom_factor = problem_kwargs['zoom_factor']
    if normalize:
        rh1, rh2, ph = rh1 * zoom_factor, rh2 * zoom_factor, ph * zoom_factor
    helix_list = []
    phase_angles = np.linspace(0, 2 * np.pi, n_tail, endpoint=False)
    for i0, theta0 in enumerate(phase_angles):
        helix_ugeo = infHelix()
        helix_ugeo.create_n(rh1, rh2, ph, ch, nth, theta0=theta0)
        helix_fgeo = helix_ugeo.create_fgeo(epsilon=eh)
        helix_obj = sf.StokesFlowObj()
        helix_obj.set_data(f_geo=helix_fgeo, u_geo=helix_ugeo,
                           name=namehandle + '%02d' % i0)
        helix_list.append(helix_obj)
    return helix_list
def create_helicoid_list(namehandle='helicoid', **problem_kwargs):
    """Create the disk objects of a helicoid: ``ndsk_each`` triplets of
    mutually perpendicular regularized disks around the z axis.

    :return: list of StokesFlowObj, three per angular position.
    """
    r1 = problem_kwargs['helicoid_r1']
    r2 = problem_kwargs['helicoid_r2']
    ds = problem_kwargs['helicoid_ds']
    th_loc = problem_kwargs['helicoid_th_loc']
    ndsk_each = problem_kwargs['helicoid_ndsk_each']
    matrix_method = problem_kwargs['matrix_method']
    assert matrix_method in ('rs', 'lg_rs')
    assert ndsk_each == 4
    # template disk tilted by th_loc and offset by r1
    tgeo = regularizeDisk()
    tgeo.create_ds(ds, r2)
    tgeo.node_rotation(norm=np.array([1, 0, 0]), theta=th_loc)
    tgeo.move(np.array((r1, 0, 0)))
    # tgeo.show_nodes()
    tgeo_list = []
    rot_dth = 2 * np.pi / ndsk_each
    for i0 in range(ndsk_each):
        rot_th = i0 * rot_dth + np.pi / 4
        # rot_th = i0 * rot_dth
        # tgeo21 is the base copy at this angle; tgeo22/tgeo23 are copies of
        # tgeo21 (taken BEFORE its siblings are rotated) turned 90 deg about
        # x and y respectively. Note the interleaved copy/append ordering.
        tgeo21 = tgeo.copy()
        tgeo21.node_rotation(norm=np.array([0, 0, 1]), theta=rot_th, rotation_origin=np.zeros(3))
        tgeo22 = tgeo21.copy()
        tgeo_list.append(tgeo21)
        tgeo22.node_rotation(norm=np.array([1, 0, 0]), theta=np.pi / 2, rotation_origin=np.zeros(3))
        tgeo23 = tgeo21.copy()
        tgeo_list.append(tgeo22)
        tgeo23.node_rotation(norm=np.array([0, 1, 0]), theta=np.pi / 2, rotation_origin=np.zeros(3))
        tgeo_list.append(tgeo23)
    # tgeo3 = base_geo()
    # tgeo3.combine(tgeo_list)
    # tgeo3.show_nodes(linestyle='')
    tobj_list = []
    for i0, tgeo in enumerate(tgeo_list):
        tobj = sf.StokesFlowObj()
        tobj.set_matrix_method(matrix_method)  # the geo is regularizeDisk
        tobj.set_data(f_geo=tgeo, u_geo=tgeo, name=namehandle + '%02d' % i0)
        tobj_list.append(tobj)
    return tobj_list
def create_helicoid_comp(*args, **kwargs):
    """Wrap the helicoid disk list into a single force-free composite."""
    update_order = kwargs.get('update_order', 1)
    update_fun = kwargs.get('update_fun', Adams_Bashforth_Methods)
    helicoid_comp = sf.ForceFreeComposite(center=np.zeros(3), norm=np.array((0, 0, 1)),
                                          name='helicoid_comp')
    for disk_obj in create_helicoid_list(*args, **kwargs):
        # every disk is carried rigidly with the composite (no relative motion)
        helicoid_comp.add_obj(obj=disk_obj, rel_U=np.zeros(6))
    helicoid_comp.set_update_para(fix_x=False, fix_y=False, fix_z=False,
                                  update_fun=update_fun, update_order=update_order)
    return helicoid_comp
def obj2helicoid_list(tobj0, **problem_kwargs):
    """Replicate an object into a helicoid arrangement.

    A copy of ``tobj0`` is offset by ``helicoid_r`` and replicated at
    ``ndsk_each`` angles about z; at every angle a triplet is produced
    (base copy plus 90-degree rotations about x and y).

    :return: list of 3 * ndsk_each object copies.
    """
    # assert 1 == 2
    helicoid_r = problem_kwargs['helicoid_r']
    ndsk_each = problem_kwargs['helicoid_ndsk_each']
    assert ndsk_each == 4
    tobj = tobj0.copy()
    tobj.move(np.array((helicoid_r, 0, 0)))
    tobj_list = []
    rot_dth = 2 * np.pi / ndsk_each
    namehandle = tobj.get_name()
    for i0 in range(ndsk_each):
        rot_th = i0 * rot_dth + np.pi / 4
        # rot_th = i0 * rot_dth
        tobj21 = tobj.copy()
        tobj21.set_name('%s_%02d_%01d' % (namehandle, i0, 1))
        tobj21.node_rotation(norm=np.array([0, 0, 1]), theta=rot_th, rotation_origin=np.zeros(3))
        tobj_list.append(tobj21)
        # two perpendicular siblings, both copied from the already-rotated tobj21
        tobj22 = tobj21.copy()
        tobj22.set_name('%s_%02d_%01d' % (namehandle, i0, 2))
        tobj22.node_rotation(norm=np.array([1, 0, 0]), theta=np.pi / 2, rotation_origin=np.zeros(3))
        tobj_list.append(tobj22)
        tobj23 = tobj21.copy()
        tobj23.set_name('%s_%02d_%01d' % (namehandle, i0, 3))
        tobj23.node_rotation(norm=np.array([0, 1, 0]), theta=np.pi / 2, rotation_origin=np.zeros(3))
        tobj_list.append(tobj23)
    return tobj_list
def obj2helicoid_list_v2(tobj0, **problem_kwargs):
    """Replicate *tobj0* into the 4x3 helicoid arrangement (variant 2).

    The seed object (which must be centered at the origin) is moved to the
    helicoid radius along the (1, 1, 0) diagonal; two extra seeds are tilted
    by +/- pi/2 about x, and all three are repeated at four azimuthal angles.
    Note: *tobj0* is moved in place.
    """
    helicoid_r = problem_kwargs['helicoid_r']
    ndsk_each = problem_kwargs['helicoid_ndsk_each']
    assert ndsk_each == 4
    helicoid_th0 = problem_kwargs.get('helicoid_th0', 0)
    assert np.isclose(np.linalg.norm(tobj0.get_u_geo().get_center()), 0)
    namehandle = tobj0.get_name()
    shift = helicoid_r / np.sqrt(2)
    tobj0.move((shift, shift, 0))
    tilt_up = tobj0.copy()
    tilt_up.node_rotation(np.array((1, 0, 0)), np.pi / 2, rotation_origin=np.zeros(3))
    tilt_dn = tobj0.copy()
    tilt_dn.node_rotation(np.array((1, 0, 0)), -np.pi / 2, rotation_origin=np.zeros(3))
    rot_dth = 2 * np.pi / ndsk_each
    tobj_list = []
    for i0 in range(ndsk_each):
        rot_th = i0 * rot_dth + helicoid_th0
        for i1, seed in enumerate((tobj0, tilt_up, tilt_dn)):
            part = seed.copy()
            part.set_name('%s_%02d_%01d' % (namehandle, i0, i1))
            part.node_rotation(np.array((0, 0, 1)), rot_th, rotation_origin=np.zeros(3))
            tobj_list.append(part)
    return tobj_list
def obj2helicoid_list_v3(tobj, **problem_kwargs):
    """Replicate *tobj* into the 4x3 helicoid arrangement (variant 3).

    The seed (centered at the origin) is shifted to ``helicoid_r`` on the
    x-axis; three tilted seed copies are prepared and each one is repeated at
    the four azimuthal stations. Note: *tobj* is moved in place.
    """
    helicoid_r = problem_kwargs['helicoid_r']
    ndsk_each = problem_kwargs['helicoid_ndsk_each']
    assert ndsk_each == 4
    helicoid_th0 = problem_kwargs.get('helicoid_th0', 0)
    assert np.isclose(np.linalg.norm(tobj.get_u_geo().get_center()), 0)
    namehandle = tobj.get_name()
    rot_dth = 2 * np.pi / ndsk_each
    tobj.move((helicoid_r, 0, 0))
    seed0 = tobj.copy()
    seed0.node_rotation(np.array((0, 0, 1)), -rot_dth / 2, rotation_origin=np.zeros(3))
    seed1 = tobj.copy()
    seed1.node_rotation(np.array((1, 0, 0)), -np.pi / 2, rotation_origin=np.zeros(3))
    seed1.node_rotation(np.array((0, 1, 0)), rot_dth / 2, rotation_origin=np.zeros(3))
    seed2 = tobj.copy()
    seed2.node_rotation(np.array((1, 0, 0)), -np.pi / 2, rotation_origin=np.zeros(3))
    seed2.node_rotation(np.array((0, 1, 0)), -rot_dth / 2, rotation_origin=np.zeros(3))
    tobj_list = []
    for i0 in range(ndsk_each):
        rot_th = i0 * rot_dth + helicoid_th0
        for i1, seed in enumerate((seed0, seed1, seed2)):
            part = seed.copy()
            part.set_name('%s_%02d_%01d' % (namehandle, i0, i1))
            part.node_rotation(np.array((0, 0, 1)), rot_th, rotation_origin=np.zeros(3))
            tobj_list.append(part)
    return tobj_list
def obj2helicoid_list_selfRotate(tobj, **problem_kwargs):
    """Create the three mutually tilted copies used by the self-rotating
    helicoid variant (a single azimuthal station of v3).

    Note: *tobj* is moved to the helicoid radius in place.
    """
    helicoid_r = problem_kwargs['helicoid_r']
    ndsk_each = problem_kwargs['helicoid_ndsk_each']
    assert ndsk_each == 4
    assert np.isclose(np.linalg.norm(tobj.get_u_geo().get_center()), 0)
    rot_dth = 2 * np.pi / ndsk_each
    tobj.move((helicoid_r, 0, 0))
    planar = tobj.copy()
    planar.node_rotation(np.array((0, 0, 1)), -rot_dth / 2, rotation_origin=np.zeros(3))
    tilt_a = tobj.copy()
    tilt_a.node_rotation(np.array((1, 0, 0)), -np.pi / 2, rotation_origin=np.zeros(3))
    tilt_a.node_rotation(np.array((0, 1, 0)), rot_dth / 2, rotation_origin=np.zeros(3))
    tilt_b = tobj.copy()
    tilt_b.node_rotation(np.array((1, 0, 0)), -np.pi / 2, rotation_origin=np.zeros(3))
    tilt_b.node_rotation(np.array((0, 1, 0)), -rot_dth / 2, rotation_origin=np.zeros(3))
    return [planar, tilt_a, tilt_b]
def obj2helicoid_comp(tobj0, **kwargs):
    """Wrap the v3 helicoid replicas of *tobj0* into a force-free composite."""
    update_order = kwargs.get('update_order', 1)
    update_fun = kwargs.get('update_fun', Adams_Bashforth_Methods)
    part_list = obj2helicoid_list_v3(tobj0, **kwargs)
    helicoid_comp = sf.ForceFreeComposite(center=np.zeros(3), norm=np.array((0, 0, 1)),
                                          name='helicoid_comp')
    for part in part_list:
        helicoid_comp.add_obj(obj=part, rel_U=np.zeros(6))
    helicoid_comp.set_update_para(fix_x=False, fix_y=False, fix_z=False,
                                  update_fun=update_fun, update_order=update_order)
    return helicoid_comp
def obj2helicoid_comp_selfRotate(tobj0, **kwargs):
    """Wrap the self-rotating helicoid replicas of *tobj0* into a
    force-free composite."""
    update_order = kwargs.get('update_order', 1)
    update_fun = kwargs.get('update_fun', Adams_Bashforth_Methods)
    part_list = obj2helicoid_list_selfRotate(tobj0, **kwargs)
    helicoid_comp = sf.ForceFreeComposite(center=np.zeros(3), norm=np.array((0, 0, 1)),
                                          name='helicoid_comp')
    for part in part_list:
        helicoid_comp.add_obj(obj=part, rel_U=np.zeros(6))
    helicoid_comp.set_update_para(fix_x=False, fix_y=False, fix_z=False,
                                  update_fun=update_fun, update_order=update_order)
    return helicoid_comp
def creat_helicoid_dumb(**problem_kwargs):
    """Build a helicoid composite whose elementary object is a dumbbell:
    two delta-discretized spheres separated by ``dumb_d`` along z and
    tilted by ``dumb_theta`` about the x-axis."""
    dumb_d = problem_kwargs['dumb_d']
    dumb_theta = problem_kwargs['dumb_theta']
    ds = problem_kwargs['ds']
    rs = problem_kwargs['rs']
    sphere_up = sphere_geo()
    sphere_up.create_delta(ds, rs)
    sphere_dn = sphere_up.copy()
    sphere_up.move(np.array((0, 0, dumb_d / 2)))
    sphere_dn.move(np.array((0, 0, -dumb_d / 2)))
    dumb_geo = base_geo()
    dumb_geo.combine([sphere_up, sphere_dn], origin=np.zeros(3), geo_norm=np.array((0, 0, 1)))
    dumb_geo.node_rotation(norm=np.array((1, 0, 0)), theta=dumb_theta)
    tobj = sf.StokesFlowObj()
    tobj.set_data(dumb_geo, dumb_geo, 'helicoid_dumb')
    return obj2helicoid_comp(tobj, **problem_kwargs)
def creat_helicoid_dumb_selfRotate(**problem_kwargs):
    """Build the self-rotating helicoid composite from a dumbbell object.

    The force geometry is a zoomed copy of the velocity geometry (factor
    1 + ds * es / rs), paired through the object class selected by
    ``matrix_method``.
    """
    matrix_method = problem_kwargs['matrix_method']
    dumb_d = problem_kwargs['dumb_d']
    dumb_theta = problem_kwargs['dumb_theta']
    ds = problem_kwargs['ds']
    rs = problem_kwargs['rs']
    es = problem_kwargs['es']
    sphere_u = sphere_geo()
    sphere_u.create_delta(ds, rs)
    sphere_uf = sphere_u.copy()
    sphere_uf.node_zoom(1 + ds * es / rs)
    sphere_d = sphere_u.copy()
    sphere_df = sphere_uf.copy()

    def _dumbbell(top, bottom):
        # Place the spheres at +/- dumb_d/2 on z, merge, tilt by dumb_theta.
        top.move(np.array((0, 0, dumb_d / 2)))
        bottom.move(np.array((0, 0, -dumb_d / 2)))
        geo = base_geo()
        geo.combine([top, bottom], origin=np.zeros(3), geo_norm=np.array((0, 0, 1)))
        geo.node_rotation(norm=np.array((1, 0, 0)), theta=dumb_theta)
        return geo

    dumb_geo = _dumbbell(sphere_u, sphere_d)
    dumb_geof = _dumbbell(sphere_uf, sphere_df)
    tobj = sf.obj_dic[matrix_method]()
    tobj.set_data(dumb_geof, dumb_geo, name='helicoid_dumb')
    return obj2helicoid_comp_selfRotate(tobj, **problem_kwargs)
| 0.002428 |
import uuid
from django import forms
from django.utils.translation import ugettext as _
from django.utils.text import slugify
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Div, Field, HTML, Layout, Fieldset
from crispy_forms.bootstrap import PrependedText
from django.template import RequestContext
from .models import Event
class EventForm(forms.ModelForm):
    """ModelForm for creating/editing an Event.

    Renders through a crispy-forms layout (see :pyattr:`helper`) and
    generates a unique slug on save.
    """
    def __init__(self, user, *args, **kwargs):
        # Keep the requesting user so save() can record the event creator.
        self.user = user
        super(EventForm, self).__init__(*args, **kwargs)
    @property
    def helper(self):
        """Build the crispy-forms helper describing the Bootstrap layout.

        A fresh FormHelper is constructed on every access; the template is
        expected to provide its own <form> tag (form_tag = False).
        """
        helper = FormHelper()
        helper.render_unmentioned_fields = True
        helper.form_tag = False
        helper.layout = Layout(
            Fieldset(
                _(u'Event'),
                Div(Div(Field('summary', placeholder=_(u'Event summary')), css_class='col-sm-6'),
                    Div(Field('category', placeholder=_("Category")), css_class='col-sm-6'),
                    css_class='row'
                    ),
                Div(Div(Field('tags', placeholder=_("keyword, keyword 1, key word 2")), css_class="col-sm-12"),
                    css_class="row"
                    ),
                Div(Div(Field('description', help_text=_('tada')), css_class="col-sm-12"),
                    css_class="row"
                    ),
                ),
            Fieldset(
                _(u'Contact'),
                Div(Div(Field('email', placeholder=_("Contact Email")), css_class="col-sm-6"),
                    Div(Field('web', placeholder=_("Web site URL")), css_class="col-sm-6"),
                    css_class="row"
                    ),
                ),
            Fieldset(
                _(u'Date'),
                Div(Div(Field('dtstart', placeholder=_(u'From')), css_class='col-sm-6'),
                    Div(Field('dtend', placeholder=_(u'To')), css_class='col-sm-6'),
                    css_class='row'
                    ),
                Div(Div(Field('allday'), css_class='col-sm-12'),
                    css_class='row'
                    ),
                ),
            # Location fieldset embeds a Leaflet-style map div and a
            # geolocate button wired up by the Media JS (map_mini.js).
            Fieldset(
                _(u'Location'),
                Div(Div(Field('country', placeholder=_("country")), HTML('''{% load i18n %}<p class="help-block"><button id="geosearch-button" type="button" class="btn btn-default">
                    {% trans 'Geolocate' %}</button> Try to geolocate from the given address.
                    </p>'''), css_class="col-sm-6"),
                    Div(Field('address', placeholder=_(
                        '''Street
                        \n
                        ZipCode City''')), css_class="col-sm-6"),
                    css_class="row"
                    ),
                Div(Div(HTML('''<div style="height:350px;" id="map"></div><br>'''), css_class="col-sm-12"),
                    css_class="row"),
                # lat/lon are hidden fields populated by the map widget.
                Div(Div(Field('lat', placeholder=_("Location latitude")), css_class="col-sm-6"),
                    Div(Field('lon', placeholder=_("Location longitude")), css_class="col-sm-6"),
                    css_class="row"),
                ),
        )
        return helper
    class Meta:
        model = Event
        widgets = {
            'description': forms.Textarea(attrs={'cols': 10, 'rows': 6}),
            'address': forms.Textarea(attrs={'cols': 10, 'rows': 3}),
            'dtstart': forms.DateInput(attrs={'class': 'datepicker'}),
            'dtend': forms.DateInput(attrs={'class': 'datepicker'}),
            'lat':forms.HiddenInput(),
            'lon':forms.HiddenInput(),
        }
        #fields = "__all__"
        # These fields are managed by the application, not the user.
        exclude =['slug', 'pub_status', 'status', 'sequence', 'creation_user']
    class Media:
        # Map widget used to geolocate the event address.
        js = ["/static/thedirectory/js/map_mini.js"]
    def save(self, commit=True):
        u'''Add the slug value'''
        # Slugify the summary; if the slug is taken, append a short random
        # suffix (first 5 chars of a uuid4) to keep it unique.
        event = super(EventForm, self).save(commit=False)
        if not event.slug:
            event.slug = slugify(self.cleaned_data.get('summary'))
        try:
            Event.objects.get(slug=event.slug)
            event.slug = "%s-%s" % (event.slug, str(uuid.uuid4())[:5])
        except Event.DoesNotExist:
            pass
        if commit:
            event.creation_user = self.user
            event.save()
            self.save_m2m()
        return event
| 0.005959 |
"""Views handler for ask admin page rendering."""
from django.core.urlresolvers import reverse
import simplejson as json
from django.http import Http404, HttpResponse
from django.template.loader import render_to_string
from django.core.mail.message import EmailMultiAlternatives
from apps.managers.challenge_mgr import challenge_mgr
from apps.widgets.ask_admin.forms import FeedbackForm
def supply(request, page_name):
    """Supply view_objects for the ask-admin widget: a feedback form
    pointing back at the help index page."""
    del request, page_name  # unused, but required by the supply() interface
    feedback_form = FeedbackForm(auto_id="help_%s")
    feedback_form.url = reverse("help_index")
    return {"form": feedback_form}
def send_feedback(request):
    """Email the challenge admin a question submitted via the feedback form.

    On a valid POST, renders the text and HTML email bodies, sends them to
    the challenge contact address (Reply-To set to the asking user) and, for
    ajax requests, returns a JSON success payload. Everything else - GET,
    invalid form, non-ajax POST - falls through to Http404, matching the
    original control flow.
    """
    if request.method == "POST":
        form = FeedbackForm(request.POST)
        if form.is_valid():
            # Both bodies render from the same context.
            email_context = {
                "user": request.user,
                "url": form.cleaned_data["url"],
                "question": form.cleaned_data["question"],
            }
            html_message = render_to_string("email/ask_admin.html", email_context)
            message = render_to_string("email/ask_admin.txt", email_context)
            challenge = challenge_mgr.get_challenge()
            # Using adapted version from Django source code
            subject = u'[%s] %s asked a question' % (
                challenge.name,
                request.user.get_profile().name)
            # NOTE(review): the "or True" forces sending even when email is
            # disabled for the challenge; it looks like a leftover debug
            # override -- confirm before removing it.
            if challenge.email_enabled or True:
                mail = EmailMultiAlternatives(subject, message, challenge.contact_email,
                                              [challenge.contact_email, ], headers={
                                                  "Reply-To": request.user.email})
                mail.attach_alternative(html_message, 'text/html')
                # BUGFIX: removed stray debug "print html_message" that dumped
                # the full rendered email to stdout on every send.
                mail.send()
            if request.is_ajax():
                return HttpResponse(json.dumps({"success": True}),
                                    mimetype="application/json")
    raise Http404
| 0.001871 |
# This code was originally contributed by Jeffrey Harris.
import datetime
import struct
from six.moves import winreg
from six import text_type
try:
import ctypes
from ctypes import wintypes
except ValueError:
# ValueError is raised on non-Windows systems for some horrible reason.
raise ImportError("Running tzwin on non-Windows system")
from ._common import tzrangebase
__all__ = ["tzwin", "tzwinlocal", "tzres"]
# One week, used for nth-weekday-of-month arithmetic in picknthweekday().
ONEWEEK = datetime.timedelta(7)
# Registry keys holding timezone data on NT-family vs. 9x-family Windows,
# and the key describing the machine's currently configured local zone.
TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones"
TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
def _settzkeyname():
    """Return the registry key under which this Windows stores timezones.

    Probes for the NT-family key and falls back to the 9x-family key when
    opening it fails.
    """
    handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
    try:
        winreg.OpenKey(handle, TZKEYNAMENT).Close()
        TZKEYNAME = TZKEYNAMENT
    except WindowsError:
        # The NT key is absent; assume the Windows-9x style layout.
        TZKEYNAME = TZKEYNAME9X
    handle.Close()
    return TZKEYNAME
# Resolved once at import time; all registry lookups below use this key.
TZKEYNAME = _settzkeyname()
class tzres(object):
    """
    Class for accessing `tzres.dll`, which contains timezone name related
    resources.
    .. versionadded:: 2.5.0
    """
    p_wchar = ctypes.POINTER(wintypes.WCHAR)  # Pointer to a wide char

    def __init__(self, tzres_loc='tzres.dll'):
        # Load the user32 DLL so we can load strings from tzres
        user32 = ctypes.WinDLL('user32')
        # Specify the LoadStringW function
        user32.LoadStringW.argtypes = (wintypes.HINSTANCE,
                                       wintypes.UINT,
                                       wintypes.LPWSTR,
                                       ctypes.c_int)
        self.LoadStringW = user32.LoadStringW
        self._tzres = ctypes.WinDLL(tzres_loc)
        self.tzres_loc = tzres_loc

    def load_name(self, offset):
        """
        Load a timezone name from a DLL offset (integer).
        >>> from dateutil.tzwin import tzres
        >>> tzr = tzres()
        >>> print(tzr.load_name(112))
        'Eastern Standard Time'
        :param offset:
            A positive integer value referring to a string from the tzres dll.
        ..note:
            Offsets found in the registry are generally of the form
            `@tzres.dll,-114`. The offset in this case if 114, not -114.
        """
        resource = self.p_wchar()
        lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR)
        nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0)
        return resource[:nchar]

    def name_from_string(self, tzname_str):
        """
        Parse strings as returned from the Windows registry into the time zone
        name as defined in the registry.
        >>> from dateutil.tzwin import tzres
        >>> tzr = tzres()
        >>> print(tzr.name_from_string('@tzres.dll,-251'))
        'Dateline Daylight Time'
        >>> print(tzr.name_from_string('Eastern Standard Time'))
        'Eastern Standard Time'
        :param tzname_str:
            A timezone name string as returned from a Windows registry key.
        :return:
            Returns the localized timezone string from tzres.dll if the string
            is of the form `@tzres.dll,-offset`, else returns the input string.
        """
        if not tzname_str.startswith('@'):
            return tzname_str
        name_splt = tzname_str.split(',-')
        try:
            offset = int(name_splt[1])
        except (IndexError, ValueError):
            # BUGFIX: was a bare "except:", which would also swallow
            # KeyboardInterrupt/SystemExit. Only a missing ",-<offset>"
            # suffix (IndexError) or a non-numeric offset (ValueError)
            # means the string is malformed.
            raise ValueError("Malformed timezone string.")
        return self.load_name(offset)
class tzwinbase(tzrangebase):
    """tzinfo class based on win32's timezones available in the registry."""
    def __init__(self):
        raise NotImplementedError('tzwinbase is an abstract base class')
    def __eq__(self, other):
        # Compare on all relevant dimensions, including name.
        if not isinstance(other, tzwinbase):
            return NotImplemented
        return (self._std_offset == other._std_offset and
                self._dst_offset == other._dst_offset and
                self._stddayofweek == other._stddayofweek and
                self._dstdayofweek == other._dstdayofweek and
                self._stdweeknumber == other._stdweeknumber and
                self._dstweeknumber == other._dstweeknumber and
                self._stdhour == other._stdhour and
                self._dsthour == other._dsthour and
                self._stdminute == other._stdminute and
                self._dstminute == other._dstminute and
                self._std_abbr == other._std_abbr and
                self._dst_abbr == other._dst_abbr)
    @staticmethod
    def list():
        """Return a list of all time zones known to the system."""
        with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
            with winreg.OpenKey(handle, TZKEYNAME) as tzkey:
                result = [winreg.EnumKey(tzkey, i)
                          for i in range(winreg.QueryInfoKey(tzkey)[0])]
        return result
    def display(self):
        # Human-readable zone description loaded from the registry.
        return self._display
    def transitions(self, year):
        """
        For a given year, get the DST on and off transition times, expressed
        always on the standard time side. For zones with no transitions, this
        function returns ``None``.
        :param year:
            The year whose transitions you would like to query.
        :return:
            Returns a :class:`tuple` of :class:`datetime.datetime` objects,
            ``(dston, dstoff)`` for zones with an annual DST transition, or
            ``None`` for fixed offset zones.
        """
        if not self.hasdst:
            return None
        dston = picknthweekday(year, self._dstmonth, self._dstdayofweek,
                               self._dsthour, self._dstminute,
                               self._dstweeknumber)
        dstoff = picknthweekday(year, self._stdmonth, self._stddayofweek,
                                self._stdhour, self._stdminute,
                                self._stdweeknumber)
        # Ambiguous dates default to the STD side
        dstoff -= self._dst_base_offset
        return dston, dstoff
    def _get_hasdst(self):
        # A zero DST start month marks a fixed-offset (no-DST) zone.
        return self._dstmonth != 0
    @property
    def _dst_base_offset(self):
        # Difference between the DST and standard offsets (usually 1 hour).
        return self._dst_base_offset_
class tzwin(tzwinbase):
    """Time zone object built from a named zone entry in the Windows
    registry (see ``tzwinbase.list()`` for valid names)."""
    def __init__(self, name):
        self._name = name
        with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
            tzkeyname = text_type("{kn}\\{name}").format(kn=TZKEYNAME, name=name)
            with winreg.OpenKey(handle, tzkeyname) as tzkey:
                keydict = valuestodict(tzkey)
        self._std_abbr = keydict["Std"]
        self._dst_abbr = keydict["Dlt"]
        self._display = keydict["Display"]
        # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm
        # "TZI" packs the REG_TZI_FORMAT structure: 3 longs (Bias,
        # StandardBias, DaylightBias) followed by two 8-short SYSTEMTIMEs.
        tup = struct.unpack("=3l16h", keydict["TZI"])
        stdoffset = -tup[0]-tup[1]          # Bias + StandardBias * -1
        dstoffset = stdoffset-tup[2]        # + DaylightBias * -1
        self._std_offset = datetime.timedelta(minutes=stdoffset)
        self._dst_offset = datetime.timedelta(minutes=dstoffset)
        # for the meaning see the win32 TIME_ZONE_INFORMATION structure docs
        # http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx
        (self._stdmonth,
         self._stddayofweek,   # Sunday = 0
         self._stdweeknumber,  # Last = 5
         self._stdhour,
         self._stdminute) = tup[4:9]
        (self._dstmonth,
         self._dstdayofweek,   # Sunday = 0
         self._dstweeknumber,  # Last = 5
         self._dsthour,
         self._dstminute) = tup[12:17]
        self._dst_base_offset_ = self._dst_offset - self._std_offset
        self.hasdst = self._get_hasdst()
    def __repr__(self):
        return "tzwin(%s)" % repr(self._name)
    def __reduce__(self):
        # Pickle by zone name; the registry is re-read on unpickling.
        return (self.__class__, (self._name,))
class tzwinlocal(tzwinbase):
    """Time zone object for the machine's currently configured local zone,
    read from the TimeZoneInformation registry key."""
    def __init__(self):
        with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
            with winreg.OpenKey(handle, TZLOCALKEYNAME) as tzlocalkey:
                keydict = valuestodict(tzlocalkey)
            self._std_abbr = keydict["StandardName"]
            self._dst_abbr = keydict["DaylightName"]
            try:
                # The display string lives under the named zone's own key.
                tzkeyname = text_type('{kn}\\{sn}').format(kn=TZKEYNAME,
                                                           sn=self._std_abbr)
                with winreg.OpenKey(handle, tzkeyname) as tzkey:
                    _keydict = valuestodict(tzkey)
                    self._display = _keydict["Display"]
            except OSError:
                self._display = None
        stdoffset = -keydict["Bias"]-keydict["StandardBias"]
        dstoffset = stdoffset-keydict["DaylightBias"]
        self._std_offset = datetime.timedelta(minutes=stdoffset)
        self._dst_offset = datetime.timedelta(minutes=dstoffset)
        # For reasons unclear, in this particular key, the day of week has been
        # moved to the END of the SYSTEMTIME structure.
        tup = struct.unpack("=8h", keydict["StandardStart"])
        (self._stdmonth,
         self._stdweeknumber,  # Last = 5
         self._stdhour,
         self._stdminute) = tup[1:5]
        self._stddayofweek = tup[7]
        tup = struct.unpack("=8h", keydict["DaylightStart"])
        (self._dstmonth,
         self._dstweeknumber,  # Last = 5
         self._dsthour,
         self._dstminute) = tup[1:5]
        self._dstdayofweek = tup[7]
        self._dst_base_offset_ = self._dst_offset - self._std_offset
        self.hasdst = self._get_hasdst()
    def __repr__(self):
        return "tzwinlocal()"
    def __str__(self):
        # str will return the standard name, not the daylight name.
        return "tzwinlocal(%s)" % repr(self._std_abbr)
    def __reduce__(self):
        # No state to pickle; the registry is re-read on unpickling.
        return (self.__class__, ())
def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
    """ dayofweek == 0 means Sunday, whichweek 5 means last instance """
    one_week = datetime.timedelta(weeks=1)
    anchor = datetime.datetime(year, month, 1, hour, minute)
    # Shift the 1st of the month onto the requested weekday. The modulo
    # works for both ISO weekdays (1-7) and Microsoft-style (0-6),
    # because 7 % 7 == 0.
    first_match = anchor.replace(day=((dayofweek - anchor.isoweekday()) % 7) + 1)
    candidate = first_match + (whichweek - 1) * one_week
    # whichweek == 5 means "last occurrence": step back if we overshot.
    return candidate - one_week if candidate.month != month else candidate
def valuestodict(key):
    """Convert a registry key's values to a dictionary.

    DWORD values are re-interpreted as signed 32-bit integers, and string
    values that reference tzres.dll are resolved to the localized name.
    """
    dout = {}
    size = winreg.QueryInfoKey(key)[1]
    tz_res = None  # lazily constructed tzres() helper
    for i in range(size):
        key_name, value, dtype = winreg.EnumValue(key, i)
        if dtype == winreg.REG_DWORD or dtype == winreg.REG_DWORD_LITTLE_ENDIAN:
            # If it's a DWORD (32-bit integer), it's stored as unsigned - convert
            # that to a proper signed integer
            if value & (1 << 31):
                value = value - (1 << 32)
        elif dtype == winreg.REG_SZ:
            # If it's a reference to the tzres DLL, load the actual string
            if value.startswith('@tzres'):
                tz_res = tz_res or tzres()
                value = tz_res.name_from_string(value)
            value = value.rstrip('\x00')    # Remove trailing nulls
        dout[key_name] = value
    return dout
| 0.000618 |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
"""
feed2db works to turn text-based feed list into database
"""
# @author chengdujin
# @contact [email protected]
# @created Jul. 30, 2013
import sys
reload(sys)
sys.setdefaultencoding('UTF-8')
sys.path.append('../..')
from config.settings import Collection
from config.settings import db
# CONSTANTS
from config.settings import FEED_REGISTRAR
#FILE_PREFIX = '/home/work/newsman/newsman/bin/text_based_feeds/feed_lists/'
#FILE_PREFIX = '/home/users/jinyuan/newsman/newsman/bin/text_based_feeds
# /feed_lists/'
#FILE_PREFIX = '/home/ubuntu/newsman/newsman/bin/text_based_feeds/feed_lists/'
#FILE_PREFIX = '/home/jinyuan/Downloads/newsman/newsman/bin/text_based_feeds
# /feed_lists/'
def _parse_task(line):
"""
read *_feeds_list.txt
"""
line = line.strip()
if line:
task = line.strip().split('*|*')
# task[1] refers to categories
if len(task) == 5:
return task[0].strip(), task[1].strip(), task[2].strip(), task[
3].strip(), task[4].strip(), None
else:
return task[0].strip(), task[1].strip(), task[2].strip(), task[
3].strip(), task[4].strip(), task[5].strip()
else:
return None
def _convert(language='en', country=None):
"""
turn text-based feed infor into database items
Note. 1. categories: [(), ()]
"""
# read in file content
feeds_list = open('%s%s_%s_feeds_list' %
(FILE_PREFIX, language, country), 'r')
lines = feeds_list.readlines()
feeds_list.close()
link_dict = title_dict = {}
for line in lines:
if line.strip():
language, category, transcoder, link, title, labels = _parse_task(
line)
# link check
if link not in link_dict:
link_dict[link] = title
else:
print link, title
# title check
if title not in title_dict:
title_dict[title] = link
else:
print title, link
if __name__ == "__main__":
if len(sys.argv) > 1:
_convert(sys.argv[1], sys.argv[2])
else:
print 'Please indicate a language and country'
| 0.003552 |
#!/usr/bin/python
#CHIPSEC: Platform Security Assessment Framework
#Copyright (c) 2010-2015, Intel Corporation
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; Version 2.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#Contact information:
#[email protected]
#
"""
The mem command provides direct access to read and write physical memory.
"""
import os
import time
import chipsec_util
import chipsec.defines
import chipsec.file
from chipsec.logger import print_buffer
from chipsec.command import BaseCommand
def read_mem(pa, size = chipsec.defines.BOUNDARY_4KB):
    """Read *size* bytes of physical memory at *pa*.

    Returns the raw bytes, or None when the read fails (e.g. unreadable
    or unmapped region).
    """
    try:
        buffer = chipsec_util._cs.mem.read_physical_mem( pa, size )
    except Exception:
        # BUGFIX: was a bare "except:", which would also swallow
        # KeyboardInterrupt/SystemExit and make the dump un-interruptible.
        buffer = None
    return buffer
def dump_region_to_path(path, pa_start, pa_end):
    """Dump physical memory [pa_start, pa_end) into files under *path*.

    The aligned middle of the region is read one 4KB page at a time; each
    contiguous readable run is appended to a single file named
    m<16-hex-digit start address>.bin, and a new file is started after
    every unreadable page. Unaligned head/tail fragments are handled
    separately before and after the page loop.
    """
    pa = (pa_start + chipsec.defines.ALIGNED_4KB) & ~chipsec.defines.ALIGNED_4KB
    end = pa_end & ~chipsec.defines.ALIGNED_4KB
    head_len = pa - pa_start
    tail_len = pa_end - end
    f = None  # current output file; None while inside an unreadable gap
    # read leading bytes to the next boundary
    if (head_len > 0):
        b = read_mem(pa_start, head_len)
        if b is not None:
            fname = os.path.join(path, "m%016X.bin" % pa_start)
            f = open(fname, 'wb')
            f.write(b)
    # page-by-page loop over the aligned middle of the region
    while pa < end:
        b = read_mem(pa)
        if b is not None:
            if f is None:
                # start a new output file at the first readable page
                fname = os.path.join(path, "m%016X.bin" % pa)
                f = open(fname, 'wb')
            f.write(b)
        else:
            # unreadable page ends the current run: close the open file
            if f is not None:
                f.close()
                f = None
        pa += chipsec.defines.BOUNDARY_4KB
    # read trailing bytes
    if (tail_len > 0):
        b = read_mem(end, tail_len)
        if b is not None:
            if f is None:
                fname = os.path.join(path, "m%016X.bin" % end)
                f = open(fname, 'wb')
            f.write(b)
    if f is not None:
        f.close()
# Physical Memory
class MemCommand(BaseCommand):
    """
    >>> chipsec_util mem <op> <physical_address> <length> [value|buffer_file]
    >>>
    >>> <physical_address> : 64-bit physical address
    >>> <op>               : read|readval|write|writeval|allocate|pagedump
    >>> <length>           : byte|word|dword or length of the buffer from <buffer_file>
    >>> <value>            : byte, word or dword value to be written to memory at <physical_address>
    >>> <buffer_file>      : file with the contents to be written to memory at <physical_address>
    Examples:
    >>> chipsec_util mem <op>     <physical_address> <length> [value|file]
    >>> chipsec_util mem readval  0xFED40000         dword
    >>> chipsec_util mem read     0x41E              0x20     buffer.bin
    >>> chipsec_util mem writeval 0xA0000            dword    0x9090CCCC
    >>> chipsec_util mem write    0x100000000        0x1000   buffer.bin
    >>> chipsec_util mem write    0x100000000        0x10     000102030405060708090A0B0C0D0E0F
    >>> chipsec_util mem allocate 0x1000
    >>> chipsec_util mem pagedump 0xFED00000         0x100000
    >>> chipsec_util mem search   0xF0000            0x10000  _SM_
    """
    def requires_driver(self):
        """Tell the framework whether the kernel driver must be loaded."""
        # No driver required when printing the util documentation
        if len(self.argv) < 3:
            return False
        return True
    def run(self):
        """Parse self.argv and dispatch the requested memory operation."""
        size = 0x100
        if len(self.argv) < 3:
            print MemCommand.__doc__
            return
        op = self.argv[2]
        t = time.time()
        # allocate: reserve a contiguous physical buffer and report VA/PA.
        if 'allocate' == op and 4 == len(self.argv):
            size = int(self.argv[3],16)
            (va, pa) = self.cs.mem.alloc_physical_mem( size )
            self.logger.log( '[CHIPSEC] Allocated %X bytes of physical memory: VA = 0x%016X, PA = 0x%016X' % (size, va, pa) )
        # search: scan a physical range for a byte string.
        elif 'search' == op and len(self.argv) > 5:
            phys_address = int(self.argv[3],16)
            size = int(self.argv[4],16)

            buffer = self.cs.mem.read_physical_mem( phys_address, size )
            offset = buffer.find(self.argv[5])

            if offset != -1:
                self.logger.log( '[CHIPSEC] search buffer from memory: PA = 0x%016X, len = 0x%X, target address= 0x%X..' % (phys_address, size, phys_address + offset) )
            else:
                self.logger.log( '[CHIPSEC] search buffer from memory: PA = 0x%016X, len = 0x%X, can not find the target in the searched range..' % (phys_address, size) )
        # pagedump: dump a physical range to files in the tool directory.
        elif 'pagedump' == op and len(self.argv) > 3:
            start = long(self.argv[3],16)
            length = long(self.argv[4],16) if len(self.argv) > 4 else chipsec.defines.BOUNDARY_4KB
            end = start + length
            dump_region_to_path( chipsec.file.get_main_dir(), start, end )
        # read: read a buffer and print it or save it to a file.
        elif 'read' == op:
            phys_address = int(self.argv[3],16)
            size = int(self.argv[4],16) if len(self.argv) > 4 else 0x100
            self.logger.log( '[CHIPSEC] reading buffer from memory: PA = 0x%016X, len = 0x%X..' % (phys_address, size) )
            buffer = self.cs.mem.read_physical_mem( phys_address, size )
            if len(self.argv) > 5:
                buf_file = self.argv[5]
                chipsec.file.write_file( buf_file, buffer )
                self.logger.log( "[CHIPSEC] written 0x%X bytes to '%s'" % (len(buffer), buf_file) )
            else:
                print_buffer( buffer )
        # readval: read a single byte/word/dword value.
        elif 'readval' == op:
            phys_address = int(self.argv[3],16)
            width = 0x4
            if len(self.argv) > 4:
                width = chipsec_util.get_option_width(self.argv[4]) if chipsec_util.is_option_valid_width(self.argv[4]) else int(self.argv[4],16)
            self.logger.log( '[CHIPSEC] reading %X-byte value from PA 0x%016X..' % (width, phys_address) )
            if   0x1 == width: value = self.cs.mem.read_physical_mem_byte ( phys_address )
            elif 0x2 == width: value = self.cs.mem.read_physical_mem_word ( phys_address )
            elif 0x4 == width: value = self.cs.mem.read_physical_mem_dword( phys_address )
            self.logger.log( '[CHIPSEC] value = 0x%X' % value )
        # write: write a buffer taken from a file or an inline hex string.
        elif 'write' == op:
            phys_address = int(self.argv[3],16)
            if len(self.argv) > 4:
                size = int(self.argv[4],16)
            else:
                self.logger.error( "must specify <length> argument in 'mem write'" )
                return
            if len(self.argv) > 5:
                buf_file = self.argv[5]
                if not os.path.exists( buf_file ):
                    #buffer = buf_file.decode('hex')
                    try:
                        buffer = bytearray.fromhex(buf_file)
                    except ValueError, e:
                        self.logger.error( "incorrect <value> specified: '%s'" % buf_file )
                        self.logger.error( str(e) )
                        return
                    self.logger.log( "[CHIPSEC] read 0x%X hex bytes from command-line: %s'" % (len(buffer), buf_file) )
                else:
                    buffer = chipsec.file.read_file( buf_file )
                    self.logger.log( "[CHIPSEC] read 0x%X bytes from file '%s'" % (len(buffer), buf_file) )

                if len(buffer) < size:
                    self.logger.error( "number of bytes read (0x%X) is less than the specified <length> (0x%X)" % (len(buffer),size) )
                    return

                self.logger.log( '[CHIPSEC] writing buffer to memory: PA = 0x%016X, len = 0x%X..' % (phys_address, size) )
                self.cs.mem.write_physical_mem( phys_address, size, buffer )
            else:
                self.logger.error( "must specify <buffer>|<file> argument in 'mem write'" )
                return
        # writeval: write a single byte/word/dword value.
        elif 'writeval' == op:
            phys_address = int(self.argv[3],16)
            if len(self.argv) > 4:
                width = chipsec_util.get_option_width(self.argv[4]) if chipsec_util.is_option_valid_width(self.argv[4]) else int(self.argv[4],16)
            else:
                self.logger.error( "must specify <length> argument in 'mem writeval' as one of %s" % chipsec_util.CMD_OPTS_WIDTH )
                return
            if len(self.argv) > 5:
                value = int(self.argv[5],16)
            else:
                self.logger.error( "must specify <value> argument in 'mem writeval'" )
                return
            self.logger.log( '[CHIPSEC] writing %X-byte value 0x%X to PA 0x%016X..' % (width, value, phys_address) )
            if   0x1 == width: self.cs.mem.write_physical_mem_byte ( phys_address, value )
            elif 0x2 == width: self.cs.mem.write_physical_mem_word ( phys_address, value )
            elif 0x4 == width: self.cs.mem.write_physical_mem_dword( phys_address, value )
        else:
            print MemCommand.__doc__
            return
        self.logger.log( "[CHIPSEC] (mem) time elapsed %.3f" % (time.time()-t) )
# Registry mapping the CLI sub-command name to its handler class.
commands = { 'mem': MemCommand }
| 0.018573 |
# -*- coding: utf-8 -*-
import pytest
from django.utils import timezone
from nose.tools import * # noqa
from framework.auth.core import Auth
from osf_tests.factories import AuthUserFactory, ProjectFactory, UserFactory
from scripts import parse_citation_styles
from tests.base import OsfTestCase
from osf.models import OSFUser as User, AbstractNode as Node
from website.citations.utils import datetime_to_csl
from website.util import api_url_for
pytestmark = pytest.mark.django_db
class CitationsUtilsTestCase(OsfTestCase):
    def test_datetime_to_csl(self):
        # Convert a datetime instance to csl's date-variable schema
        moment = timezone.now()
        expected = {'date-parts': [[moment.year, moment.month, moment.day]]}
        assert_equal(datetime_to_csl(moment), expected)
class CitationsNodeTestCase(OsfTestCase):
    """Tests for the CSL citation data generated from a Node."""
    def setUp(self):
        super(CitationsNodeTestCase, self).setUp()
        self.node = ProjectFactory()
    def tearDown(self):
        super(CitationsNodeTestCase, self).tearDown()
        # Clear nodes and users created by the factories.
        Node.remove()
        User.remove()
    def test_csl_single_author(self):
        # Nodes with one contributor generate valid CSL-data
        assert_equal(
            self.node.csl,
            {
                'publisher': 'Open Science Framework',
                'author': [{
                    'given': self.node.creator.given_name,
                    'family': self.node.creator.family_name,
                }],
                'URL': self.node.display_absolute_url,
                'issued': datetime_to_csl(self.node.logs.latest().date),
                'title': self.node.title,
                'type': 'webpage',
                'id': self.node._id,
            },
        )
    def test_csl_multiple_authors(self):
        # Nodes with multiple contributors generate valid CSL-data
        user = UserFactory()
        self.node.add_contributor(user)
        self.node.save()
        assert_equal(
            self.node.csl,
            {
                'publisher': 'Open Science Framework',
                'author': [
                    {
                        'given': self.node.creator.given_name,
                        'family': self.node.creator.family_name,
                    },
                    {
                        'given': user.given_name,
                        'family': user.family_name,
                    }
                ],
                'URL': self.node.display_absolute_url,
                'issued': datetime_to_csl(self.node.logs.latest().date),
                'title': self.node.title,
                'type': 'webpage',
                'id': self.node._id,
            },
        )
    def test_non_visible_contributors_arent_included_in_csl(self):
        # Only bibliographic (visible) contributors appear as CSL authors.
        node = ProjectFactory()
        visible = UserFactory()
        node.add_contributor(visible, auth=Auth(node.creator))
        invisible = UserFactory()
        node.add_contributor(invisible, auth=Auth(node.creator), visible=False)
        node.save()
        assert_equal(len(node.csl['author']), 2)
        expected_authors = [
            contrib.csl_name for contrib in [node.creator, visible]
        ]
        assert_equal(node.csl['author'], expected_authors)
class CitationsUserTestCase(OsfTestCase):
    def setUp(self):
        super(CitationsUserTestCase, self).setUp()
        self.user = UserFactory()

    def tearDown(self):
        super(CitationsUserTestCase, self).tearDown()
        User.remove()

    def test_user_csl(self):
        # Convert a User instance to csl's name-variable schema
        expected = {
            'given': self.user.given_name,
            'family': self.user.family_name,
        }
        assert_equal(self.user.csl_name, expected)
class CitationsViewsTestCase(OsfTestCase):
@pytest.fixture(autouse=True)
def _parsed_citation_styles(self):
# populate the DB with parsed citation styles
try:
parse_citation_styles.main()
except OSError:
pass
def test_list_styles(self):
# Response includes a list of available citation styles
response = self.app.get(api_url_for('list_citation_styles'))
assert_true(response.json)
assert_equal(
len(
[
style for style in response.json['styles']
if style.get('id') == 'bibtex'
]
),
1,
)
def test_list_styles_filter(self):
# Response includes a list of available citation styles
response = self.app.get(api_url_for('list_citation_styles', q='bibtex'))
assert_true(response.json)
assert_equal(
len(response.json['styles']), 1
)
assert_equal(
response.json['styles'][0]['id'], 'bibtex'
)
def test_node_citation_view(self):
node = ProjectFactory()
user = AuthUserFactory()
node.add_contributor(user)
node.save()
response = self.app.get("/api/v1" + "/project/" + node._id + "/citation/", auto_follow=True, auth=user.auth)
assert_true(response.json)
| 0.000777 |
# Copyright 2001-2010 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python, and influenced by
Apache's log4j system.
Copyright (C) 2001-2010 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging.handlers' and log away!
"""
import cPickle
import errno
import logging
import os
import re
import socket
import struct
import time
from stat import ST_DEV, ST_INO, ST_MTIME
try:
import codecs
except ImportError:
codecs = None
try:
unicode
_unicode = True
except NameError:
_unicode = False
#
# Some constants...
#
# Default ports for the network logging handlers defined below.  The 902x
# values are this module's own defaults for its TCP/UDP/HTTP/SOAP logging
# servers; 514 is the conventional syslog port for both transports.
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514
_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
class BaseRotatingHandler(logging.FileHandler):
    """
    Common machinery for handlers that roll their log file over at some
    trigger point.

    Subclasses (RotatingFileHandler, TimedRotatingFileHandler) supply
    shouldRollover() and doRollover(); this class is not meant to be
    instantiated directly.
    """
    def __init__(self, filename, mode, encoding=None, delay=0):
        """
        Open *filename* as the logging stream, optionally with *encoding*.
        """
        # Without the codecs module an encoding cannot be honoured, so
        # silently fall back to the platform default.
        if codecs is None:
            encoding = None
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.encoding = encoding
        self.mode = mode
    def emit(self, record):
        """
        Emit a record, performing a rollover first if the subclass's
        shouldRollover() says one is due.
        """
        try:
            rollover_due = self.shouldRollover(record)
            if rollover_due:
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
class RotatingFileHandler(BaseRotatingHandler):
    """
    Log to a family of files, moving on to a fresh file whenever the
    current one reaches a size limit.

    With a backupCount of N and base name "app.log", rotation produces
    "app.log.1" (newest backup) through "app.log.N" (oldest); the live
    file is always "app.log".  A maxBytes of zero disables rotation.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=0):
        """
        Open *filename* for logging, rolling over near *maxBytes* bytes
        and keeping up to *backupCount* numbered backups.
        """
        # Rotation only makes sense in append mode: honouring e.g. 'w'
        # would truncate the live log on every run of the application.
        if maxBytes > 0:
            mode = 'a'
        BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
        self.maxBytes = maxBytes
        self.backupCount = backupCount
    def doRollover(self):
        """
        Perform a size-based rollover: shift each numbered backup up by
        one (discarding the oldest), rename the live file to ".1", then
        reopen a fresh live file.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        if self.backupCount > 0:
            # Walk from the oldest slot downwards: .N-1 -> .N, ..., .1 -> .2
            for index in range(self.backupCount - 1, 0, -1):
                newer = "%s.%d" % (self.baseFilename, index)
                older = "%s.%d" % (self.baseFilename, index + 1)
                if os.path.exists(newer):
                    if os.path.exists(older):
                        os.remove(older)
                    os.rename(newer, older)
            first_backup = self.baseFilename + ".1"
            if os.path.exists(first_backup):
                os.remove(first_backup)
            os.rename(self.baseFilename, first_backup)
        self.mode = 'w'
        self.stream = self._open()
    def shouldRollover(self, record):
        """
        Report (1/0) whether writing *record* would push the live file
        to or past maxBytes.
        """
        if self.stream is None:                 # delayed open
            self.stream = self._open()
        if self.maxBytes <= 0:                  # rotation disabled
            return 0
        msg = "%s\n" % self.format(record)
        # Seek to the end first: tell() is otherwise unreliable on
        # non-POSIX-compliant Windows streams.
        self.stream.seek(0, 2)
        if self.stream.tell() + len(msg) >= self.maxBytes:
            return 1
        return 0
class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.
    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False):
        """
        Open *filename* for appending and arrange time-based rollover.

        when        -- unit for *interval*: 'S', 'M', 'H', 'D', 'MIDNIGHT',
                       or 'W0'..'W6' (weekly; 0 is Monday).  Case-insensitive.
        interval    -- number of *when* units between rollovers (for
                       'MIDNIGHT'/'W*' the calendar boundary governs the
                       first rollover).
        backupCount -- how many dated backup files to keep (0 keeps all).
        utc         -- if true, rollover times are computed in UTC rather
                       than local time.
        """
        BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers. Also set the filename suffix used when
        # a rollover occurs. Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        if self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}$"
        elif self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}$"
        elif self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)
        # Precompile the suffix pattern; getFilesToDelete() uses it to
        # recognise backup files this handler produced.
        self.extMatch = re.compile(self.extMatch)
        self.interval = self.interval * interval # multiply by units requested
        # Seed the first rollover from the file's mtime when it already
        # exists, so restarting the application does not reset the schedule.
        if os.path.exists(filename):
            t = os.stat(filename)[ST_MTIME]
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)
    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.

        Returns an epoch timestamp (seconds) for the next rollover.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is.  In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now.  So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time.  After that, the regular interval will take care of
        # the rest.  Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            # r is the number of seconds left between now and midnight
            r = _MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 +
                    currentSecond)
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts.  There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday).  Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4.  In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = t[6] # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        # Crossing a DST boundary shifts wall-clock midnight
                        # by an hour; compensate so rollover stays aligned.
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                                newRolloverAt = newRolloverAt - 3600
                            else:           # DST bows out before next rollover, so we need to add an hour
                                newRolloverAt = newRolloverAt + 3600
                    result = newRolloverAt
        return result
    def shouldRollover(self, record):
        """
        Determine if rollover should occur.
        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            return 1
        #print "No need to rollover: %d, %d" % (t, self.rolloverAt)
        return 0
    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        Returns the oldest matching backup files beyond backupCount,
        identified by the precompiled suffix pattern (more specific than
        the earlier glob.glob() approach).
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        prefix = baseName + "."
        plen = len(prefix)
        for fileName in fileNames:
            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                if self.extMatch.match(suffix):
                    result.append(os.path.join(dirName, fileName))
        # Sorting puts the oldest date suffixes first; keep the newest
        # backupCount files and return the remainder for deletion.
        result.sort()
        if len(result) < self.backupCount:
            result = []
        else:
            result = result[:len(result) - self.backupCount]
        return result
    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens.  However, you want the file to be named for the
        start of the interval, not the current time.  If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # get the time that this sequence started at and make it a TimeTuple
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
        dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
        if os.path.exists(dfn):
            os.remove(dfn)
        os.rename(self.baseFilename, dfn)
        if self.backupCount > 0:
            # find the oldest log file and delete it
            #s = glob.glob(self.baseFilename + ".20*")
            #if len(s) > self.backupCount:
            #    s.sort()
            #    os.remove(s[0])
            for s in self.getFilesToDelete():
                os.remove(s)
        #print "%s -> %s" % (self.baseFilename, dfn)
        self.mode = 'w'
        self.stream = self._open()
        currentTime = int(time.time())
        # Advance rolloverAt past "now" in whole intervals so a long gap
        # between records does not trigger back-to-back rollovers.
        newRolloverAt = self.computeRollover(currentTime)
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        #If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstNow = time.localtime(currentTime)[-1]
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                    newRolloverAt = newRolloverAt - 3600
                else:           # DST bows out before next rollover, so we need to add an hour
                    newRolloverAt = newRolloverAt + 3600
        self.rolloverAt = newRolloverAt
class WatchedFileHandler(logging.FileHandler):
    """
    A handler for logging to a file, which watches the file
    to see if it has changed while in use. This can happen because of
    usage of programs such as newsyslog and logrotate which perform
    log file rotation. This handler, intended for use under Unix,
    watches the file to see if it has changed since the last emit.
    (A file has changed if its device or inode have changed.)
    If it has changed, the old file stream is closed, and the file
    opened to get a new stream.
    This handler is not appropriate for use under Windows, because
    under Windows open files cannot be moved or renamed - logging
    opens the files with exclusive locks - and so there is no need
    for such a handler. Furthermore, ST_INO is not supported under
    Windows; stat always returns zero for this value.
    This handler is based on a suggestion and patch by Chad J.
    Schroeder.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=0):
        """
        Open *filename* for logging and remember the device/inode of the
        stream actually opened, so emit() can detect external rotation.
        """
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.dev, self.ino = -1, -1
        self._statstream()
    def _statstream(self):
        """
        Record the device and inode of the stream's underlying file.

        fstat()ing our own file descriptor (rather than stat()ing by
        path) guarantees the stats describe the file we actually hold
        open, even if the path was swapped underneath us meanwhile.
        """
        if self.stream:
            sres = os.fstat(self.stream.fileno())
            self.dev, self.ino = sres[ST_DEV], sres[ST_INO]
    def emit(self, record):
        """
        Emit a record.
        First check if the underlying file has changed, and if it
        has, close the old stream and reopen the file to get the
        current stream.
        """
        # Reduce the chance of race conditions by stat'ing by path only
        # once, handling a concurrent unlink explicitly, and then
        # fstat'ing the new fd if we open a new log stream.  The previous
        # os.path.exists()/os.stat() pair could raise an unhandled
        # OSError if the file was rotated away between the two calls
        # (CPython issue #14632).
        try:
            # stat the file by path, checking for existence
            sres = os.stat(self.baseFilename)
        except OSError as err:
            if err.errno == errno.ENOENT:
                sres = None
            else:
                raise
        # compare file system stat with that of our stream file handle
        if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
            if self.stream is not None:
                # we have an open file handle, clean it up
                self.stream.flush()
                self.stream.close()
                self.stream = None  # cleared first in case _open() fails
                # open a new file handle and get new stat info from that fd
                self.stream = self._open()
                self._statstream()
        logging.FileHandler.emit(self, record)
class SocketHandler(logging.Handler):
    """
    A handler class which writes logging records, in pickle format, to
    a streaming socket. The socket is kept open across logging calls.
    If the peer resets it, an attempt is made to reconnect on the next call.
    The pickle which is sent is that of the LogRecord's attribute dictionary
    (__dict__), so that the receiver does not need to have the logging module
    installed in order to process the logging event.
    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """
    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        The attribute 'closeOnError' defaults to 0 (false); set it to a
        true value if you want a socket error to cause the socket to be
        silently closed and then reopened on the next logging call.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        self.sock = None
        self.closeOnError = 0
        # retryTime is None until the first failed connect; afterwards it
        # holds the earliest time at which createSocket() will retry.
        self.retryTime = None
        #
        # Exponential backoff parameters.
        #
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0
    def makeSocket(self, timeout=1):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.

        Returns a connected TCP (SOCK_STREAM) socket with *timeout* set
        where the platform supports it.
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if hasattr(s, 'settimeout'):
            s.settimeout(timeout)
        s.connect((self.host, self.port))
        return s
    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time. Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # Either retryTime is None, in which case this
        # is the first time back after a disconnect, or
        # we've waited long enough.
        if self.retryTime is None:
            attempt = 1
        else:
            attempt = (now >= self.retryTime)
        if attempt:
            try:
                self.sock = self.makeSocket()
                self.retryTime = None # next time, no delay before trying
            except socket.error:
                #Creation failed, so set the retry time and return.
                # NOTE: retryPeriod is created lazily here on the first
                # failure; it grows by retryFactor up to retryMax.
                if self.retryTime is None:
                    self.retryPeriod = self.retryStart
                else:
                    self.retryPeriod = self.retryPeriod * self.retryFactor
                    if self.retryPeriod > self.retryMax:
                        self.retryPeriod = self.retryMax
                self.retryTime = now + self.retryPeriod
    def send(self, s):
        """
        Send a pickled string to the socket.
        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.sock is None:
            self.createSocket()
        #self.sock can be None either because we haven't reached the retry
        #time yet, or because we have reached the retry time and retried,
        #but are still unable to connect.
        if self.sock:
            try:
                if hasattr(self.sock, "sendall"):
                    self.sock.sendall(s)
                else:
                    # Fallback loop for socket objects without sendall():
                    # keep sending until the whole payload is written.
                    sentsofar = 0
                    left = len(s)
                    while left > 0:
                        sent = self.sock.send(s[sentsofar:])
                        sentsofar = sentsofar + sent
                        left = left - sent
            except socket.error:
                self.sock.close()
                self.sock = None # so we can call createSocket next time
    def makePickle(self, record):
        """
        Pickles the record in binary format with a length prefix, and
        returns it ready for transmission across the socket.

        The payload is the record's __dict__ pickled with protocol 1,
        preceded by a 4-byte big-endian length (struct format ">L").
        """
        ei = record.exc_info
        if ei:
            # Traceback objects are not picklable, so render the traceback
            # text into the record first, then drop exc_info for the wire.
            dummy = self.format(record) # just to get traceback text into record.exc_text
            record.exc_info = None  # to avoid Unpickleable error
        s = cPickle.dumps(record.__dict__, 1)
        if ei:
            record.exc_info = ei  # for next handler
        slen = struct.pack(">L", len(s))
        return slen + s
    def handleError(self, record):
        """
        Handle an error during logging.
        An error has occurred during logging. Most likely cause -
        connection lost. Close the socket so that we can retry on the
        next event.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None        #try to reconnect next time
        else:
            logging.Handler.handleError(self, record)
    def emit(self, record):
        """
        Emit a record.
        Pickles the record and writes it to the socket in binary format.
        If there is an error with the socket, silently drop the packet.
        If there was a problem with the socket, re-establishes the
        socket.
        """
        try:
            s = self.makePickle(record)
            self.send(s)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
    def close(self):
        """
        Closes the socket.
        """
        if self.sock:
            self.sock.close()
            self.sock = None
        logging.Handler.close(self)
class DatagramHandler(SocketHandler):
    """
    A SocketHandler variant that writes pickled logging records to a UDP
    (datagram) socket instead of a TCP stream.

    As with SocketHandler, the payload is the LogRecord's attribute
    dictionary (__dict__), so the receiving end does not need the logging
    module to process the event; use makeLogRecord to reconstitute a
    LogRecord from it.
    """
    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        self.closeOnError = 0
    def makeSocket(self):
        """
        Override the base factory to produce a UDP (SOCK_DGRAM) socket
        rather than a connected TCP one.
        """
        return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    def send(self, s):
        """
        Send a pickled string to a socket.

        Unlike the stream version, no partial-send handling is done:
        UDP neither guarantees delivery nor ordering, so each payload
        goes out as a single datagram.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, (self.host, self.port))
class SysLogHandler(logging.Handler):
    """
    A handler class which sends formatted logging records to a syslog
    server. Based on Sam Rushing's syslog module:
    http://www.nightmare.com/squirl/python-ext/misc/syslog.py
    Contributed by Nicolas Untz (after which minor refactoring changes
    have been made).
    """
    # from <linux/sys/syslog.h>:
    # ======================================================================
    # priorities/facilities are encoded into a single 32-bit quantity, where
    # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
    # facility (0-big number). Both the priorities and the facilities map
    # roughly one-to-one to strings in the syslogd(8) source code.  This
    # mapping is included in this file.
    #
    # priorities (these are ordered)
    LOG_EMERG     = 0       #  system is unusable
    LOG_ALERT     = 1       #  action must be taken immediately
    LOG_CRIT      = 2       #  critical conditions
    LOG_ERR       = 3       #  error conditions
    LOG_WARNING   = 4       #  warning conditions
    LOG_NOTICE    = 5       #  normal but significant condition
    LOG_INFO      = 6       #  informational
    LOG_DEBUG     = 7       #  debug-level messages
    #  facility codes
    LOG_KERN      = 0       #  kernel messages
    LOG_USER      = 1       #  random user-level messages
    LOG_MAIL      = 2       #  mail system
    LOG_DAEMON    = 3       #  system daemons
    LOG_AUTH      = 4       #  security/authorization messages
    LOG_SYSLOG    = 5       #  messages generated internally by syslogd
    LOG_LPR       = 6       #  line printer subsystem
    LOG_NEWS      = 7       #  network news subsystem
    LOG_UUCP      = 8       #  UUCP subsystem
    LOG_CRON      = 9       #  clock daemon
    LOG_AUTHPRIV  = 10      #  security/authorization messages (private)
    LOG_FTP       = 11      #  FTP daemon
    #  other codes through 15 reserved for system use
    LOG_LOCAL0    = 16      #  reserved for local use
    LOG_LOCAL1    = 17      #  reserved for local use
    LOG_LOCAL2    = 18      #  reserved for local use
    LOG_LOCAL3    = 19      #  reserved for local use
    LOG_LOCAL4    = 20      #  reserved for local use
    LOG_LOCAL5    = 21      #  reserved for local use
    LOG_LOCAL6    = 22      #  reserved for local use
    LOG_LOCAL7    = 23      #  reserved for local use
    # String aliases accepted by encodePriority() for the priority values
    # above (some are deprecated spellings kept for compatibility).
    priority_names = {
        "alert":    LOG_ALERT,
        "crit":     LOG_CRIT,
        "critical": LOG_CRIT,
        "debug":    LOG_DEBUG,
        "emerg":    LOG_EMERG,
        "err":      LOG_ERR,
        "error":    LOG_ERR,        #  DEPRECATED
        "info":     LOG_INFO,
        "notice":   LOG_NOTICE,
        "panic":    LOG_EMERG,      #  DEPRECATED
        "warn":     LOG_WARNING,    #  DEPRECATED
        "warning":  LOG_WARNING,
        }
    # String aliases accepted by encodePriority() for the facility values.
    facility_names = {
        "auth":     LOG_AUTH,
        "authpriv": LOG_AUTHPRIV,
        "cron":     LOG_CRON,
        "daemon":   LOG_DAEMON,
        "ftp":      LOG_FTP,
        "kern":     LOG_KERN,
        "lpr":      LOG_LPR,
        "mail":     LOG_MAIL,
        "news":     LOG_NEWS,
        "security": LOG_AUTH,       #  DEPRECATED
        "syslog":   LOG_SYSLOG,
        "user":     LOG_USER,
        "uucp":     LOG_UUCP,
        "local0":   LOG_LOCAL0,
        "local1":   LOG_LOCAL1,
        "local2":   LOG_LOCAL2,
        "local3":   LOG_LOCAL3,
        "local4":   LOG_LOCAL4,
        "local5":   LOG_LOCAL5,
        "local6":   LOG_LOCAL6,
        "local7":   LOG_LOCAL7,
        }
    #The map below appears to be trivially lowercasing the key. However,
    #there's more to it than meets the eye - in some locales, lowercasing
    #gives unexpected results. See SF #1524081: in the Turkish locale,
    #"INFO".lower() != "info"
    priority_map = {
        "DEBUG" : "debug",
        "INFO" : "info",
        "WARNING" : "warning",
        "ERROR" : "error",
        "CRITICAL" : "critical"
    }
    def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
                 facility=LOG_USER, socktype=socket.SOCK_DGRAM):
        """
        Initialize a handler.
        If address is specified as a string, a UNIX socket is used. To log to a
        local syslogd, "SysLogHandler(address="/dev/log")" can be used.
        If facility is not specified, LOG_USER is used.

        socktype selects SOCK_DGRAM (default) or SOCK_STREAM transport for
        network (non-UNIX-socket) addresses.
        """
        logging.Handler.__init__(self)
        self.address = address
        self.facility = facility
        self.socktype = socktype
        if isinstance(address, basestring):
            self.unixsocket = 1
            self._connect_unixsocket(address)
        else:
            self.unixsocket = 0
            self.socket = socket.socket(socket.AF_INET, socktype)
            if socktype == socket.SOCK_STREAM:
                self.socket.connect(address)
        self.formatter = None
    def _connect_unixsocket(self, address):
        # Connect to a local syslog daemon over a UNIX-domain socket.
        self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        # syslog may require either DGRAM or STREAM sockets
        try:
            self.socket.connect(address)
        except socket.error:
            # Datagram connect refused - fall back to a stream socket.
            self.socket.close()
            self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.socket.connect(address)
    # curious: when talking to the unix-domain '/dev/log' socket, a
    # zero-terminator seems to be required.  this string is placed
    # into a class variable so that it can be overridden if
    # necessary.
    log_format_string = '<%d>%s\000'
    def encodePriority(self, facility, priority):
        """
        Encode the facility and priority. You can pass in strings or
        integers - if strings are passed, the facility_names and
        priority_names mapping dictionaries are used to convert them to
        integers.

        Returns the combined PRI value: facility in the high bits,
        priority in the low 3 bits.
        """
        if isinstance(facility, basestring):
            facility = self.facility_names[facility]
        if isinstance(priority, basestring):
            priority = self.priority_names[priority]
        return (facility << 3) | priority
    def close (self):
        """
        Closes the socket.
        """
        # Network sockets are deliberately left open here; only the
        # UNIX-domain socket connection is torn down.
        if self.unixsocket:
            self.socket.close()
        logging.Handler.close(self)
    def mapPriority(self, levelName):
        """
        Map a logging level name to a key in the priority_names map.
        This is useful in two scenarios: when custom levels are being
        used, and in the case where you can't do a straightforward
        mapping by lowercasing the logging level name because of locale-
        specific issues (see SF #1524081).

        Unknown level names fall back to "warning".
        """
        return self.priority_map.get(levelName, "warning")
    def emit(self, record):
        """
        Emit a record.
        The record is formatted, and then sent to the syslog server. If
        exception information is present, it is NOT sent to the server.
        """
        # NUL-terminate the message (see log_format_string note above).
        msg = self.format(record) + '\000'
        """
        We need to convert record level to lowercase, maybe this will
        change in the future.
        """
        prio = '<%d>' % self.encodePriority(self.facility,
                                            self.mapPriority(record.levelname))
        # Message is a string. Convert to bytes as required by RFC 5424
        if type(msg) is unicode:
            msg = msg.encode('utf-8')
            if codecs:
                msg = codecs.BOM_UTF8 + msg
        msg = prio + msg
        try:
            if self.unixsocket:
                try:
                    self.socket.send(msg)
                except socket.error:
                    # syslogd may have been restarted; reconnect and retry once.
                    self._connect_unixsocket(self.address)
                    self.socket.send(msg)
            elif self.socktype == socket.SOCK_DGRAM:
                self.socket.sendto(msg, self.address)
            else:
                self.socket.sendall(msg)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
class SMTPHandler(logging.Handler):
    """
    A handler class which sends an SMTP email for each logging event.
    """
    def __init__(self, mailhost, fromaddr, toaddrs, subject,
                 credentials=None, secure=None):
        """
        Initialize the handler.

        mailhost    -- SMTP server, either a hostname or a (host, port)
                       tuple for a non-standard port.
        fromaddr    -- sender address.
        toaddrs     -- a recipient address or list of addresses.
        subject     -- subject line (see also getSubject()).
        credentials -- optional (username, password) tuple for SMTP auth.
        secure      -- to use TLS, pass a tuple for this argument: empty,
                       (keyfile,), or (keyfile, certfile); it is handed to
                       smtplib's `starttls` and only used when credentials
                       are also supplied.
        """
        logging.Handler.__init__(self)
        if isinstance(mailhost, tuple):
            self.mailhost, self.mailport = mailhost
        else:
            self.mailhost, self.mailport = mailhost, None
        if isinstance(credentials, tuple):
            self.username, self.password = credentials
        else:
            self.username = None
        self.fromaddr = fromaddr
        # Accept a single address and normalise it to a list.
        if isinstance(toaddrs, basestring):
            toaddrs = [toaddrs]
        self.toaddrs = toaddrs
        self.subject = subject
        self.secure = secure
    def getSubject(self, record):
        """
        Return the subject line for the email; override this method to
        make the subject depend on the record being emitted.
        """
        return self.subject
    def emit(self, record):
        """
        Emit a record.

        Format the record and mail it to the configured recipients.
        """
        try:
            import smtplib
            from email.utils import formatdate
            port = self.mailport or smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port)
            body = self.format(record)
            msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
                self.fromaddr,
                ",".join(self.toaddrs),
                self.getSubject(record),
                formatdate(),
                body)
            if self.username:
                # TLS is only negotiated when credentials are in use.
                if self.secure is not None:
                    smtp.ehlo()
                    smtp.starttls(*self.secure)
                    smtp.ehlo()
                smtp.login(self.username, self.password)
            smtp.sendmail(self.fromaddr, self.toaddrs, msg)
            smtp.quit()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
class NTEventLogHandler(logging.Handler):
"""
A handler class which sends events to the NT Event Log. Adds a
registry entry for the specified application name. If no dllname is
provided, win32service.pyd (which contains some basic message
placeholders) is used. Note that use of these placeholders will make
your event logs big, as the entire message source is held in the log.
If you want slimmer logs, you have to pass in the name of your own DLL
which contains the message definitions you want to use in the event log.
"""
def __init__(self, appname, dllname=None, logtype="Application"):
logging.Handler.__init__(self)
try:
import win32evtlogutil, win32evtlog
self.appname = appname
self._welu = win32evtlogutil
if not dllname:
dllname = os.path.split(self._welu.__file__)
dllname = os.path.split(dllname[0])
dllname = os.path.join(dllname[0], r'win32service.pyd')
self.dllname = dllname
self.logtype = logtype
self._welu.AddSourceToRegistry(appname, dllname, logtype)
self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
self.typemap = {
logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
}
except ImportError:
print("The Python Win32 extensions for NT (service, event "\
"logging) appear not to be available.")
self._welu = None
def getMessageID(self, record):
"""
Return the message ID for the event record. If you are using your
own messages, you could do this by having the msg passed to the
logger being an ID rather than a formatting string. Then, in here,
you could use a dictionary lookup to get the message ID. This
version returns 1, which is the base message ID in win32service.pyd.
"""
return 1
def getEventCategory(self, record):
"""
Return the event category for the record.
Override this if you want to specify your own categories. This version
returns 0.
"""
return 0
def getEventType(self, record):
"""
Return the event type for the record.
Override this if you want to specify your own types. This version does
a mapping using the handler's typemap attribute, which is set up in
__init__() to a dictionary which contains mappings for DEBUG, INFO,
WARNING, ERROR and CRITICAL. If you are using your own levels you will
either need to override this method or place a suitable dictionary in
the handler's typemap attribute.
"""
return self.typemap.get(record.levelno, self.deftype)
    def emit(self, record):
        """
        Emit a record.
        Determine the message ID, event category and event type. Then
        log the message in the NT event log.
        """
        # self._welu is None when the win32 extensions could not be
        # imported in __init__; emitting is then a silent no-op.
        if self._welu:
            try:
                id = self.getMessageID(record)
                cat = self.getEventCategory(record)
                type = self.getEventType(record)
                msg = self.format(record)
                self._welu.ReportEvent(self.appname, id, cat, type, [msg])
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                # Any other failure is routed through the standard
                # logging error handler rather than raised to the caller.
                self.handleError(record)
    def close(self):
        """
        Clean up this handler.
        You can remove the application name from the registry as a
        source of event log entries. However, if you do this, you will
        not be able to see the events as you intended in the Event Log
        Viewer - it needs to be able to access the registry to get the
        DLL name.
        """
        # Deliberately left disabled -- see the docstring above.
        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
        logging.Handler.close(self)
class HTTPHandler(logging.Handler):
    """
    A class which sends records to a Web server, using either GET or
    POST semantics.

    NOTE: this implementation uses the Python 2 ``httplib``/``urllib``
    APIs (``httplib.HTTP``, ``urllib.urlencode``).
    """
    def __init__(self, host, url, method="GET"):
        """
        Initialize the instance with the host, the request URL, and the method
        ("GET" or "POST")

        :raises ValueError: if method is not GET or POST (case-insensitive)
        """
        logging.Handler.__init__(self)
        method = method.upper()
        if method not in ["GET", "POST"]:
            raise ValueError("method must be GET or POST")
        self.host = host
        self.url = url
        self.method = method
    def mapLogRecord(self, record):
        """
        Default implementation of mapping the log record into a dict
        that is sent as the CGI data. Overwrite in your class.
        Contributed by Franz Glasner.
        """
        return record.__dict__
    def emit(self, record):
        """
        Emit a record.
        Send the record to the Web server as a percent-encoded dictionary
        """
        try:
            # Imported lazily so the module loads even where these are absent
            import httplib, urllib
            host = self.host
            h = httplib.HTTP(host)
            url = self.url
            data = urllib.urlencode(self.mapLogRecord(record))
            if self.method == "GET":
                if (url.find('?') >= 0):
                    sep = '&'
                else:
                    sep = '?'
                url = url + "%c%s" % (sep, data)
            h.putrequest(self.method, url)
            # support multiple hosts on one IP address...
            # need to strip optional :port from host, if present
            i = host.find(":")
            if i >= 0:
                host = host[:i]
            h.putheader("Host", host)
            if self.method == "POST":
                h.putheader("Content-type",
                            "application/x-www-form-urlencoded")
                h.putheader("Content-length", str(len(data)))
            # NOTE(review): passing the body to endheaders() requires a
            # stdlib version whose HTTP.endheaders accepts a message body
            # -- verify against the targeted Python release.
            h.endheaders(data if self.method == "POST" else None)
            h.getreply()    #can't do anything with the result
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            # Delegate all other failures to the logging error handler
            self.handleError(record)
class BufferingHandler(logging.Handler):
    """
    Logging handler that collects records in an in-memory list.

    After every append the handler asks shouldFlush() whether the
    buffer ought to be emptied; what "flushing" actually does is left
    to flush(), which subclasses are expected to override.
    """
    def __init__(self, capacity):
        """
        Set up the handler with room for `capacity` records.
        """
        logging.Handler.__init__(self)
        self.capacity = capacity
        self.buffer = []

    def shouldFlush(self, record):
        """
        Decide whether the buffer needs flushing.

        The default policy flushes once the buffer has reached its
        capacity; override to implement a custom strategy.
        """
        return len(self.buffer) >= self.capacity

    def emit(self, record):
        """
        Store the record, then flush if shouldFlush() says so.
        """
        self.buffer.append(record)
        if self.shouldFlush(record):
            self.flush()

    def flush(self):
        """
        Discard the buffered records.

        Override to actually do something useful with them.
        """
        self.buffer = []

    def close(self):
        """
        Flush any remaining records, then close via the parent class.
        """
        self.flush()
        logging.Handler.close(self)
class MemoryHandler(BufferingHandler):
    """
    Buffering handler that periodically forwards its records to a
    target handler.

    The buffer is drained whenever it fills up, or as soon as a record
    at flushLevel (or above) arrives.
    """
    def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
        """
        Set up the buffer size, the severity that forces a flush, and
        (optionally) the target handler.

        A MemoryHandler without a target -- set here or later via
        setTarget() -- buffers records but can never deliver them.
        """
        BufferingHandler.__init__(self, capacity)
        self.flushLevel = flushLevel
        self.target = target

    def shouldFlush(self, record):
        """
        Flush when the buffer is full, or when the record is at least
        as severe as flushLevel.
        """
        if len(self.buffer) >= self.capacity:
            return True
        return record.levelno >= self.flushLevel

    def setTarget(self, target):
        """
        Install the handler that flushed records are forwarded to.
        """
        self.target = target

    def flush(self):
        """
        Hand every buffered record to the target handler (if one is
        set) and empty the buffer. Override for different behaviour.
        """
        target = self.target
        if target:
            for record in self.buffer:
                target.handle(record)
            self.buffer = []
        # Without a target the records are retained for a later flush.

    def close(self):
        """
        Flush, drop the target reference, and chain to the parent
        close() (which empties the buffer).
        """
        self.flush()
        self.target = None
        BufferingHandler.close(self)
| 0.003544 |
# -*- coding: utf-8 -*-
"""
Common Utilities run most requests
"""
# =============================================================================
# Special local requests (e.g. from scheduler)
#
if request.is_local:
    # This is a request made from the local server
    f = get_vars.get("format", None)
    auth_token = get_vars.get("subscription", None)
    if auth_token and f == "msg":
        # Subscription lookup request (see S3Notify.notify())
        # Resolve the subscriber's user account from the auth token
        rtable = s3db.pr_subscription_resource
        stable = s3db.pr_subscription
        utable = s3db.pr_person_user
        join = [stable.on(stable.id == rtable.subscription_id),
                utable.on(utable.pe_id == stable.pe_id)]
        user = db(rtable.auth_token == auth_token).select(utable.user_id,
                                                          join=join,
                                                          limitby=(0, 1)) \
                                                  .first()
        if user:
            # Impersonate subscriber
            auth.s3_impersonate(user.user_id)
        else:
            # Anonymous request
            # NOTE(review): an unknown token silently degrades to an
            # anonymous session rather than failing -- confirm intended
            auth.s3_impersonate(None)
# =============================================================================
# Check Permissions & fail as early as we can
#
# Set user roles
# - requires access to tables
auth.s3_set_roles()
# Check access to this controller; fail() terminates request handling
# for unauthorized users, so nothing below runs in that case
if not auth.permission.has_permission("read"):
    auth.permission.fail()
# =============================================================================
# Initialize Date/Time Settings
#
# Determine the client's UTC offset for date/time representation
s3base.s3_get_utc_offset()
# =============================================================================
# Menus
#
from s3layouts import *
import s3menus as default_menus

S3MainMenu = default_menus.S3MainMenu
S3OptionsMenu = default_menus.S3OptionsMenu

current.menu = Storage(oauth="", options=None, override={})

# Only interactive (HTML) requests need the menus to be built.
# Bug fix: the original tested `format in ("html")`, which is a substring
# test against the string "html" (so e.g. "ht" or "h" would also match);
# membership in a one-element tuple is what was intended.
if auth.permission.format in ("html",):
    theme = settings.get_theme()
    package = "applications.%s.modules.templates.%%s.menus" % appname
    menu_locations = []
    if theme != "default":
        # Custom theme: look for menus in the theme location first
        if s3.theme_location:
            theme = "%s.%s" % (s3.theme_location[:-1], theme)
        menu_locations.append(theme)
    else:
        # Default theme: look in the active template(s)
        template = settings.get_template()
        if isinstance(template, (tuple, list)):
            menu_locations.extend(template)
        else:
            menu_locations.append(template)
    for name in menu_locations:
        if name == "default":
            # Using s3menus.py
            continue
        try:
            deployment_menus = __import__(package % name,
                                          fromlist=["S3MainMenu",
                                                    "S3OptionsMenu",
                                                    ],
                                          )
        except ImportError:
            # No menus.py (using except is faster than os.stat)
            continue
        else:
            # Template menus override the defaults where defined
            if hasattr(deployment_menus, "S3MainMenu"):
                S3MainMenu = deployment_menus.S3MainMenu
            if hasattr(deployment_menus, "S3OptionsMenu"):
                S3OptionsMenu = deployment_menus.S3OptionsMenu
    # Instantiate main menu
    main = S3MainMenu.menu()
else:
    main = None

menu = current.menu
menu["main"] = main

# Override controller menus
# @todo: replace by current.menu.override
s3_menu_dict = {}
# -----------------------------------------------------------------------------
def s3_rest_controller(prefix=None, resourcename=None, **attr):
    """
    Helper function to apply the S3Resource REST interface
    @param prefix: the application prefix
    @param resourcename: the resource name (without prefix)
    @param attr: additional keyword parameters
    @return: the output of the request (a dict for interactive views)
    Any keyword parameters will be copied into the output dict (provided
    that the output is a dict). If a keyword parameter is callable, then
    it will be invoked, and its return value will be added to the output
    dict instead. The callable receives the S3Request as its first and
    only parameter.
    CRUD can be configured per table using:
    s3db.configure(tablename, **attr)
    *** Redirection:
    create_next URL to redirect to after a record has been created
    update_next URL to redirect to after a record has been updated
    delete_next URL to redirect to after a record has been deleted
    *** Form configuration:
    list_fields list of names of fields to include into list views
    subheadings Sub-headings (see separate documentation)
    listadd Enable/Disable add-form in list views
    *** CRUD configuration:
    editable Allow/Deny record updates in this table
    deletable Allow/Deny record deletions in this table
    insertable Allow/Deny record insertions into this table
    copyable Allow/Deny record copying within this table
    *** Callbacks:
    create_onvalidation Function for additional record validation on create
    create_onaccept Function after successful record insertion
    update_onvalidation Function for additional record validation on update
    update_onaccept Function after successful record update
    onvalidation Fallback for both create_onvalidation and update_onvalidation
    onaccept Fallback for both create_onaccept and update_onaccept
    ondelete Function after record deletion
    """
    # Customise Controller from Template
    attr = settings.customise_controller("%s_%s" % (prefix or request.controller,
                                                    resourcename or request.function),
                                         **attr)
    # Parse the request
    r = s3_request(prefix, resourcename)
    # Customize target resource(s) from Template
    r.customise_resource()
    # Configure standard method handlers
    set_handler = r.set_handler
    from s3db.cms import S3CMS
    set_handler("cms", S3CMS)
    set_handler("compose", s3base.S3Compose)
    # @ToDo: Make work in Component Tabs:
    set_handler("copy", lambda r, **attr: \
                redirect(URL(args="create",
                             vars={"from_record":r.id})))
    set_handler("deduplicate", s3base.S3Merge)
    set_handler("filter", s3base.S3Filter)
    set_handler("hierarchy", s3base.S3HierarchyCRUD)
    set_handler("import", s3base.S3Importer)
    set_handler("xform", s3base.S3XForms)
    set_handler("map", s3base.S3Map)
    set_handler("profile", s3base.S3Profile)
    # "report" is registered both for interactive and transformed formats
    set_handler("report", s3base.S3Report)
    set_handler("report", s3base.S3Report, transform=True)
    set_handler("timeplot", s3base.S3TimePlot)
    set_handler("grouped", s3base.S3GroupedItemsReport)
    set_handler("search_ac", s3base.search_ac)
    set_handler("summary", s3base.S3Summary)
    # Don't load S3PDF unless needed (very slow import with Reportlab)
    method = r.method
    if method == "import" and r.representation == "pdf":
        from s3.s3pdf import S3PDF
        set_handler("import", S3PDF(),
                    http = ("GET", "POST"),
                    representation="pdf")
    # Plugin OrgRoleManager when appropriate
    s3base.S3OrgRoleManager.set_method(r)
    # Execute the request
    output = r(**attr)
    # Post-process interactive list-type outputs: add action buttons
    if isinstance(output, dict) and \
       method in (None,
                  "report",
                  "search",
                  "datatable",
                  "datatable_f",
                  "summary"):
        if s3.actions is None:
            # Add default action buttons
            prefix, name, table, tablename = r.target()
            authorised = s3_has_permission("update", tablename)
            # If a component has components itself, then action buttons
            # can be forwarded to the native controller by setting native=True
            if r.component and s3db.has_components(table):
                native = output.get("native", False)
            else:
                native = False
            # Get table config
            get_config = s3db.get_config
            listadd = get_config(tablename, "listadd", True)
            editable = get_config(tablename, "editable", True) and \
                       not auth.permission.ownership_required("update", table)
            deletable = get_config(tablename, "deletable", True)
            copyable = get_config(tablename, "copyable", False)
            # URL to open the resource
            open_url = r.resource.crud._linkto(r,
                                               authorised=authorised,
                                               update=editable,
                                               native=native)("[id]")
            # Add action buttons for Open/Delete/Copy as appropriate
            s3_action_buttons(r,
                              deletable=deletable,
                              copyable=copyable,
                              editable=editable,
                              read_url=open_url,
                              update_url=open_url
                              # To use modals
                              #update_url="%s.popup?refresh=list" % open_url
                              )
            # Override Add-button, link to native controller and put
            # the primary key into get_vars for automatic linking
            if native and not listadd and \
               s3_has_permission("create", tablename):
                label = s3base.S3CRUD.crud_string(tablename,
                                                  "label_create")
                hook = r.resource.components[name]
                fkey = "%s.%s" % (name, hook.fkey)
                get_vars_copy = get_vars.copy()
                get_vars_copy.update({fkey: r.record[hook.fkey]})
                url = URL(prefix, name, args=["create"], vars=get_vars_copy)
                add_btn = A(label, _href=url, _class="action-btn")
                output.update(add_btn=add_btn)
    elif method not in ("import",
                        "review",
                        "approve",
                        "reject",
                        "deduplicate"):
        # Reset action buttons for non-list methods
        s3.actions = None
    if get_vars.tour:
        # Wrap the output into a guided tour if one was requested
        output = s3db.tour_builder(output)
    return output
# Enable access to this function from modules
# (framework code calls it via current.rest_controller)
current.rest_controller = s3_rest_controller
# END =========================================================================
| 0.00196 |
# -*- coding:utf-8 -*-
import datetime
import json
from flask import abort
from flask import current_app
from werkzeug.exceptions import BadRequest
from api.extensions import db
from api.extensions import rd
from api.lib.cmdb.cache import AttributeCache
from api.lib.cmdb.cache import CITypeCache
from api.lib.cmdb.ci_type import CITypeAttributeManager
from api.lib.cmdb.ci_type import CITypeManager
from api.lib.cmdb.const import CMDB_QUEUE
from api.lib.cmdb.const import ExistPolicy
from api.lib.cmdb.const import OperateType
from api.lib.cmdb.const import REDIS_PREFIX_CI
from api.lib.cmdb.const import RetKey
from api.lib.cmdb.history import AttributeHistoryManger
from api.lib.cmdb.history import CIRelationHistoryManager
from api.lib.cmdb.search.ci.db.query_sql import QUERY_CIS_BY_IDS
from api.lib.cmdb.search.ci.db.query_sql import QUERY_CIS_BY_VALUE_TABLE
from api.lib.cmdb.utils import TableMap
from api.lib.cmdb.utils import ValueTypeMap
from api.lib.cmdb.value import AttributeValueManager
from api.lib.decorator import kwargs_required
from api.lib.utils import handle_arg_list
from api.models.cmdb import CI
from api.models.cmdb import CIRelation
from api.models.cmdb import CITypeAttribute
from api.models.cmdb import CITypeRelation
from api.tasks.cmdb import ci_cache
from api.tasks.cmdb import ci_delete
from api.tasks.cmdb import ci_relation_cache
from api.tasks.cmdb import ci_relation_delete
class CIManager(object):
    """ manage CI interface
    """
    def __init__(self):
        pass
    @staticmethod
    def get_type_name(ci_id):
        """Return the CIType name of a CI, or abort(404) if it is missing."""
        ci = CI.get_by_id(ci_id) or abort(404, "CI <{0}> is not existed".format(ci_id))
        return CITypeCache.get(ci.type_id).name
    @staticmethod
    def confirm_ci_existed(ci_id):
        """Return the CI record, or abort(404) if it does not exist."""
        return CI.get_by_id(ci_id) or abort(404, "CI <{0}> is not existed".format(ci_id))
    @classmethod
    def get_ci_by_id(cls, ci_id, ret_key=RetKey.NAME, fields=None, need_children=True):
        """
        :param ci_id:
        :param ret_key: name, id, or alias
        :param fields: attribute list
        :param need_children:
        :return:
        """
        ci = CI.get_by_id(ci_id) or abort(404, "CI <{0}> is not existed".format(ci_id))
        res = dict()
        if need_children:
            children = CIRelationManager.get_children(ci_id, ret_key=ret_key)  # one floor
            res.update(children)
        ci_type = CITypeCache.get(ci.type_id)
        res["ci_type"] = ci_type.name
        # Attribute values come from the (cache-backed) bulk lookup
        res.update(cls.get_cis_by_ids([str(ci_id)], fields=fields, ret_key=ret_key))
        res['_type'] = ci_type.id
        res['_id'] = ci_id
        return res
    @staticmethod
    def get_ci_by_id_from_db(ci_id, ret_key=RetKey.NAME, fields=None, need_children=True, use_master=False):
        """
        :param ci_id:
        :param ret_key: name, id or alias
        :param fields: list
        :param need_children:
        :param use_master: whether to use master db
        :return:
        """
        ci = CI.get_by_id(ci_id) or abort(404, "CI <{0}> is not existed".format(ci_id))
        res = dict()
        if need_children:
            children = CIRelationManager.get_children(ci_id, ret_key=ret_key)  # one floor
            res.update(children)
        ci_type = CITypeCache.get(ci.type_id)
        res["ci_type"] = ci_type.name
        # Default to all attributes of the CI's type when no fields given
        fields = CITypeAttributeManager.get_attr_names_by_type_id(ci.type_id) if not fields else fields
        unique_key = AttributeCache.get(ci_type.unique_id)
        _res = AttributeValueManager().get_attr_values(fields,
                                                       ci_id,
                                                       ret_key=ret_key,
                                                       unique_key=unique_key,
                                                       use_master=use_master)
        res.update(_res)
        res['type_id'] = ci_type.id
        res['ci_id'] = ci_id
        return res
    def get_ci_by_ids(self, ci_id_list, ret_key=RetKey.NAME, fields=None):
        """Fetch several CIs one by one via get_ci_by_id()."""
        return [self.get_ci_by_id(ci_id, ret_key=ret_key, fields=fields) for ci_id in ci_id_list]
    @classmethod
    def get_cis_by_type(cls, type_id, ret_key=RetKey.NAME, fields="", page=1, per_page=None):
        """Return (numfound, page, cis) for all live CIs of a type.

        NOTE(review): per_page=None would make the offset/limit arithmetic
        fail -- presumably callers always pass a number; verify.
        """
        cis = db.session.query(CI.id).filter(CI.type_id == type_id).filter(CI.deleted.is_(False))
        numfound = cis.count()
        cis = cis.offset((page - 1) * per_page).limit(per_page)
        ci_ids = [str(ci.id) for ci in cis]
        res = cls.get_cis_by_ids(ci_ids, ret_key, fields)
        return numfound, page, res
    @staticmethod
    def ci_is_exist(unique_key, unique_value):
        """
        :param unique_key: is a attribute
        :param unique_value:
        :return: the existing CI record, or None
        """
        value_table = TableMap(attr_name=unique_key.name).table
        unique = value_table.get_by(attr_id=unique_key.id,
                                    value=unique_value,
                                    to_dict=False,
                                    first=True)
        if unique:
            return CI.get_by_id(unique.ci_id)
    @staticmethod
    def _delete_ci_by_id(ci_id):
        """Hard-delete the CI row only (no values/relations cleanup)."""
        ci = CI.get_by_id(ci_id)
        ci.delete()  # TODO: soft delete
    @classmethod
    def add(cls, ci_type_name, exist_policy=ExistPolicy.REPLACE, _no_attribute_policy=ExistPolicy.IGNORE, **ci_dict):
        """
        :param ci_type_name:
        :param exist_policy: replace or reject or need
        :param _no_attribute_policy: ignore or reject
        :param ci_dict:
        :return: the CI id
        """
        ci_type = CITypeManager.check_is_existed(ci_type_name)
        unique_key = AttributeCache.get(ci_type.unique_id) or abort(400, 'illegality unique attribute')
        # The unique value may be supplied under the attribute's name,
        # alias or id
        unique_value = ci_dict.get(unique_key.name)
        unique_value = unique_value or ci_dict.get(unique_key.alias)
        unique_value = unique_value or ci_dict.get(unique_key.id)
        unique_value = unique_value or abort(400, '{0} missing'.format(unique_key.name))
        existed = cls.ci_is_exist(unique_key, unique_value)
        if existed is not None:
            if exist_policy == ExistPolicy.REJECT:
                return abort(400, 'CI is already existed')
            if existed.type_id != ci_type.id:
                existed.update(type_id=ci_type.id)
            ci = existed
        else:
            if exist_policy == ExistPolicy.NEED:
                return abort(404, 'CI <{0}> does not exist'.format(unique_value))
            ci = CI.create(type_id=ci_type.id)
        ci_type_attrs_name = [attr["name"] for attr in CITypeAttributeManager().get_attributes_by_type_id(ci_type.id)]
        value_manager = AttributeValueManager()
        for p, v in ci_dict.items():
            if p not in ci_type_attrs_name:
                current_app.logger.warning('ci_type: {0} not has attribute {1}, please check!'.format(ci_type_name, p))
                continue
            try:
                value_manager.create_or_update_attr_value(p, v, ci, _no_attribute_policy)
            except BadRequest as e:
                # Roll back a freshly created CI on validation failure
                if existed is None:
                    cls.delete(ci.id)
                raise e
        ci_cache.apply_async([ci.id], queue=CMDB_QUEUE)
        return ci.id
    def update(self, ci_id, **ci_dict):
        """Update the attribute values of an existing CI."""
        ci = self.confirm_ci_existed(ci_id)
        ci_type_attrs_name = [attr["name"] for attr in CITypeAttributeManager().get_attributes_by_type_id(ci.type_id)]
        value_manager = AttributeValueManager()
        for p, v in ci_dict.items():
            if p not in ci_type_attrs_name:
                current_app.logger.warning('ci_type: {0} not has attribute {1}, please check!'.format(ci.type_id, p))
                continue
            try:
                value_manager.create_or_update_attr_value(p, v, ci)
            except BadRequest as e:
                raise e
        ci_cache.apply_async([ci_id], queue=CMDB_QUEUE)
    @staticmethod
    def update_unique_value(ci_id, unique_name, unique_value):
        """Update the unique attribute of a CI and refresh its cache."""
        ci = CI.get_by_id(ci_id) or abort(404, "CI <{0}> is not found".format(ci_id))
        AttributeValueManager().create_or_update_attr_value(unique_name, unique_value, ci)
        ci_cache.apply_async([ci_id], queue=CMDB_QUEUE)
    @staticmethod
    def delete(ci_id):
        """Delete a CI with all its attribute values and relations."""
        ci = CI.get_by_id(ci_id) or abort(404, "CI <{0}> is not found".format(ci_id))
        attrs = CITypeAttribute.get_by(type_id=ci.type_id, to_dict=False)
        attr_names = set([AttributeCache.get(attr.attr_id).name for attr in attrs])
        for attr_name in attr_names:
            value_table = TableMap(attr_name=attr_name).table
            for item in value_table.get_by(ci_id=ci_id, to_dict=False):
                item.delete()
        # Remove relations in both directions, refreshing the relation cache
        for item in CIRelation.get_by(first_ci_id=ci_id, to_dict=False):
            ci_relation_delete.apply_async(args=(item.first_ci_id, item.second_ci_id), queue=CMDB_QUEUE)
            item.delete()
        for item in CIRelation.get_by(second_ci_id=ci_id, to_dict=False):
            ci_relation_delete.apply_async(args=(item.first_ci_id, item.second_ci_id), queue=CMDB_QUEUE)
            item.delete()
        ci.delete()  # TODO: soft delete
        AttributeHistoryManger.add(ci_id, [(None, OperateType.DELETE, None, None)])
        ci_delete.apply_async([ci.id], queue=CMDB_QUEUE)
        return ci_id
    @staticmethod
    def add_heartbeat(ci_type, unique_value):
        """Record a heartbeat timestamp on the CI identified by its unique value."""
        ci_type = CITypeManager().check_is_existed(ci_type)
        unique_key = AttributeCache.get(ci_type.unique_id)
        value_table = TableMap(attr_name=unique_key.name).table
        v = value_table.get_by(attr_id=unique_key.id,
                               value=unique_value,
                               to_dict=False,
                               first=True) \
            or abort(404, "not found")
        ci = CI.get_by_id(v.ci_id) or abort(404, "CI <{0}> is not found".format(v.ci_id))
        ci.update(heartbeat=datetime.datetime.now())
    @classmethod
    @kwargs_required("type_id", "page")
    def get_heartbeat(cls, **kwargs):
        """Page through CIs of the given type(s) with their agent status.

        Status codes: -1 = never seen, 0 = expired, 1 = alive.
        """
        query = db.session.query(CI.id, CI.heartbeat).filter(CI.deleted.is_(False))
        # A heartbeat older than 72 minutes counts as expired
        expire = datetime.datetime.now() - datetime.timedelta(minutes=72)
        type_ids = handle_arg_list(kwargs["type_id"])
        query = query.filter(CI.type_id.in_(type_ids))
        page = kwargs.get("page")
        agent_status = kwargs.get("agent_status")
        if agent_status == -1:
            query = query.filter(CI.heartbeat.is_(None))
        elif agent_status == 0:
            query = query.filter(CI.heartbeat <= expire)
        elif agent_status == 1:
            query = query.filter(CI.heartbeat > expire)
        numfound = query.count()
        per_page_count = current_app.config.get("DEFAULT_PAGE_COUNT")
        cis = query.offset((page - 1) * per_page_count).limit(per_page_count).all()
        ci_ids = [ci.id for ci in cis]
        heartbeat_dict = {}
        for ci in cis:
            if agent_status is not None:
                heartbeat_dict[ci.id] = agent_status
            else:
                if ci.heartbeat is None:
                    heartbeat_dict[ci.id] = -1
                elif ci.heartbeat <= expire:
                    heartbeat_dict[ci.id] = 0
                else:
                    heartbeat_dict[ci.id] = 1
        current_app.logger.debug(heartbeat_dict)
        ci_ids = list(map(str, ci_ids))
        res = cls.get_cis_by_ids(ci_ids, fields=["hostname", "private_ip"])
        result = [(i.get("hostname"), i.get("private_ip")[0], i.get("ci_type"),
                   heartbeat_dict.get(i.get("_id"))) for i in res
                  if i.get("private_ip")]
        return numfound, result
    @staticmethod
    def _get_cis_from_cache(ci_ids, ret_key=RetKey.NAME, fields=None):
        """Try to serve the CIs from Redis; return None on any cache miss.

        Only usable for ret_key=NAME since that is the cached format.
        """
        res = rd.get(ci_ids, REDIS_PREFIX_CI)
        if res is not None and None not in res and ret_key == RetKey.NAME:
            res = list(map(json.loads, res))
            if not fields:
                return res
            else:
                # Project the cached dicts down to the requested fields
                _res = []
                for d in res:
                    _d = dict()
                    _d["_id"], _d["_type"] = d.get("_id"), d.get("_type")
                    _d["ci_type"] = d.get("ci_type")
                    for field in fields:
                        _d[field] = d.get(field)
                    _res.append(_d)
                return _res
    @staticmethod
    def _get_cis_from_db(ci_ids, ret_key=RetKey.NAME, fields=None, value_tables=None):
        """Load CIs by raw UNION query across the per-type value tables."""
        if not fields:
            filter_fields_sql = ""
        else:
            _fields = list()
            for field in fields:
                attr = AttributeCache.get(field)
                if attr is not None:
                    _fields.append(str(attr.id))
            filter_fields_sql = "WHERE A.attr_id in ({0})".format(",".join(_fields))
        ci_ids = ",".join(ci_ids)
        if value_tables is None:
            value_tables = ValueTypeMap.table_name.values()
        value_sql = " UNION ".join([QUERY_CIS_BY_VALUE_TABLE.format(value_table, ci_ids)
                                    for value_table in value_tables])
        query_sql = QUERY_CIS_BY_IDS.format(filter_fields_sql, value_sql)
        # current_app.logger.debug(query_sql)
        cis = db.session.execute(query_sql).fetchall()
        ci_set = set()
        res = list()
        ci_dict = dict()
        for ci_id, type_id, attr_id, attr_name, attr_alias, value, value_type, is_list in cis:
            # Rows arrive grouped by CI; start a new dict on a new ci_id
            if ci_id not in ci_set:
                ci_dict = dict()
                ci_type = CITypeCache.get(type_id)
                ci_dict["ci_id"] = ci_id
                # NOTE(review): this assignment is dead -- it is overwritten
                # by the type name on the next line, so the raw type_id is
                # not part of the result; confirm whether "type_id" was meant
                ci_dict["ci_type"] = type_id
                ci_dict["ci_type"] = ci_type.name
                ci_dict["ci_type_alias"] = ci_type.alias
                ci_set.add(ci_id)
                res.append(ci_dict)
            if ret_key == RetKey.NAME:
                attr_key = attr_name
            elif ret_key == RetKey.ALIAS:
                attr_key = attr_alias
            elif ret_key == RetKey.ID:
                attr_key = attr_id
            else:
                return abort(400, "invalid ret key")
            value = ValueTypeMap.serialize2[value_type](value)
            if is_list:
                ci_dict.setdefault(attr_key, []).append(value)
            else:
                ci_dict[attr_key] = value
        return res
    @classmethod
    def get_cis_by_ids(cls, ci_ids, ret_key=RetKey.NAME, fields=None, value_tables=None):
        """
        :param ci_ids: list of CI instance ID, eg. ['1', '2']
        :param ret_key: name, id or alias
        :param fields:
        :param value_tables:
        :return:
        """
        if not ci_ids:
            return []
        fields = [] if fields is None or not isinstance(fields, list) else fields
        ci_id_tuple = tuple(map(int, ci_ids))
        # Cache first, database as the fallback
        res = cls._get_cis_from_cache(ci_id_tuple, ret_key, fields)
        if res is not None:
            return res
        current_app.logger.warning("cache not hit...............")
        return cls._get_cis_from_db(ci_ids, ret_key, fields, value_tables)
class CIRelationManager(object):
    """
    Manage relation between CIs
    """
    def __init__(self):
        pass
    @classmethod
    def get_children(cls, ci_id, ret_key=RetKey.NAME):
        """Return the direct children of a CI, grouped by CI-type name."""
        second_cis = CIRelation.get_by(first_ci_id=ci_id, to_dict=False)
        second_ci_ids = (second_ci.second_ci_id for second_ci in second_cis)
        ci_type2ci_ids = dict()
        for ci_id in second_ci_ids:
            type_id = CI.get_by_id(ci_id).type_id
            ci_type2ci_ids.setdefault(type_id, []).append(ci_id)
        res = {}
        for type_id in ci_type2ci_ids:
            ci_type = CITypeCache.get(type_id)
            children = CIManager.get_cis_by_ids(list(map(str, ci_type2ci_ids[type_id])), ret_key=ret_key)
            res[ci_type.name] = children
        return res
    @staticmethod
    def get_second_cis(first_ci_id, relation_type_id=None, page=1, per_page=None):
        """Page through the children (second CIs) of first_ci_id.

        :return: (numfound, count on this page, CI dicts)
        """
        second_cis = db.session.query(CI.id).filter(CI.deleted.is_(False)).join(
            CIRelation, CIRelation.second_ci_id == CI.id).filter(
            CIRelation.first_ci_id == first_ci_id).filter(CIRelation.deleted.is_(False))
        if relation_type_id is not None:
            second_cis = second_cis.filter(CIRelation.relation_type_id == relation_type_id)
        numfound = second_cis.count()
        if per_page != "all":
            second_cis = second_cis.offset((page - 1) * per_page).limit(per_page).all()
        ci_ids = [str(son.id) for son in second_cis]
        result = CIManager.get_cis_by_ids(ci_ids)
        return numfound, len(ci_ids), result
    @staticmethod
    def _sort_handler(sort_by, query_sql):
        """Apply "+attr"/"-attr" ordering to a CI query.

        Unknown attributes leave the query unchanged.
        """
        if sort_by.startswith("+"):
            sort_type = "asc"
            sort_by = sort_by[1:]
        elif sort_by.startswith("-"):
            sort_type = "desc"
            sort_by = sort_by[1:]
        else:
            sort_type = "asc"
        attr = AttributeCache.get(sort_by)
        if attr is None:
            return query_sql
        attr_id = attr.id
        value_table = TableMap(attr_name=sort_by).table
        ci_table = query_sql.subquery()
        # NOTE(review): ci_table is a subquery alias, so this probably needs
        # ci_table.c.deleted (and the subquery must actually select a
        # "deleted" column) -- verify before relying on this sort path
        query_sql = db.session.query(ci_table.c.id, value_table.value).join(
            value_table, value_table.ci_id == ci_table.c.id).filter(
            value_table.attr_id == attr_id).filter(ci_table.deleted.is_(False)).order_by(
            getattr(value_table.value, sort_type)())
        return query_sql
    @classmethod
    def get_first_cis(cls, second_ci, relation_type_id=None, page=1, per_page=None):
        """Page through the parents (first CIs) of second_ci.

        :return: (numfound, count on this page, CI dicts)
        """
        first_cis = db.session.query(CIRelation.first_ci_id).filter(
            CIRelation.second_ci_id == second_ci).filter(CIRelation.deleted.is_(False))
        if relation_type_id is not None:
            first_cis = first_cis.filter(CIRelation.relation_type_id == relation_type_id)
        numfound = first_cis.count()
        if per_page != "all":
            first_cis = first_cis.offset((page - 1) * per_page).limit(per_page).all()
        first_ci_ids = [str(first_ci.first_ci_id) for first_ci in first_cis]
        result = CIManager.get_cis_by_ids(first_ci_ids)
        return numfound, len(first_ci_ids), result
    @classmethod
    def add(cls, first_ci_id, second_ci_id, more=None, relation_type_id=None, many_to_one=False):
        """Create (or update) the relation first_ci -> second_ci.

        :param more: extra payload stored on the relation
        :param relation_type_id: explicit relation type; when omitted it is
                                 derived from the CIType relation of the CIs
        :param many_to_one: if True, existing relations of the same type
                            pointing into second_ci are removed first
        :return: the id of the new or existing CIRelation
        """
        first_ci = CIManager.confirm_ci_existed(first_ci_id)
        second_ci = CIManager.confirm_ci_existed(second_ci_id)
        existed = CIRelation.get_by(first_ci_id=first_ci_id,
                                    second_ci_id=second_ci_id,
                                    to_dict=False,
                                    first=True)
        if existed is not None:
            # Relation already present: only the type may need updating
            if existed.relation_type_id != relation_type_id and relation_type_id is not None:
                existed.update(relation_type_id=relation_type_id)
                CIRelationHistoryManager().add(existed, OperateType.UPDATE)
        else:
            if relation_type_id is None:
                # Derive the relation type from the CIType hierarchy
                type_relation = CITypeRelation.get_by(parent_id=first_ci.type_id,
                                                      child_id=second_ci.type_id,
                                                      first=True,
                                                      to_dict=False)
                relation_type_id = type_relation and type_relation.relation_type_id
                relation_type_id or abort(404, "Relation {0} <-> {1} is not found".format(
                    first_ci.ci_type.name, second_ci.ci_type.name))
            if many_to_one:
                for item in CIRelation.get_by(second_ci_id=second_ci_id,
                                              relation_type_id=relation_type_id,
                                              to_dict=False):
                    item.soft_delete()
                    his_manager = CIRelationHistoryManager()
                    his_manager.add(item, operate_type=OperateType.DELETE)
                    ci_relation_delete.apply_async(args=(item.first_ci_id, item.second_ci_id), queue=CMDB_QUEUE)
            existed = CIRelation.create(first_ci_id=first_ci_id,
                                        second_ci_id=second_ci_id,
                                        relation_type_id=relation_type_id)
            CIRelationHistoryManager().add(existed, OperateType.ADD)
            ci_relation_cache.apply_async(args=(first_ci_id, second_ci_id), queue=CMDB_QUEUE)
        if more is not None:
            # Bug fix: was `existed.upadte(more=more)`, which raised
            # AttributeError whenever a `more` payload was supplied
            existed.update(more=more)
        return existed.id
    @staticmethod
    def delete(cr_id):
        """Delete a relation by its own id, recording history."""
        cr = CIRelation.get_by_id(cr_id) or abort(404, "CIRelation <{0}> is not existed".format(cr_id))
        cr.delete()
        his_manager = CIRelationHistoryManager()
        his_manager.add(cr, operate_type=OperateType.DELETE)
        ci_relation_delete.apply_async(args=(cr.first_ci_id, cr.second_ci_id), queue=CMDB_QUEUE)
        return cr_id
    @classmethod
    def delete_2(cls, first_ci_id, second_ci_id):
        """Delete the relation identified by its two endpoint CI ids."""
        cr = CIRelation.get_by(first_ci_id=first_ci_id,
                               second_ci_id=second_ci_id,
                               to_dict=False,
                               first=True)
        # Robustness fix: fail with 404 (like delete()) instead of an
        # AttributeError when the relation does not exist
        cr or abort(404, "CIRelation <{0} -> {1}> is not existed".format(first_ci_id, second_ci_id))
        ci_relation_delete.apply_async(args=(first_ci_id, second_ci_id), queue=CMDB_QUEUE)
        return cls.delete(cr.id)
    @classmethod
    def batch_update(cls, ci_ids, parents):
        """
        only for many to one
        :param ci_ids: children CI ids
        :param parents: {attr_name: unique_value} identifying the parent CIs
        :return:
        """
        from api.lib.cmdb.utils import TableMap
        if parents is not None and isinstance(parents, dict):
            for attr_name in parents:
                if parents[attr_name]:
                    attr = AttributeCache.get(attr_name)
                    value_table = TableMap(attr_name=attr.name).table
                    parent = value_table.get_by(attr_id=attr.id, value=parents[attr_name], first=True, to_dict=False)
                    if not parent:
                        return abort(404, "{0}: {1} is not found".format(attr_name, parents[attr_name]))
                    parent_id = parent.ci_id
                    for ci_id in ci_ids:
                        cls.add(parent_id, ci_id, many_to_one=True)
| 0.003839 |
from numpy import *
import random
import logging
import argparse
from random import randint
import os
import os.path
import time
from AbstractSampler import AbstractSampler
def myArgmin(A):
    """Index of a minimal entry of the 1D array A, ties broken
    uniformly at random."""
    candidates = nonzero(A == A.min())[0]
    pick = randint(0, candidates.shape[0] - 1)
    return candidates[pick]
def myArgmax(A):
    """Index of a maximal entry of the 1D array A, ties broken
    uniformly at random."""
    candidates = nonzero(A == A.max())[0]
    pick = randint(0, candidates.shape[0] - 1)
    return candidates[pick]
class armTree:
    """
    Partition a list of arm indices into consecutive batches of
    (roughly) batch_size arms, with support for pruning beaten arms
    from a batch and re-packing shrunken batches (mergeRUCB
    bookkeeping).
    """
    def __init__(self, iArms, batch_size=4):
        """
        Split iArms into ceil(len(iArms)/batch_size) consecutive groups;
        the last group absorbs the remainder.
        """
        self.batch_size = int(batch_size)
        self.armGroups = []
        nAG = int(ceil(float(len(iArms)) / self.batch_size))
        Inds = batch_size * arange(nAG + 1)
        Inds[-1] = len(iArms)
        for i in range(len(Inds) - 1):
            self.armGroups.append([iArms[j] for j in range(Inds[i], Inds[i + 1])])
    def pruneGroup(self, i, UCB):
        """
        Drop from batch i the arms flagged by entries of the UCB matrix
        below 0.5 (i.e. confidently beaten arms).

        :param UCB: square matrix of upper confidence bounds for batch i
        :return: True if at least one entry triggered a removal
        """
        group = self.armGroups[i]
        if len(group) != UCB.shape[0]:
            # Bug fix: the original applied the %-operator to the last
            # string fragment only (precedence of % over +), which raised
            # TypeError instead of logging the intended message.
            logging.info("ERROR: The size of the batch and the dimensions of "
                         "UCB matrix do NOT match up. Batch = %s and matrix "
                         "is = %s" % (group, UCB))
        L, W = nonzero(UCB < 0.5)
        # NOTE(review): popping by the raw row indices while earlier pops
        # shift the list is preserved as-is from the original algorithm.
        for ind in range(L.shape[0]):
            self.armGroups[i].pop(L[ind])
        return L.shape[0] > 0
    def mergeGroups(self):
        """
        Re-pack the batches: shuffle, order by size, then repeatedly pair
        the smallest remaining batch with the largest -- unless the union
        would exceed 1.5 * batch_size, in which case the large batch is
        kept on its own.
        """
        oldAG = self.armGroups[:]
        random.shuffle(oldAG)
        # key=len replaces the Python-2-only cmp= argument; the sort is
        # stable, so equal-sized batches keep their shuffled order
        # exactly as before.
        oldAG.sort(key=len)
        self.armGroups = []
        i = 0; j = len(oldAG)-1
        while i <= j:
            if i == j:
                self.armGroups.append(oldAG[i])
                break
            elif len(oldAG[i]) + len(oldAG[j]) > self.batch_size * 1.5:
                self.armGroups.append(oldAG[j])
                j = j-1
            else:
                self.armGroups.append(oldAG[i]+oldAG[j])
                i = i+1; j = j-1
    def mergePairOfBatches(self, i, j):
        """Append batch j's arms onto batch i and drop batch j."""
        self.armGroups[i] = self.armGroups[i] + self.armGroups.pop(j)
    def numArms(self):
        """Total number of arms across all batches."""
        return sum([len(ag) for ag in self.armGroups])
    def __getitem__(self, key):
        """Cyclic batch access: the index is taken modulo the batch count."""
        return self.armGroups[key % len(self.armGroups)]
    def __len__(self):
        return len(self.armGroups)
    def index(self, batch):
        """Position of `batch` in the current list of batches."""
        return self.armGroups.index(batch)
class mergeRUCBSampler(AbstractSampler):
    """mergeRUCB dueling-bandit sampler.

    Arms are partitioned into small batches (an armTree).  Each step runs a
    Relative-UCB duel inside the current batch; arms whose UCB drops below
    0.5 are pruned, and once few enough arms survive the batches are merged,
    until a winner can be declared.
    """
    def __init__(self, arms=[], arg_str="", run_count=""):
        """Parse sampler options from *arg_str*, initialise the win/play
        statistics and the batch partition, and optionally restore state
        from a previous run's .npz output.

        NOTE(review): the mutable default ``arms=[]`` is shared between
        calls; harmless only if callers always pass their own list — confirm.
        """
        parser = argparse.ArgumentParser(prog=self.__class__.__name__)
        parser.add_argument("--sampler", type=str)
        parser.add_argument("--RUCB_alpha_parameter", type=float, default=0.5)
        parser.add_argument("--mergeRUCB_batch_size", type=int, default=4)
        parser.add_argument("--mergeRUCB_delta", type=float, default=0.01)
        parser.add_argument("--continue_sampling_experiment", type=str,
                            default="No")
        parser.add_argument("--old_output_dir", type=str, default="")
        parser.add_argument("--old_output_prefix", type=str, default="")
        args = vars(parser.parse_known_args(arg_str.split())[0])
        self.nArms = len(arms) # Number of arms
        self.initArms = arms[:]
        self.lArms = arms # Arms as a list of arms
        if args["continue_sampling_experiment"] != "Yes":
            random.shuffle(self.lArms)
        self.iArms = range(self.nArms) # The indices of the arms
        self.dictArms = dict(zip(self.lArms,self.iArms))
        # A dictionary taking arms to their indices.
        self.RealWins = ones([self.nArms, self.nArms])
        self.numPlays = 2*ones([self.nArms, self.nArms])
        self.PMat = self.RealWins / self.numPlays
        # Cached 1/sqrt(numPlays), kept in sync incrementally by update_scores.
        self.invSqrtNumPlays = 1./sqrt(self.numPlays)
        logging.info("Number of arms = %d" % self.nArms)
        logging.info("Set of arms: %s" % arms)
        self.alpha = args["RUCB_alpha_parameter"]
        self.batchSize = args["mergeRUCB_batch_size"]
        self.delta = args["mergeRUCB_delta"] # Prob of failure
        self.tArms = armTree(self.iArms,self.batchSize)
        self.UCB = ones([self.batchSize,self.batchSize])
        self.currentBatch = 0
        self.iteration = 1
        # Time offset for the high-probability bound.
        # NOTE(review): with the default alpha=0.5 both (2*alpha-1) factors
        # are zero, so this divides by zero — presumably callers always pass
        # RUCB_alpha_parameter > 0.5; confirm.
        self.C = (((4*self.alpha-1)*(self.nArms**2)) /
                  ((2*self.alpha-1)*self.delta))**(1/(2*self.alpha-1))
        self.t = ceil(self.C)+1
        self.chatty = False
        if run_count == "":
            self.runMessage = ""
        else:
            self.runMessage = "Run %s: " % str(run_count)
        old_output_dir = args["old_output_dir"]
        old_output_prefix = args["old_output_prefix"]
        if args["continue_sampling_experiment"] == "Yes" and \
           old_output_dir != "" and old_output_prefix != "":
            # Restore RealWins, batch structure and the clock from the .npz
            # file written by a previous run, remapping arm indices because
            # that run's self.lArms shuffle differed from self.initArms.
            old_file = os.path.join(old_output_dir, "%s-%d.npz" \
                                    % (old_output_prefix,int(run_count)))
            data = load(old_file)
            # presumably staggers concurrent runs reading shared files — confirm
            time.sleep(int(run_count))
            self.t = data['time']+ceil(self.C)+1
            # print "[self.lArms.index(a) for a in self.initArms] = ", [self.lArms.index(a) for a in self.initArms]
            Inds = [self.initArms.index(a) for a in self.lArms]
            # print "[self.initArms.index(a) for a in self.lArms] = ", Inds
            self.RealWins = data['RealWins'][ix_(Inds,Inds)]
            self.numPlays = self.RealWins + self.RealWins.T
            self.PMat = self.RealWins / self.numPlays
            self.invSqrtNumPlays = 1./sqrt(self.numPlays)
            # print "data['armGroups'] = ", data['armGroups']
            # print "data['RealWins'] = \n", data['RealWins']
            self.tArms.armGroups = [[self.lArms.index(self.initArms[a])
                                     for a in ag]
                                    for ag in data['armGroups'].tolist()]
            # print self.tArms.armGroups
            self.iteration = int(data['iteration'])
            self.currentBatch = int(self.t-ceil(self.C)) % len(self.tArms)
            data.close()
            logging.info("Done reading "+old_file)
        # print "RealWins = \n", self.RealWins
    def getUCB(self):
        """Recompute self.UCB for the current batch: empirical win rate plus
        the optimism term sqrt(alpha*log(t)) / sqrt(numPlays), with the
        diagonal fixed at 0.5.  Skips past batches that have shrunk to a
        single arm (as long as more than one batch remains)."""
        Inds = self.tArms[self.currentBatch]
        while len(Inds) <= 1 and len(self.tArms) > 1:
            self.currentBatch = (self.currentBatch+1) % len(self.tArms)
            Inds = self.tArms[self.currentBatch]
        self.UCB = self.PMat[ix_(Inds,Inds)] + \
              sqrt(self.alpha*log(self.t)) * self.invSqrtNumPlays[ix_(Inds,Inds)]
        fill_diagonal(self.UCB,.5)
    def sampleTournament(self,withFig=False):
        """Prune the current batch until no arm is provably beaten, then
        return a uniformly random surviving arm (the 'champion')."""
        self.getUCB()
        while self.tArms.pruneGroup(self.currentBatch, self.UCB):
            self.getUCB()
        arms = self.tArms[self.currentBatch]
        champ = arms[randint(0,len(arms)-1)]
        return champ
    def doUCBRelChamp(self,champ,withFig=False):
        """Return the challenger: the arm of the current batch with the
        highest UCB against *champ* (champ itself excluded)."""
        champInd = self.tArms[self.currentBatch].index(champ)
        ucb = self.UCB[:,champInd]
        ucb[champInd] = 0 # exclude self-play; NOTE: mutates a column view of self.UCB
        challengerInd = myArgmax(ucb)
        challenger = self.tArms[self.currentBatch][challengerInd]
        return challenger
    def get_arms(self,withFig=False):
        """Return (champion, challenger, champion's index in the original
        ordering, challenger's index in the original ordering)."""
        # This returns two arms to compare.
        firstPlace = self.sampleTournament(withFig)
        secondPlace = self.doUCBRelChamp(firstPlace)
        r1 = self.lArms[firstPlace]
        r2 = self.lArms[secondPlace]
        i1 = self.initArms.index(r1)
        i2 = self.initArms.index(r2)
        return r1, r2, i1, i2
    def update_scores(self,r_winner,r_loser):
        """Record the duel outcome, log progress every 1000 steps, merge
        batches once enough arms have been eliminated, advance the
        round-robin batch pointer and the clock.  Returns the winner's
        index in the original arm ordering."""
        # This method can be used to update the scores.
        winner = self.dictArms[r_winner]
        loser = self.dictArms[r_loser]
        if (self.t - ceil(self.C)) % 1000 == 0.:
            # Periodic progress report, with arm indices mapped back to the
            # original (unshuffled) ordering.
            ArmGroups = [sorted([self.initArms.index(self.lArms[i]) \
                                 for i in ag]) \
                         for ag in self.tArms.armGroups]
            logging.info("%s%d- Number of surviving arms: %d "\
                         % (self.runMessage, self.t - ceil(self.C),
                            sum([len(ag) for ag in ArmGroups]))+\
                         "Surviving groups of arms: %s " \
                         % ArmGroups)
            W = self.RealWins
            UCB = W/(W+W.T) + sqrt(self.alpha*log(self.t)/(W+W.T))
            Inds = [self.lArms.index(a) for a in self.initArms]
            # print "AG = ", self.tArms.armGroups, "\n W = \n", W[ix_(Inds,Inds)], "\n(alpha,t) = ", (self.alpha,self.t) , "\n UCB = \n", UCB[ix_(Inds,Inds)] * (W[ix_(Inds,Inds)] > 1)
        if self.tArms.numArms() <= self.nArms/(2**self.iteration)+1 \
           and len(self.tArms) > 1:
            # Halving condition met: merge the surviving batches (twice if
            # the merge left some batch at half the nominal size or less).
            self.tArms.mergeGroups()
            if min([len(a) for a in self.tArms.armGroups])<=0.5*self.batchSize:
                self.tArms.mergeGroups()
            self.iteration = self.iteration + 1
            logging.info("%s%d- Iteration %d" \
                         % (self.runMessage, self.t - ceil(self.C),
                            self.iteration))
        # Symmetric update of the win counts and the cached derived matrices.
        self.RealWins[winner,loser] += 1
        self.numPlays[winner,loser] += 1
        self.invSqrtNumPlays[winner,loser] = \
                                1./sqrt(self.numPlays[winner,loser])
        self.PMat[winner,loser] = \
                    self.RealWins[winner,loser]/self.numPlays[winner,loser]
        self.numPlays[loser,winner] += 1
        self.invSqrtNumPlays[loser,winner] = \
                                1./sqrt(self.numPlays[loser,winner])
        self.PMat[loser,winner] = \
                    self.RealWins[loser,winner]/self.numPlays[loser,winner]
        self.currentBatch = (self.currentBatch+1) % len(self.tArms)
        self.t = self.t + 1
        return self.initArms.index(r_winner)
    def get_winner(self):
        """Return the arm that beats the most opponents according to the
        current empirical preference matrix."""
        # This method can be called to find out which arm is the best so far.
        self.numPlays = self.RealWins+self.RealWins.T
        PMat = self.RealWins / self.numPlays
        self.champ = myArgmax((PMat > 0.5).sum(axis=1))
        logging.info("mergeRUCBSampler.get_winner() was called!")
        return self.lArms[self.champ]
#################### OLD CODE ########################
###### V3:
# def isReady(self,UCB,width):
# sideUCB = sign(UCB+eye(UCB.shape[0])-0.5)
# LCB = 1-UCB.T
# sideLCB = sign(LCB+eye(UCB.shape[0])-0.5)
# isClear = ((sideUCB * sideLCB) > 0).all(axis=1).any()
# isClear = isClear & (UCB-LCB < width).all()
# return isClear
#
# def readyBatches(self,UCB,width):
# ready = zeros(len(self.armGroups))
# for ind in range(len(self.armGroups)):
# group = self.armGroups[ind]
# ucb = UCB[ix_(group,group)]
# ready[ind] = self.isReady(ucb,width)
# return ready
#
# def getLosers(self,UCB):
# Losers = []
# for ag in self.armGroups:
# ucb = UCB[ix_(ag,ag)]
# losers = [ag[ind] for ind in nonzero((ucb < 0.5).any(axis=1))[0]]
# Losers = Losers + losers
# return Losers
# def getFullUCB(self):
# rWins = self.RealWins
# A = rWins
# B = rWins.T
# N = maximum(A+B,ones(A.shape))
# UCB = A/N + sqrt(self.alpha*log(self.t)/N)
# fill_diagonal(UCB,.5)
# return UCB
#
# def sampleTournament(self,withFig=False):
# self.getUCB()
# wins = (self.UCB >= .5).sum(axis=1)
# champInd = myArgmax(potentialChamps)
# champ = self.tArms[self.currentBatch][champInd]
# return champ # * UCB.max(axis=1))
#
#
# def update_scores(self,r_winner,r_loser):
# # This method can be used to update the scores.
# winner = self.dictArms[r_winner]
# loser = self.dictArms[r_loser]
# self.RealWins[winner,loser] = self.RealWins[winner,loser] + 1
# if self.t % max(self.nArms,1000) == 0:
# fullUCB = self.getFullUCB()
# Losers = self.tArms.getLosers(fullUCB)
# lWinners = [self.lArms[ind] for ind in set(self.iArms)-set(Losers)]
# logging.info("%s%d- Number of surviving arms: %d "\
# % (self.runMessage, self.t,
# self.nArms - len(Losers))+\
# "Surviving arms: %s" \
# % sorted([self.initArms.index(a) for a in lWinners]))
# readyBatches = self.tArms.readyBatches(fullUCB,self.width1)
# if readyBatches.sum() > 0.75*len(self.tArms):
# self.tArms.mergeGroups()
# self.iteration = self.iteration + 1
# logging.info("%s%d- Iteration %d" \
# % (self.runMessage, self.t, self.iteration))
# self.RealWins[winner,loser] = self.RealWins[winner,loser] + 1
# self.numPlays[winner,loser] = self.numPlays[winner,loser] + 1
# self.invSqrtNumPlays[winner,loser] = \
# 1./sqrt(self.numPlays[winner,loser])
# self.PMat[winner,loser] = \
# self.RealWins[winner,loser]/self.numPlays[winner,loser]
# self.numPlays[loser,winner] = self.numPlays[loser,winner] + 1
# self.invSqrtNumPlays[loser,winner] = \
# 1./sqrt(self.numPlays[loser,winner])
# self.PMat[loser,winner] = \
# self.RealWins[loser,winner]/self.numPlays[loser,winner]
# self.currentBatch = (self.currentBatch+1) % len(self.tArms)
# self.t = self.t + 1
# return self.initArms.index(r_winner)
###### V2: would skip over batches that had a clear winner
# def getUCB(self):
# keepLooking = True
# while keepLooking:
# Inds = self.tArms[self.currentBatch % len(self.tArms)]
# tempUCB = self.PMat[ix_(Inds,Inds)] + \
# sqrt(self.alpha*log(self.t)) * self.invSqrtNumPlays[ix_(Inds,Inds)]
# fill_diagonal(tempUCB,.5)
# keepLooking = self.tArms.isReady(tempUCB,self.width2)
# self.currentBatch = (self.currentBatch+1) % len(self.tArms)
# self.currentBatch = (self.currentBatch-1) % len(self.tArms)
# self.UCB = tempUCB
###### V1: merging would happen when enough arms were defeated. ##########
# def mergeGroups(self):
# oldAG = self.armGroups[:]
# self.armGroups = []
# for i in range(len(oldAG)/2):
# self.armGroups.append(oldAG[2*i]+oldAG[2*i+1])
# if mod(len(oldAG),2) == 1:
# self.armGroups.append(oldAG[-1])
#
#
# def getUCB(self):
# Inds = self.tArms[self.currentBatch % len(self.tArms)]
# self.UCB = self.PMat[ix_(Inds,Inds)] + \
# sqrt(self.alpha*log(self.t)) * self.invSqrtNumPlays[ix_(Inds,Inds)]
# fill_diagonal(self.UCB,.5)
#
#
# def getFullUCB(self):
# rWins = self.RealWins
# A = rWins
# B = rWins.T
# N = maximum(A+B,ones(A.shape))
# UCB = A/N + sqrt(self.alpha*log(self.t)/N)
# fill_diagonal(UCB,.5)
# return UCB
#
# def sampleTournament(self,withFig=False):
# self.getUCB()
# potentialChamps = (self.UCB >= .5).all(axis=1)
# champInd = myArgmax(potentialChamps)
# champ = self.tArms[self.currentBatch][champInd]
# return champ # * UCB.max(axis=1))
#
#
# def doUCBRelChamp(self,champ,withFig=False):
# champInd = self.tArms[self.currentBatch].index(champ)
# ucb = self.UCB[:,champInd]
# if len(self.tArms) > 1:
# ucb[champInd] = 0
# challengerInd = myArgmax(ucb)
# challenger = self.tArms[self.currentBatch][challengerInd]
# return challenger
#
#
# def get_arms(self,withFig=False):
# # This returns two arms to compare.
# firstPlace = self.sampleTournament(withFig)
# secondPlace = self.doUCBRelChamp(firstPlace)
# r1 = self.lArms[firstPlace]
# r2 = self.lArms[secondPlace]
# i1 = self.initArms.index(r1)
# i2 = self.initArms.index(r2)
# return r1, r2, i1, i2
#
#
# def update_scores(self,r_winner,r_loser):
# # This method can be used to update the scores.
# winner = self.dictArms[r_winner]
# loser = self.dictArms[r_loser]
# self.RealWins[winner,loser] = self.RealWins[winner,loser] + 1
# if self.t % max(self.nArms,1000) == 0:
# Losers = self.tArms.getLosers(self.getFullUCB())
# lWinners = [self.lArms[ind] for ind in set(self.iArms)-set(Losers)]
# logging.info("%s%d- Number of surviving arms: %d "\
# % (self.runMessage, self.t,
# self.nArms - len(Losers))+\
# "Surviving arms: %s" \
# % sorted([self.initArms.index(a) for a in lWinners]))
# nPotentialChamps = self.nArms - len(Losers)
# if nPotentialChamps < 1.5*len(self.tArms):
# self.tArms.mergeGroups()
# self.iteration = self.iteration + 1
# logging.info("%s%d- Iteration %d" \
# % (self.runMessage, self.t, self.iteration))
# self.currentBatch = (self.currentBatch+1) % len(self.tArms)
# self.RealWins[winner,loser] = self.RealWins[winner,loser] + 1
# self.numPlays[winner,loser] = self.numPlays[winner,loser] + 1
# self.invSqrtNumPlays[winner,loser] = \
# 1./sqrt(self.numPlays[winner,loser])
# self.PMat[winner,loser] = \
# self.RealWins[winner,loser]/self.numPlays[winner,loser]
# self.numPlays[loser,winner] = self.numPlays[loser,winner] + 1
# self.invSqrtNumPlays[loser,winner] = \
# 1./sqrt(self.numPlays[loser,winner])
# self.PMat[loser,winner] = \
# self.RealWins[loser,winner]/self.numPlays[loser,winner]
# self.t = self.t + 1
# return self.initArms.index(r_winner)
| 0.007047 |
# NOTE(review): the following lines are extraneous scraping residue (a web
# "dataset card" preview), not Python source; commented out so the file
# remains importable.  Original text preserved below:
# End of preview. Expand
# in Data Studio
# No dataset card yet
# - Downloads last month
# - 16